Commit 038d6db
Parent(s): e53f134
feat: change default model from olmOCR to Gemini 3 Pro Preview
Update default LLM backend from olmOCR-2-7B to gemini-3-pro-preview and consolidate Gemini model configuration. Remove duplicate gemini-3-pro-preview entry from MODELS_MAP and update MODEL_GEMINI constant to use the pro preview variant.
- Change default model_selector value from MODEL_OLMOCR to MODEL_GEMINI
- Update MODEL_GEMINI constant from "gemini-2.5-flash" to "gemini-3-pro-preview"
- Consolidate gemini-3-pro-preview pricing ($2.00 input / $12.00 output) under the MODEL_GEMINI entry in MODELS_MAP
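Since the dropdown builds its choices from MODELS_MAP.keys(), the new default only resolves if MODEL_GEMINI stays a key of that map, which the consolidated entry below preserves. A minimal, self-contained sketch of the pattern; the map contents here are placeholders, not the project's real configuration:

import gradio as gr

# Sketch of the app.py pattern: the dropdown default must be one of its
# choices, so MODEL_GEMINI has to remain a key in MODELS_MAP.
# Placeholder entries only; the real rates live in common.py.
MODEL_GEMINI = "gemini-3-pro-preview"
MODELS_MAP = {
    MODEL_GEMINI: {"backend": "gemini"},
    "olmOCR-2-7B-1025-FP8": {"backend": "olmocr"},
}

with gr.Blocks() as demo:
    model_selector = gr.Dropdown(
        label="LLM backend",
        choices=list(MODELS_MAP.keys()),
        value=MODEL_GEMINI,  # the new default after this commit
    )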
app.py
CHANGED
@@ -141,7 +141,7 @@ def build_interface() -> gr.Blocks:
     model_selector = gr.Dropdown(
         label="LLM backend",
         choices=list(MODELS_MAP.keys()),
-        value=MODEL_OLMOCR,
+        value=MODEL_GEMINI,
     )
 
     prompt_editor = gr.Textbox(
common.py
CHANGED
@@ -1,4 +1,4 @@
-MODEL_GEMINI = "gemini-2.5-flash"
+MODEL_GEMINI = "gemini-3-pro-preview"
 MODEL_OLMOCR = "olmOCR-2-7B-1025-FP8"
 
 
@@ -19,8 +19,7 @@ MODELS_MAP = {
     "gpt-4.1-mini": {"input": 0.40, "output": 1.60, "backend": "openai"},
     "gpt-4.1-nano": {"input": 0.10, "output": 0.40, "backend": "openai"},
     # Other backends (mock rates)
-    MODEL_GEMINI: {"input": ..., "output": ..., "backend": "gemini"},
-    "gemini-3-pro-preview": {"input": 2.00, "output": 12.00, "backend": "gemini"},
+    MODEL_GEMINI: {"input": 2.00, "output": 12.00, "backend": "gemini"},
     MODEL_OLMOCR: {"input": 1.35, "output": 0.30, "backend": "olmocr"},
     "gemini-2.5-pro": {"input": 1.25, "output": 10.00, "backend": "gemini"},
     "default": {"input": 2.50, "output": 10.00, "backend": "openai"},
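For context, a sketch of how the consolidated rate entries could drive a cost estimate; the estimate_cost helper and the per-1M-token unit are assumptions for illustration, not part of this commit:

from common import MODELS_MAP, MODEL_GEMINI

def estimate_cost(model: str, input_tokens: int, output_tokens: int) -> float:
    # Assumes the "input"/"output" rates are USD per 1M tokens (unit not stated in the diff);
    # unknown model names fall back to the catch-all "default" entry.
    rates = MODELS_MAP.get(model, MODELS_MAP["default"])
    return (input_tokens * rates["input"] + output_tokens * rates["output"]) / 1_000_000

# With the consolidated 2.00 / 12.00 rates: 10k input + 2k output tokens ≈ $0.044.
print(round(estimate_cost(MODEL_GEMINI, 10_000, 2_000), 4))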