Spaces:
Runtime error
Update app.py
app.py
CHANGED
@@ -22,7 +22,7 @@ def get_available_models() -> List[str]:
         response = requests.get("https://openrouter.ai/api/v1/models", headers=headers)
         response.raise_for_status()
         models = response.json()
-        return [model["id"] for model in models]
+        return [model["id"] for model in models.data]
     except Exception as e:
         print(f"Error fetching models: {e}")
         # Fallback to a basic list of known models
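The updated return statement reads `models.data`, but `response.json()` gives back a plain dict, so attribute access raises `AttributeError` and the function falls through to its exception handler on every call. A minimal sketch of the fetch helper, assuming the endpoint wraps the model list under a `data` key and that `headers` carries a bearer token; the fallback ids are illustrative only:

```python
import requests
from typing import List

def get_available_models(api_key: str) -> List[str]:
    headers = {"Authorization": f"Bearer {api_key}"}  # assumed auth scheme
    try:
        response = requests.get("https://openrouter.ai/api/v1/models", headers=headers)
        response.raise_for_status()
        models = response.json()  # a dict, so models.data is not valid
        # The model list is expected under the "data" key.
        return [model["id"] for model in models.get("data", [])]
    except Exception as e:
        print(f"Error fetching models: {e}")
        # Fallback to a basic list of known models (illustrative ids)
        return ["openai/gpt-4o-mini", "anthropic/claude-3.5-sonnet"]
```

Indexing with `.get("data", [])` keeps the happy path working while still letting the existing fallback handle malformed responses.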
@@ -118,30 +118,32 @@ IMPORTANT: Your response must be a valid JSON object with the following structure:
         step_md += "---\n\n"

         markdown_output += step_md
-        yield markdown_output  # Update the output incrementally
+        #yield markdown_output  # Update the output incrementally

         messages.append({"role": "assistant", "content": json.dumps(step_data.model_dump())})

         if step_data.next_action == 'final_answer' and step_count < 15:
             messages.append({"role": "user", "content": "Please continue your analysis with at least 5 more steps before providing the final answer."})
         elif step_data.next_action == 'final_answer':
             break
         elif step_data.next_action == 'reflect' or step_count % 3 == 0:
             messages.append({"role": "user", "content": "Please perform a detailed self-reflection on your reasoning so far."})
         else:
             messages.append({"role": "user", "content": "Please continue with the next step in your analysis."})

         step_count += 1
+        yield messages

     # Generate final answer
     final_data = make_api_call(model, system_prompt, messages, 750, is_final_answer=True)
+    yield messages
+
+    #final_md = f"### Final Answer\n\n"
+    #final_md += f"{final_data.content}\n\n"
+    #final_md += f"**Confidence:** {final_data.confidence:.2f}\n\n"

-    final_md = f"### Final Answer\n\n"
-    final_md += f"{final_data.content}\n\n"
-    final_md += f"**Confidence:** {final_data.confidence:.2f}\n\n"
-
-    markdown_output += final_md
-    yield markdown_output
+    #markdown_output += final_md
+    #yield markdown_output

 def create_interface():
     # Check for API key
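With the Markdown streaming commented out, `generate_response` now yields the raw `messages` list of role/content dicts straight into the `gr.Chatbot` wired up in the next hunk. A default `gr.Chatbot` renders a list of `(user, assistant)` pairs, so the dicts may not display as intended; the adapter below is a sketch under that assumption and is not part of the commit (the helper name `messages_to_pairs` is made up for illustration):

```python
from typing import Dict, List, Tuple

def messages_to_pairs(messages: List[Dict[str, str]]) -> List[Tuple[str, str]]:
    """Collapse role/content dicts into the (user, assistant) pairs a default gr.Chatbot renders."""
    pairs: List[Tuple[str, str]] = []
    pending_user = ""
    for msg in messages:
        if msg["role"] == "user":
            pending_user = msg["content"]
        elif msg["role"] == "assistant":
            pairs.append((pending_user, msg["content"]))
            pending_user = ""
    return pairs

# Inside the generator one could then stream chat-friendly updates:
#     yield messages_to_pairs(messages)
# Recent Gradio releases can also consume the dicts directly when the component
# is built as gr.Chatbot(type="messages"); which path applies depends on the
# installed Gradio version.
```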
@@ -161,18 +163,21 @@ def create_interface():
             value=available_models[0],
             label="Select Model"
         )
+        chatbot = gr.Chatbot()
+
         query_input = gr.Textbox(
+            lines=5
             label="Enter your query:",
             placeholder="e.g., What are the potential long-term effects of climate change on global agriculture?"
         )
         submit_btn = gr.Button("Generate Response")

-        output_box = gr.Markdown(label="Response")
+        #output_box = gr.Markdown(label="Response")

         submit_btn.click(
             fn=generate_response,
             inputs=[query_input, model_dropdown],
-            outputs=output_box
+            outputs=chatbot
         )

     return interface
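Two details in this hunk stand out: in the new `gr.Textbox(...)` call, `lines=5` is not followed by a comma before `label=...`, which is a `SyntaxError` in Python and is consistent with the Space's 'Runtime error' status; and the click handler now streams whatever the generator yields into the Chatbot. Below is a sketch of the wiring this hunk appears to aim for, with the comma restored; the `gr.Blocks` layout, the `choices=` argument, and the stub definitions are assumptions, since only this fragment of app.py is shown:

```python
import gradio as gr

# Placeholders so the sketch runs standalone; the real app defines these elsewhere.
available_models = ["openai/gpt-4o-mini", "anthropic/claude-3.5-sonnet"]

def generate_response(query, model):
    yield [(query, f"(demo) {model} would answer here")]

def create_interface():
    with gr.Blocks() as interface:
        model_dropdown = gr.Dropdown(
            choices=available_models,
            value=available_models[0],
            label="Select Model",
        )
        chatbot = gr.Chatbot()

        query_input = gr.Textbox(
            lines=5,  # the diff omits this comma, which is a SyntaxError
            label="Enter your query:",
            placeholder="e.g., What are the potential long-term effects of climate change on global agriculture?",
        )
        submit_btn = gr.Button("Generate Response")

        # Each value yielded by generate_response streams into the Chatbot.
        submit_btn.click(
            fn=generate_response,
            inputs=[query_input, model_dropdown],
            outputs=chatbot,
        )

    return interface
```

Because `generate_response` is a generator, Gradio streams each yielded value to `outputs=chatbot`, so whatever it yields must already be in a format the Chatbot accepts.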