File size: 2,716 Bytes
c6930b6
 
f7ddc9f
401aee4
 
 
 
 
 
499aff8
 
401aee4
 
 
 
 
 
 
3b927dc
 
401aee4
 
 
 
 
 
 
 
 
 
3b927dc
 
401aee4
 
4f7f282
 
5952485
975772b
95e6285
401aee4
 
 
 
 
 
 
bb94f59
 
 
 
 
401aee4
499aff8
bb94f59
 
 
 
 
 
 
 
 
401aee4
 
 
 
499aff8
401aee4
 
 
 
3b927dc
401aee4
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
import smolagents
print("smolagents version:", smolagents.__version__)
from smolagents import CodeAgent, WebSearchTool, PythonInterpreterTool, InferenceClientModel
import requests
import os

# --- 1. GAIA API configuration ---
# Hugging Face token read from the environment (custom variable name used by
# this course setup); None if the variable is not set — TODO confirm whether
# any downstream call actually requires it.
HF_TOKEN = os.getenv("token_curso")

# BASE must be the root of the Space, without the /api suffix.
BASE = "http://localhost:7860"
QUESTIONS_URL = f"{BASE}/questions"   # GET: list of GAIA questions
SUBMIT_URL    = f"{BASE}/submit"      # POST: submit collected answers

# Identity attached to every submission payload (see submit_answers).
HF_USERNAME    = "jbaselga"
AGENT_CODE_URL = "https://huggingface.co/spaces/jbaselga/agentes-unit4/tree/main"

def fetch_gaia_questions():
    """Fetch the GAIA question list from the Space's /questions endpoint.

    Returns:
        The decoded JSON payload (expected: a list of dicts with at least
        "task_id" and "question" keys — see main()).

    Raises:
        requests.HTTPError: if the server answers with a 4xx/5xx status.
        requests.Timeout: if the server does not respond within 30 s.
    """
    resp = requests.get(QUESTIONS_URL, timeout=30)
    # Fail loudly on HTTP errors instead of letting resp.json() raise a
    # confusing JSONDecodeError on an HTML error page.
    resp.raise_for_status()
    return resp.json()

def submit_answers(answers: dict):
    """POST the collected answers to the Space's /submit endpoint.

    Args:
        answers: mapping of task_id -> submitted answer string.

    Returns:
        The decoded JSON response from the server (score/result payload).

    Raises:
        requests.HTTPError: if the server answers with a 4xx/5xx status.
        requests.Timeout: if the server does not respond within 60 s.
    """
    payload = {
        "username": HF_USERNAME,
        "agent_code": AGENT_CODE_URL,
        "answers": [
            {"task_id": tid, "submitted_answer": ans}
            for tid, ans in answers.items()
        ]
    }
    # Generous timeout: the server may grade all answers before replying.
    resp = requests.post(SUBMIT_URL, json=payload, timeout=60)
    # Surface HTTP errors explicitly rather than failing inside resp.json().
    resp.raise_for_status()
    return resp.json()

# --- 2. Agent configuration ---
# Remote inference model served through the HF Inference API.
model = InferenceClientModel(model="mistralai/Mistral-7B-Instruct-v0.2")
# Alternative models tried during development:
#model = InferenceClientModel(model="HuggingFaceH4/zephyr-7b-beta")
# model = InferenceClientModel(model="Qwen/Qwen1.5-7B-Chat")


tools = [
    WebSearchTool(),            # Web search (uses DuckDuckGo)
    PythonInterpreterTool()     # Runs Python code (including calculations)
]
# max_steps caps the agent's reasoning loop so a stuck question can't hang
# the whole run; add_base_tools also enables smolagents' default toolset.
agent = CodeAgent(tools=tools, model=model, add_base_tools=True, max_steps=10)

# --- 3. Answering a single question ---
def answer_question(qid: str, question: str) -> str:
    """Run the agent on one GAIA question and return a single-line answer.

    Args:
        qid: task identifier, used only for error logging.
        question: the question text sent to the agent.

    Returns:
        The first line of the agent's answer, or the sentinel "ERROR" when
        the agent returns None or raises any exception.
    """
    # Prompt is in Spanish on purpose (course setup): "answer ONLY the final
    # answer, with no explanation or justification".
    prompt = f"{question}\nResponde SOLO la respuesta final, sin explicaciones ni justificación."
    try:
        out = agent.run(prompt)
        if out is None:
            print(f"❌ [qid={qid}] El modelo devolvió None.")
            return "ERROR"
        # agent.run may return a non-string object; coerce before stripping
        # so a valid non-str answer isn't swallowed by the except below.
        # GAIA expects the bare answer, so keep only the first line.
        return str(out).strip().split('\n')[0]
    except Exception as e:
        # Best-effort: one bad question must not abort the whole run.
        print(f"❌ [qid={qid}] Error al generar respuesta: {e}")
        return "ERROR"

# --- 4. Main flow ---
def main():
    """Fetch all GAIA questions, answer each one, and submit the results."""
    questions = fetch_gaia_questions()
    print("DEBUG qs:", questions)
    collected = {}
    for entry in questions:
        print("DEBUG item:", entry)
        # Guard clause: skip anything that isn't a well-formed question dict.
        well_formed = isinstance(entry, dict) and "task_id" in entry and "question" in entry
        if not well_formed:
            print("Formato inesperado:", entry)
            continue
        tid = entry["task_id"]
        collected[tid] = answer_question(tid, entry["question"])
    outcome = submit_answers(collected)
    print("🧪 Resultados GAIA:", outcome)

if __name__ == "__main__":
    main()