import os

from openai import OpenAI
import gradio as gr
import requests
import pandas as pd
from smolagents import CodeAgent, DuckDuckGoSearchTool, tool

# --- Constants ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"

# --- Configure OpenAI client ---
openai_api_key = os.getenv("OPENAI_API_KEY")
if not openai_api_key:
    raise RuntimeError("Please set OPENAI_API_KEY in your Space secrets or env!")
client = OpenAI(api_key=openai_api_key)
OPENAI_MODEL_ID = os.getenv("OPENAI_MODEL_ID", "gpt-4.1")


# --- Model Wrapper for CodeAgent compatibility ---
class OpenAIModelWrapper:
    """Minimal callable wrapper exposing an OpenAI model to the CodeAgent.

    Note: this assumes the agent passes a plain prompt string; depending on the
    smolagents version, the model may instead receive a list of chat messages.
    """

    def __init__(self, model_id: str, client: OpenAI):
        self.model_id = model_id
        self.client = client

    def __call__(self, prompt: str, **kwargs) -> str:
        # Extra kwargs (stop_sequences, temperature, etc.) are ignored.
        resp = self.client.responses.create(
            model=self.model_id,
            input=prompt,
        )
        return resp.output_text


# --- Tool Definitions ---
@tool
def summarize_query(query: str) -> str:
    """Reframes an unclear query into a concise one.

    Args:
        query: The query to reframe.
    """
    return f"Summarize and reframe: {query}"


@tool
def wikipedia_search(page: str) -> str:
    """Fetches the summary extract of an English Wikipedia page.

    Args:
        page: Title of the Wikipedia page to summarize.
    """
    url = f"https://en.wikipedia.org/api/rest_v1/page/summary/{page}"
    r = requests.get(url, timeout=10)
    r.raise_for_status()
    return r.json().get("extract", "")


search_tool = DuckDuckGoSearchTool()
wiki_tool = wikipedia_search
summarize_tool = summarize_query

# --- ReACT + Scratchpad + Auto-Retry Instruction Prompt ---
instruction_prompt = """
You are a ReACT agent with three tools:
  • DuckDuckGoSearchTool(query: str)
  • wikipedia_search(page: str)
  • summarize_query(query: str)

Internally, for each question:
1. Thought: decide which tool to call.
2. Action: call the chosen tool.
3. Observation: record the result.
4. If the result is empty or irrelevant:
   Thought: retry with summarize_query + DuckDuckGoSearchTool.
   Record the new Observation.
5. Thought: integrate the observations.

Finally, output your answer with the following template:
FINAL ANSWER: [YOUR FINAL ANSWER].
YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma-separated list of numbers and/or strings.
If you are asked for a number, don't write it with commas or units such as $ or percent signs unless specified otherwise.
If you are asked for a string, don't use articles or abbreviations (e.g. for cities), and write digits in plain text unless specified otherwise.
If you are asked for a comma-separated list, apply the above rules to each element depending on whether it is a number or a string.

Rules:
- Numbers: digits only.
- Lists: comma-separated, no extra punctuation.
- Strings: no filler words.
"""
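# Optional sanity-check sketch (commented out; nothing here runs on the Space).
# It only uses names defined above, and the example prompt/page/query values are
# arbitrary placeholders. Uncomment locally to verify each piece in isolation:
#
#   wrapper = OpenAIModelWrapper(model_id=OPENAI_MODEL_ID, client=client)
#   print(wrapper("Reply with the single word: ready"))    # raw Responses API call
#   print(wikipedia_search("Alan_Turing")[:200])           # summary extract, "" if absent
#   print(summarize_query("who won the most olympic gold medals"))  # reframed query text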
""" # --- Build the CodeAgent --- llm_wrapper = OpenAIModelWrapper(model_id=OPENAI_MODEL_ID, client=client) smart_agent = CodeAgent( tools=[search_tool, wiki_tool, summarize_tool], model=llm_wrapper ) # --- BasicAgent wrapper for Gradio --- class BasicAgent: def __init__(self): print("SmolAgent (GPT-4.1) with ReACT & tools initialized.") def __call__(self, question: str) -> str: prompt = instruction_prompt.strip() + "\n\nQUESTION: " + question.strip() try: return smart_agent.run(prompt) except Exception as e: return f"AGENT ERROR: {e}" # --- Gradio / Submission Logic --- def run_and_submit_all(profile: gr.OAuthProfile | None): if not profile: return "Please log in to Hugging Face.", None username = profile.username space_id = os.getenv("SPACE_ID", "") agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main" agent = BasicAgent() # Fetch questions try: resp = requests.get(f"{DEFAULT_API_URL}/questions", timeout=15) resp.raise_for_status() questions = resp.json() or [] except Exception as e: return f"Error fetching questions: {e}", None # Run agent logs, payload = [], [] for item in questions: tid = item.get("task_id") q = item.get("question") if not tid or not q: continue ans = agent(q) logs.append({"Task ID": tid, "Question": q, "Submitted Answer": ans}) payload.append({"task_id": tid, "submitted_answer": ans}) if not payload: return "Agent did not produce any answers.", pd.DataFrame(logs) # Submit submission = {"username": username, "agent_code": agent_code, "answers": payload} try: post = requests.post(f"{DEFAULT_API_URL}/submit", json=submission, timeout=60) post.raise_for_status() res = post.json() status = ( f"Submission Successful!\n" f"User: {res.get('username')}\n" f"Overall Score: {res.get('score','N/A')}% " f"({res.get('correct_count','?')}/{res.get('total_attempted','?')})\n" f"Message: {res.get('message','')}" ) return status, pd.DataFrame(logs) except Exception as e: return f"Submission Failed: {e}", pd.DataFrame(logs) # --- Gradio Interface --- with gr.Blocks() as demo: gr.Markdown("# SmolAgent GAIA Runner 🚀") gr.Markdown(""" **Instructions:** 1. Clone this space. 2. Add `OPENAI_API_KEY` (and optionally `OPENAI_MODEL_ID`) in Settings → Secrets. 3. Log in to Hugging Face. 4. Click **Run Evaluation & Submit All Answers**. """) gr.LoginButton() run_btn = gr.Button("Run Evaluation & Submit All Answers") status_out = gr.Textbox(label="Status", lines=5, interactive=False) table_out = gr.DataFrame(label="Questions & Answers", wrap=True) run_btn.click(fn=run_and_submit_all, outputs=[status_out, table_out]) if __name__ == "__main__": demo.launch(debug=True, share=False)