import os
import openai
from openai import OpenAI
import gradio as gr
import requests
import pandas as pd
from smolagents import CodeAgent, DuckDuckGoSearchTool, tool

# --- Constants ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"

# --- Configure OpenAI SDK & Client ---
openai_api_key = os.getenv("OPENAI_API_KEY")
if not openai_api_key:
    raise RuntimeError("Please set OPENAI_API_KEY in your Space secrets or env!")
openai.api_key = openai_api_key
client = OpenAI()
OPENAI_MODEL_ID = os.getenv("OPENAI_MODEL_ID", "gpt-4.1")
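# Note: OpenAI() reads OPENAI_API_KEY from the environment on its own; the
# module-level openai.api_key assignment above is kept only for legacy SDK code paths.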

# --- Model Wrapper for CodeAgent compatibility ---
class OpenAIModelWrapper:
    def __init__(self, model_id: str, client: OpenAI):
        self.model_id = model_id
        self.client = client

    def __call__(self, prompt: str, **kwargs) -> str:
        # ignore extra kwargs (stop_sequences, temperature, etc.)
        resp = self.client.responses.create(
            model=self.model_id,
            input=prompt,
        )
        return resp.output_text
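# Minimal sanity check for the wrapper (a sketch; assumes OPENAI_API_KEY is set and
# the installed openai package supports the Responses API):
#   wrapped = OpenAIModelWrapper(OPENAI_MODEL_ID, client)
#   print(wrapped("Reply with the single word: ready"))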

# --- Tool Definitions ---
@tool
def summarize_query(query: str) -> str:
    """
    Reframes an unclear query into a concise one.

    Args:
        query: The original, possibly ambiguous user query.
    """
    return f"Summarize and reframe: {query}"


@tool
def wikipedia_search(page: str) -> str:
    """
    Fetches the summary extract of an English Wikipedia page.

    Args:
        page: Title of the Wikipedia page to summarize.
    """
    url = f"https://en.wikipedia.org/api/rest_v1/page/summary/{page}"
    r = requests.get(url, timeout=10)
    r.raise_for_status()
    return r.json().get("extract", "")
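# Illustrative call (hypothetical title): wikipedia_search("Alan_Turing") returns the
# page's lead-section extract; titles containing spaces should be passed with
# underscores or URL-encoded for this REST endpoint.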
search_tool = DuckDuckGoSearchTool()
wiki_tool = wikipedia_search
summarize_tool = summarize_query
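# DuckDuckGoSearchTool is smolagents' built-in web search tool; the other two are
# the custom @tool functions defined above.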

# --- ReACT + Scratchpad + Auto-Retry Instruction Prompt ---
instruction_prompt = """
You are a ReACT agent with three tools:
  • DuckDuckGoSearchTool(query: str)
  • wikipedia_search(page: str)
  • summarize_query(query: str)

Internally, for each question:
1. Thought: decide which tool to call.
2. Action: call the chosen tool.
3. Observation: record the result.
4. If the observation is empty or irrelevant:
   Thought: retry with summarize_query + DuckDuckGoSearchTool.
   Record the new Observation.
5. Thought: integrate the observations.

Finally, output your answer with the following template:
FINAL ANSWER: [YOUR FINAL ANSWER]
YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma-separated
list of numbers and/or strings. If you are asked for a number, do not use commas in
the number or units such as $ or percent signs unless specified otherwise. If you are
asked for a string, do not use articles or abbreviations (e.g. for cities), and write
digits in plain text unless specified otherwise. If you are asked for a comma-separated
list, apply the above rules to each element depending on whether it is a number or a string.

Rules:
- Numbers: digits only.
- Lists: comma-separated, no extra punctuation.
- Strings: no filler words.
"""

# --- Build the CodeAgent ---
llm_wrapper = OpenAIModelWrapper(model_id=OPENAI_MODEL_ID, client=client)
smart_agent = CodeAgent(
    tools=[search_tool, wiki_tool, summarize_tool],
    model=llm_wrapper,
)
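# CodeAgent plans by writing and executing Python snippets that call the tools above;
# the wrapped OpenAI model generates that code at each reasoning step.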

# --- BasicAgent wrapper for Gradio ---
class BasicAgent:
    def __init__(self):
        print("SmolAgent (GPT-4.1) with ReACT & tools initialized.")

    def __call__(self, question: str) -> str:
        prompt = instruction_prompt.strip() + "\n\nQUESTION: " + question.strip()
        try:
            return smart_agent.run(prompt)
        except Exception as e:
            return f"AGENT ERROR: {e}"

# --- Gradio / Submission Logic ---
def run_and_submit_all(profile: gr.OAuthProfile | None):
    if not profile:
        return "Please log in to Hugging Face.", None
    username = profile.username
    space_id = os.getenv("SPACE_ID", "")
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    agent = BasicAgent()

    # Fetch questions
    try:
        resp = requests.get(f"{DEFAULT_API_URL}/questions", timeout=15)
        resp.raise_for_status()
        questions = resp.json() or []
    except Exception as e:
        return f"Error fetching questions: {e}", None

    # Run agent
    logs, payload = [], []
    for item in questions:
        tid = item.get("task_id")
        q = item.get("question")
        if not tid or not q:
            continue
        ans = agent(q)
        logs.append({"Task ID": tid, "Question": q, "Submitted Answer": ans})
        payload.append({"task_id": tid, "submitted_answer": ans})
    if not payload:
        return "Agent did not produce any answers.", pd.DataFrame(logs)

    # Submit
    submission = {"username": username, "agent_code": agent_code, "answers": payload}
    try:
        post = requests.post(f"{DEFAULT_API_URL}/submit", json=submission, timeout=60)
        post.raise_for_status()
        res = post.json()
        status = (
            f"Submission Successful!\n"
            f"User: {res.get('username')}\n"
            f"Overall Score: {res.get('score', 'N/A')}% "
            f"({res.get('correct_count', '?')}/{res.get('total_attempted', '?')})\n"
            f"Message: {res.get('message', '')}"
        )
        return status, pd.DataFrame(logs)
    except Exception as e:
        return f"Submission Failed: {e}", pd.DataFrame(logs)

# --- Gradio Interface ---
with gr.Blocks() as demo:
    gr.Markdown("# SmolAgent GAIA Runner 🚀")
    gr.Markdown("""
**Instructions:**
1. Clone this space.
2. Add `OPENAI_API_KEY` (and optionally `OPENAI_MODEL_ID`) in Settings → Secrets.
3. Log in to Hugging Face.
4. Click **Run Evaluation & Submit All Answers**.
""")
    gr.LoginButton()
    run_btn = gr.Button("Run Evaluation & Submit All Answers")
    status_out = gr.Textbox(label="Status", lines=5, interactive=False)
    table_out = gr.DataFrame(label="Questions & Answers", wrap=True)
    run_btn.click(fn=run_and_submit_all, outputs=[status_out, table_out])


if __name__ == "__main__":
    demo.launch(debug=True, share=False)