from typing import Optional

import datetime
import requests
import pytz
import yaml

from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool
# from smolagents.agents import ToolCallingAgent
from tools.final_answer import FinalAnswerTool
from tools import *  # custom tools: convert_currency, get_weather, get_news_headlines, etc.
from Gradio_UI import GradioUI
# Choose which LLM engine to use!
# model = HfApiModel()  # default serverless Inference API model
# model = TransformersModel(model_id="meta-llama/Llama-3.2-3B-Instruct")
# For Anthropic: change model_id below to 'anthropic/claude-3-5-sonnet-20240620'
# model = LiteLLMModel(model_id="gpt-4o")
# If the agent does not answer, the model may be overloaded; use another model or fall
# back to the Hugging Face endpoint below, which serves 'Qwen/Qwen2.5-Coder-32B-Instruct'.
model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud',  # endpoint serving 'Qwen/Qwen2.5-Coder-32B-Instruct'
    custom_role_conversions=None,
)
# Init tools
final_answer = FinalAnswerTool()
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
search_tool = DuckDuckGoSearchTool()
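# Note: the custom tools pulled in via `from tools import *` above (convert_currency,
# get_weather, get_news_headlines, get_joke, get_random_fact, search_wikipedia,
# get_current_time_in_timezone) are assumed to be plain functions wrapped with
# smolagents' @tool decorator. As an illustrative sketch only (not the actual
# contents of tools/), such a tool might look roughly like this:
#
#   from smolagents import tool
#
#   @tool
#   def get_current_time_in_timezone(timezone: str) -> str:
#       """Returns the current local time in the given timezone.
#       Args:
#           timezone: A valid tz database name, e.g. 'America/New_York'.
#       """
#       try:
#           tz = pytz.timezone(timezone)
#           return datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
#       except Exception as e:
#           return f"Error fetching time for timezone '{timezone}': {e}"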
with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)
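# prompt_templates now holds the prompt templates (e.g. the system prompt) that the
# CodeAgent below consumes via its prompt_templates argument.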
agent = CodeAgent(
    model=model,
    tools=[
        final_answer,
        search_tool,
        image_generation_tool,
        convert_currency,
        get_weather,
        get_news_headlines,
        get_joke,
        get_random_fact,
        search_wikipedia,
        get_current_time_in_timezone,
    ],
    max_steps=10,
    verbosity_level=4,
    grammar=None,
    planning_interval=3,
    name=None,
    description=None,
    prompt_templates=prompt_templates,
)
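# Launch the Gradio chat UI; share=True additionally exposes a temporary public
# *.gradio.live link alongside the local server.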
GradioUI(agent).launch(share=True)