from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool
from smolagents.default_tools import FinalAnswerTool, VisitWebpageTool
from Gradio_UI import GradioUI
import datetime
import requests
import pytz
import yaml
from typing import Optional
from my_tools import *

# Choose which LLM engine to use!
model = HfApiModel()
# model = TransformersModel(model_id="meta-llama/Llama-3.2-2B-Instruct")
# For Anthropic: change model_id below to 'anthropic/claude-3-5-sonnet-20240620'
# model = LiteLLMModel(model_id="gpt-4o")

# If the agent does not answer, the model is overloaded; use another model or the
# original Hugging Face endpoint below, which serves Qwen2.5 Coder:
# 'Qwen/Qwen2.5-Coder-32B-Instruct'
model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud',  # Endpoint with 'Qwen/Qwen2.5-Coder-32B-Instruct'
    custom_role_conversions=None,
)

# Initialize tools
final_answer = FinalAnswerTool()
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
search_tool = DuckDuckGoSearchTool()
visit_webpage_tool = VisitWebpageTool()

# Load the prompt templates used by the agent
with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)

agent = CodeAgent(
    model=model,
    tools=[
        final_answer,
        search_tool,
        image_generation_tool,
        convert_currency,
        get_weather,
        get_news_headlines,
        get_joke,
        get_random_fact,
        search_wikipedia,
        get_current_time_in_timezone,
        visit_webpage_tool,
    ],
    max_steps=10,
    verbosity_level=4,
    grammar=None,
    planning_interval=3,  # Re-plan every 3 steps
    name=None,
    description=None,
    prompt_templates=prompt_templates,
)

GradioUI(agent).launch()