darrenphodgson76 committed on
Commit
1d91d99
·
verified ·
1 Parent(s): 4df1ec4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +44 -12
app.py CHANGED
@@ -1,11 +1,10 @@
1
- from smolagents import CodeAgent, HfApiModel,tool
2
  import yaml
3
  from tools.final_answer import FinalAnswerTool
4
  import wikipedia
5
  from Gradio_UI import GradioUI
6
 
7
- # Below is aa tool that searches wikipedia
8
-
9
  @tool
10
  def wikipedia_search(query: str, sentences: int = 2) -> str:
11
  """Search Wikipedia and return a short summary.
@@ -26,21 +25,19 @@ def wikipedia_search(query: str, sentences: int = 2) -> str:
26
 
27
  final_answer = FinalAnswerTool()
28
 
29
- # If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
30
- # model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
31
-
32
  model = HfApiModel(
33
- max_tokens=2096,
34
- temperature=0.5,
35
- model_id='Qwen/Qwen2.5-Coder-32B-Instruct',# it is possible that this model may be overloaded
36
- custom_role_conversions=None,
37
  )
 
38
  with open("prompts.yaml", 'r') as stream:
39
  prompt_templates = yaml.safe_load(stream)
40
 
41
  agent = CodeAgent(
42
  model=model,
43
- tools=[final_answer, wikipedia_search], ## add your tools here (don't remove final answer)
44
  max_steps=6,
45
  verbosity_level=1,
46
  grammar=None,
@@ -50,5 +47,40 @@ agent = CodeAgent(
50
  prompt_templates=prompt_templates
51
  )
52
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
53
 
54
- GradioUI(agent).launch()
 
 
1
+ from smolagents import CodeAgent, HfApiModel, tool
2
  import yaml
3
  from tools.final_answer import FinalAnswerTool
4
  import wikipedia
5
  from Gradio_UI import GradioUI
6
 
7
+ # Wikipedia search tool
 
8
  @tool
9
  def wikipedia_search(query: str, sentences: int = 2) -> str:
10
  """Search Wikipedia and return a short summary.
 
25
 
26
  final_answer = FinalAnswerTool()
27
 
 
 
 
28
# Hosted Hugging Face inference model used by the agent.
# NOTE(review): max_tokens=2096 looks like a typo for 2048 (or 4096) — confirm intent.
model = HfApiModel(
    max_tokens=2096,  # cap on tokens generated per call
    temperature=0.5,  # moderate sampling randomness
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
    custom_role_conversions=None,  # use the default role mapping
)
34
+
35
# Load the agent's prompt templates from the repo-local YAML file.
# yaml.safe_load avoids executing arbitrary tags from the file.
with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)
37
 
38
  agent = CodeAgent(
39
  model=model,
40
+ tools=[final_answer, wikipedia_search],
41
  max_steps=6,
42
  verbosity_level=1,
43
  grammar=None,
 
47
  prompt_templates=prompt_templates
48
  )
49
 
50
# Custom GradioUI that resets agent context after a fixed number of user
# messages/responses, so long sessions do not accumulate unbounded context.
class CustomGradioUI(GradioUI):
    """Gradio UI wrapper that periodically resets the agent's context.

    After ``max_messages`` user interactions the wrapped agent's
    conversation state is cleared (via ``agent.reset()`` when available,
    otherwise by clearing ``agent.conversation_history``).
    """

    def __init__(self, agent, max_messages=4, **kwargs):
        """Wrap *agent* in the UI.

        Parameters
        ----------
        agent : the smolagents agent driven by this UI.
        max_messages : int, interactions allowed before a context reset.
        **kwargs : forwarded to ``GradioUI`` so base-class options still work.
        """
        super().__init__(agent, **kwargs)
        self.max_messages = max_messages
        # Interactions seen since the last reset.
        self.message_count = 0

    def process_user_input(self, user_input):
        """Run the agent on *user_input*; reset context at the limit.

        Returns the agent's response unchanged.
        """
        response = self.agent.run(user_input)
        self.message_count += 1

        if self.message_count >= self.max_messages:
            # Prefer the agent's own reset; fall back to clearing history.
            if hasattr(self.agent, 'reset'):
                self.agent.reset()
            elif hasattr(self.agent, 'conversation_history'):
                self.agent.conversation_history.clear()
            self.message_count = 0
        return response

    def launch(self):
        """Launch the UI via the base class.

        NOTE(review): this assumes the base ``GradioUI`` routes user
        messages through ``process_user_input`` — confirm against
        Gradio_UI.py; otherwise the reset logic above is never exercised.
        """
        super().launch()
84
 
85
# Launch the custom UI (module-level side effect: starts the Gradio app).
CustomGradioUI(agent).launch()