Carlosbatista committed on
Commit
c89d563
verified
1 Parent(s): c60fd93

Update app.py

Files changed (1)
  1. app.py +56 -34
app.py CHANGED
@@ -1,44 +1,66 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
 
-# Use a model compatible with text_generation
-client = InferenceClient("unsloth/Llama-3.2-1B-Instruct")
-
-def respond(message, history, system_message, max_tokens, temperature, top_p):
-    # Build the prompt: include the system instruction and the history as text
-    prompt = f"{system_message.strip()}\n"
-
-    for user_msg, bot_msg in history:
-        prompt += f"\nUser: {user_msg}\nAssistant: {bot_msg}"
-
-    prompt += f"\nUser: {message}\nAssistant:"
-
-    try:
-        response = client.text_generation(
-            prompt=prompt,
-            max_new_tokens=max_tokens,
-            temperature=temperature,
-            top_p=top_p,
-            stop=["User:", "user:", "USER:"],  # avoid generating indefinitely
-        )
-        return response.strip()
-    except Exception as e:
-        return f"Erro ao gerar resposta: {str(e)}"
-
-# Gradio interface with a system prompt input and sliders
+"""
+For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
+"""
+client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+#client = InferenceClient("meta-llama/Llama-3.2-1B-Instruct")
+#client = InferenceClient("microsoft/Phi-3.5-mini-instruct")
+#client = InferenceClient("unsloth/Llama-3.2-1B-Instruct")
+
+
+def respond(
+    message,
+    history: list[tuple[str, str]],
+    system_message,
+    max_tokens,
+    temperature,
+    top_p,
+):
+    messages = [{"role": "system", "content": system_message}]
+
+    for val in history:
+        if val[0]:
+            messages.append({"role": "user", "content": val[0]})
+        if val[1]:
+            messages.append({"role": "assistant", "content": val[1]})
+
+    messages.append({"role": "user", "content": message})
+
+    response = ""
+
+
+    mensagens = client.chat_completion(
+        messages,
+        max_tokens=max_tokens,
+        temperature=temperature,
+        top_p=top_p,
+    )
+    response = mensagens.choices[0].message.content
+
+    return response
+
+
+"""
+For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
+"""
 demo = gr.ChatInterface(
-    fn=respond,
+    respond,
     additional_inputs=[
-        gr.Textbox(
-            label="System message",
-            value="You are a career and recruitment expert. Avoid small talk. First, ask the user to provide their skills and experience. After receiving that, ask for the job description and requirements. Then, compare the job description with the candidate's profile. Based on this comparison, respond with suggestions to improve the resume and align it with the job, missing keywords, and an example of a professional summary tailored to the job.",
-            lines=5
+        gr.Textbox(value="You are a friendly Chatbot. Your name is Juninho.", label="System message"),
+        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
+        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+        gr.Slider(
+            minimum=0.1,
+            maximum=1.0,
+            value=0.95,
+            step=0.05,
+            label="Top-p (nucleus sampling)",
         ),
-        gr.Slider(1, 2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(0.1, 2.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p"),
     ],
 )
 
+
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
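
The updated respond function builds an OpenAI-style message list and calls InferenceClient.chat_completion, instead of assembling a flat "User:/Assistant:" prompt for text_generation as the previous version did. A minimal sketch of exercising the new signature directly, outside the Gradio UI; the call values are illustrative only, and it assumes the hosted HuggingFaceH4/zephyr-7b-beta endpoint is reachable (an HF access token may be needed for rate-limited access):

    # Hypothetical smoke test for the new respond() signature; values are illustrative.
    from app import respond  # assumes this snippet sits next to the updated app.py

    reply = respond(
        message="Hello, who are you?",
        history=[("Hi", "Hello! How can I help?")],  # (user, assistant) pairs, as gr.ChatInterface passes them
        system_message="You are a friendly Chatbot. Your name is Juninho.",
        max_tokens=128,
        temperature=0.7,
        top_p=0.95,
    )
    print(reply)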