baxin committed on
Commit
be70109
·
unverified ·
2 Parent(s): 3b463a9 7d8708c

Merge pull request #2 from koji/feat_add-base-chat-app

Browse files
Files changed (2) hide show
  1. app.py +130 -2
  2. requirements.txt +2 -0
app.py CHANGED
@@ -1,4 +1,132 @@
1
  import streamlit as st
 
 
2
 
3
- x = st.slider('Select a value')
4
- st.write(x, 'squared is', x * x)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import streamlit as st
2
+ from cerebras.cloud.sdk import Cerebras
3
+ import openai
4
 
5
# Configure the Streamlit page; must run before any other st.* call.
# FIX: page_icon was mojibake ("πŸ€–", UTF-8 bytes of the robot emoji
# mis-decoded as Latin-1) — restored to the intended 🤖.
st.set_page_config(page_icon="🤖", layout="wide", page_title="Cerebras")
7
+
8
+
9
def icon(emoji: str):
    """Shows an emoji as a Notion-style page icon."""
    # Build the oversized-emoji markup first, then render it as raw HTML.
    markup = f'<span style="font-size: 78px; line-height: 1">{emoji}</span>'
    st.write(markup, unsafe_allow_html=True)
15
+
16
+
17
# Display header
icon("🧠")
st.title("ChatBot with Cerebras API")
st.subheader("Deploying Cerebras on Streamlit", divider="orange", anchor=False)

# Model registry: display name, context-window size, and vendor per option.
models = {
    "llama3.1-8b": {"name": "Llama3.1-8b", "tokens": 8192, "developer": "Meta"},
    "llama-3.3-70b": {"name": "Llama-3.3-70b", "tokens": 8192, "developer": "Meta"}
}

# OpenAI-compatible endpoint of a locally running optillm proxy.
BASE_URL = "http://localhost:8000/v1"

# Sidebar configuration
with st.sidebar:
    st.title("Settings")
    st.markdown("### :red[Enter your Cerebras API Key below]")
    api_key = st.text_input("Cerebras API Key:", type="password")

    # Model selection
    model_option = st.selectbox(
        "Choose a model:",
        options=list(models.keys()),
        format_func=lambda x: models[x]["name"],
        key="model_select"
    )

    # Max tokens slider, capped at the selected model's context window.
    max_tokens_range = models[model_option]["tokens"]
    max_tokens = st.slider(
        "Max Tokens:",
        min_value=512,
        max_value=max_tokens_range,
        value=max_tokens_range,
        step=512,
        help="Select the maximum number of tokens (words) for the model's response."
    )

    use_optillm = st.toggle("Use Optillm", value=False)

# Show onboarding copy and halt the script until an API key is provided.
if not api_key:
    st.markdown("""
    ## Cerebras API x Streamlit Demo!

    This simple chatbot app demonstrates how to use Cerebras with Streamlit.

    To get started:
    1. :red[Enter your Cerebras API Key in the sidebar.]
    2. Chat away, powered by Cerebras.
    """)
    st.stop()

# Route requests through the optillm proxy (OpenAI-compatible client) or
# call the Cerebras SDK directly.
# FIX: reuse the BASE_URL constant instead of duplicating the URL literal;
# dead commented-out client code removed.
if use_optillm:
    client = openai.OpenAI(
        base_url=BASE_URL,
        api_key=api_key
    )
else:
    client = Cerebras(api_key=api_key)


# Chat history management
if "messages" not in st.session_state:
    st.session_state.messages = []

if "selected_model" not in st.session_state:
    st.session_state.selected_model = None

# Clear history if the model changes — old turns were produced by a
# different model and may not fit the new context.
if st.session_state.selected_model != model_option:
    st.session_state.messages = []
    st.session_state.selected_model = model_option

# Replay prior chat messages on every rerun.
for message in st.session_state.messages:
    avatar = '🤖' if message["role"] == "assistant" else '🦔'
    with st.chat_message(message["role"], avatar=avatar):
        st.markdown(message["content"])

# Chat input and processing
if prompt := st.chat_input("Enter your prompt here..."):
    st.session_state.messages.append({"role": "user", "content": prompt})

    with st.chat_message("user", avatar='🦔'):
        st.markdown(prompt)

    try:
        # Create empty container for streaming response
        with st.chat_message("assistant", avatar="🤖"):
            response_placeholder = st.empty()
            full_response = ""

            # FIX: send the whole conversation, not just the latest prompt,
            # so the model actually has multi-turn chat context.
            for chunk in client.chat.completions.create(
                model=model_option,
                messages=st.session_state.messages,
                max_tokens=max_tokens,
                stream=True,
            ):
                if chunk.choices[0].delta.content:
                    chunk_content = chunk.choices[0].delta.content
                    full_response += chunk_content
                    # Trailing block cursor signals the answer is still streaming.
                    response_placeholder.markdown(full_response + "▌")

            # Update the final response without cursor
            response_placeholder.markdown(full_response)
            st.session_state.messages.append(
                {"role": "assistant", "content": full_response})

    except Exception as e:
        # Broad catch is deliberate at this top-level UI boundary: surface
        # API/network failures to the user instead of crashing the app.
        st.error(f"Error generating response: {str(e)}", icon="🚨")
requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ cerebras_cloud_sdk
2
+ openai