-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathapp.py
More file actions
171 lines (131 loc) · 5.64 KB
/
app.py
File metadata and controls
171 lines (131 loc) · 5.64 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
# Standard library
import re
import base64
# Third-party: Streamlit UI framework and the Ollama chat client
import streamlit as st
from ollama import chat
# Set Streamlit page configuration
# NOTE: st.set_page_config must be the first Streamlit call in the script.
st.set_page_config(page_title="Mini ChatGPT with DeepSeek Intelligence", layout="centered")
def format_reasoning_response(content):
    """Strip every <think> and </think> marker tag out of *content*."""
    return content.replace("<think>", "").replace("</think>", "")
def display_message(message):
    """Render one chat bubble for either a user or an assistant message."""
    is_user = message["role"] == "user"
    with st.chat_message("user" if is_user else "assistant"):
        if is_user:
            st.markdown(message["content"])
        else:
            # Assistant replies may embed <think> reasoning that needs special layout.
            display_assistant_message(message["content"])
def display_assistant_message(content):
    """Render an assistant reply, folding any <think>...</think> block into an expander."""
    think_match = re.search(r"<think>(.*?)</think>", content, re.DOTALL)
    if think_match is None:
        # No embedded reasoning — show the reply as-is.
        st.markdown(content)
        return
    reasoning = format_reasoning_response(think_match.group(0))
    answer = content.replace(think_match.group(0), "")
    with st.expander("Thinking complete!"):
        st.markdown(reasoning)
    st.markdown(answer)
def display_chat_history():
    """Render every non-system message from the currently active conversation."""
    active = st.session_state["active_chat"]
    # Re-point "messages" at the active conversation's list (shared reference).
    st.session_state["messages"] = st.session_state["conversations"][active]
    for message in st.session_state["messages"]:
        if message["role"] == "system":
            continue  # the system prompt is never shown in the UI
        display_message(message)
def process_thinking_phase(stream):
    """Consume *stream* up to the closing </think> tag, live-rendering the reasoning.

    Returns the accumulated text (tags included) so the caller can persist it.
    The rest of the stream — the visible answer — is left for the caller to consume.
    """
    thinking_content = ""
    with st.status("Thinking...", expanded=True) as status:
        think_placeholder = st.empty()
        for chunk in stream:
            piece = chunk["message"]["content"] or ""
            thinking_content += piece
            if "<think>" in piece:
                # Opening tag itself carries nothing worth rendering yet.
                continue
            if "</think>" in piece:
                # Reasoning finished: collapse the status box and stop here.
                status.update(label="Thinking complete!", state="complete", expanded=False)
                break
            # Live-update the placeholder with the tag-stripped reasoning so far.
            think_placeholder.markdown(format_reasoning_response(thinking_content))
    return thinking_content
def process_response_phase(stream):
    """Stream the visible answer tokens into the UI and return the full text."""
    placeholder = st.empty()
    answer = ""
    for chunk in stream:
        answer += chunk["message"]["content"] or ""
        # Re-render the growing answer on every chunk for a typing effect.
        placeholder.markdown(answer)
    return answer
@st.cache_resource
def get_chat_model():
    """Return a cached callable that streams completions from the deepseek-r1 model."""
    def _stream_chat(msgs):
        # Streaming mode yields chunks as the model generates them.
        return chat(model="deepseek-r1", messages=msgs, stream=True)
    return _stream_chat
def handle_user_input():
    """Read a new prompt from the chat box, echo it, and stream the assistant reply."""
    user_input = st.chat_input("Type your message here...")
    if not user_input:
        return
    st.session_state["messages"].append({"role": "user", "content": user_input})
    with st.chat_message("user"):
        st.markdown(user_input)
    with st.chat_message("assistant"):
        chat_model = get_chat_model()
        stream = chat_model(st.session_state["messages"])
        # The same stream is consumed in two phases: reasoning first, answer second.
        thinking_content = process_thinking_phase(stream)
        response_content = process_response_phase(stream)
        # Save the complete response
        st.session_state["messages"].append(
            {"role": "assistant", "content": thinking_content + response_content}
        )
def initialize_session():
    """Seed session state with a default conversation on first load."""
    state = st.session_state
    if "conversations" not in state:
        state["conversations"] = {
            "Chat 1": [{"role": "system", "content": "You are a helpful assistant."}]
        }
    if "active_chat" not in state:
        state["active_chat"] = "Chat 1"
    # "messages" aliases the active conversation list so appends persist in both.
    state["messages"] = state["conversations"][state["active_chat"]]
def new_chat():
    """Start a fresh conversation tab and switch to it, keeping older chats intact."""
    next_name = f"Chat {len(st.session_state['conversations']) + 1}"
    st.session_state["conversations"][next_name] = [
        {"role": "system", "content": "You are a helpful assistant."}
    ]
    st.session_state["active_chat"] = next_name
    # Re-run the script so the sidebar reflects the new active chat immediately.
    st.rerun()
def main():
    """Entry point: sidebar chat picker, branded header, history, and input box."""
    initialize_session()

    # Inline the logo as base64 so it can be embedded directly in the HTML header.
    logo_path = "assets/deep-seek.png"
    with open(logo_path, "rb") as img_file:
        encoded_logo = base64.b64encode(img_file.read()).decode()

    # Sidebar for chat selection
    st.sidebar.title("Chats")
    if st.sidebar.button("➕ New Chat", use_container_width=True):
        new_chat()

    chat_names = list(st.session_state["conversations"].keys())
    chat_selection = st.sidebar.radio(
        "Select a conversation:",
        chat_names,
        index=chat_names.index(st.session_state["active_chat"])
    )

    # Update active chat when the radio selection changes, then re-run to refresh.
    if chat_selection != st.session_state["active_chat"]:
        st.session_state["active_chat"] = chat_selection
        st.session_state["messages"] = st.session_state["conversations"][chat_selection]
        st.rerun()

    # Display UI
    st.markdown(f"""
    <div style="text-align: center;">
        <h3>Mini ChatGPT, Driven by <img src="data:image/png;base64,{encoded_logo}" width="170"><br> Intelligence</h3>
        <h4>How can I help you today?</h4>
    </div>
    """, unsafe_allow_html=True)

    display_chat_history()
    handle_user_input()


if __name__ == "__main__":
    main()