
Commit e1599a5

v0.0.1
0 parents, commit e1599a5

File tree

10 files changed: 255 additions, 0 deletions


.env

Lines changed: 1 addition & 0 deletions
```
GROQ_API_KEY='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'
```

.gitgnore

Lines changed: 1 addition & 0 deletions
```
.env
```

ChatBot.py

Lines changed: 74 additions & 0 deletions
```python
import streamlit as st
from dotenv import find_dotenv, load_dotenv
from langchain_core.prompts import ChatPromptTemplate
from langchain_groq import ChatGroq

models = {
    "gemma-7b-it": {"name": "Gemma-7b-it", "tokens": 8192, "developer": "Google"},
    "llama2-70b-4096": {"name": "LLaMA2-70b-chat", "tokens": 4096, "developer": "Meta"},
    "llama3-70b-8192": {"name": "LLaMA3-70b-8192", "tokens": 8192, "developer": "Meta"},
    "llama3-8b-8192": {"name": "LLaMA3-8b-8192", "tokens": 8192, "developer": "Meta"},
    "mixtral-8x7b-32768": {"name": "Mixtral-8x7b-Instruct-v0.1", "tokens": 32768, "developer": "Mistral"},
}


def load_model(question, model_option):
    load_dotenv(find_dotenv())  # Load the .env file.
    chat = ChatGroq(temperature=0, model_name=model_option)
    system = "You are a helpful assistant."
    human = "{text}"
    prompt = ChatPromptTemplate.from_messages([("system", system), ("human", human)])
    chain = prompt | chat
    result = chain.invoke({"text": question})
    return result.content


if __name__ == "__main__":
    # Draw the page.
    st.header("ChatBot - AI Assistant", divider="rainbow", anchor=False)
    # st.caption("🚀 A Streamlit website based on GroqCloud")

    # Initialize the model selection.
    if "selected_model" not in st.session_state:
        st.session_state.selected_model = None

    # Initialize the chat history.
    if "messages" not in st.session_state:
        st.session_state["messages"] = list()

    with st.sidebar:
        # Model selection.
        model_option = st.selectbox(
            "Choose a model:",
            options=list(models.keys()),
            format_func=lambda x: models[x]["name"],
            index=0  # Default to Gemma, the first key in the dict.
        )
        # Clear the chat history.
        st.button("Clear Chat History", on_click=lambda: st.session_state.pop("messages", None))
        st.text("Gemma works well for Chinese")
        st.text("LLaMA2-70b is well balanced")
        st.text("LLaMA3-70b is highly accurate")
        st.text("LLaMA3-8b is fast")
        st.text("Mixtral is fast and suits long texts")

    st.chat_message("assistant").write("Hello, I am an AI chatbot built on large language models. Please enter your question.")
    # if "messages" not in st.session_state:
    #     st.session_state["messages"] = [
    #         {"role": "assistant", "content": "Hello, I am an AI chatbot built on large language models. Please enter your question."}
    #     ]

    # Render the conversation so far.
    for msg in st.session_state.messages:
        st.chat_message(msg["role"]).write(msg["content"])

    # Read the user's question.
    question = st.chat_input()
    if question:
        st.session_state.messages.append({"role": "user", "content": question})
        st.chat_message("user").write(question)
        response = load_model(question, model_option)
        st.session_state.messages.append({"role": "assistant", "content": response})
        st.chat_message("assistant").write(response)
```
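Because the Streamlit UI sits under the `__main__` guard, `load_model` can also be imported and exercised directly. A minimal smoke-test sketch, assuming a valid `GROQ_API_KEY` in `.env` (the question and model choice are illustrative):

```python
# Minimal smoke test for load_model outside Streamlit.
# Assumes a valid GROQ_API_KEY in .env; question and model are illustrative.
from ChatBot import load_model

answer = load_model("Briefly explain what Groq is.", "llama3-8b-8192")
print(answer)
```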

README.md

Lines changed: 10 additions & 0 deletions
```markdown
### LLM-based AI Chatbot

An AI language-model chatbot built on open-source frameworks and platforms, integrating human-machine chat, retrieval-augmented generation, and conversational parsing of PDFs and URLs.

## Tools and Frameworks

Langchain, Streamlit, Oracle Cloud

## Changelog

v0.0.1

1. Built the basic Streamlit page framework and integrated the chatbot conversation feature.
```

demo.py

Lines changed: 121 additions & 0 deletions
```python
import streamlit as st
from typing import Generator
from groq import Groq

st.set_page_config(page_icon="💬", layout="wide",
                   page_title="Groq Goes Brrrrrrrr...")


def icon(emoji: str):
    """Shows an emoji as a Notion-style page icon."""
    st.write(
        f'<span style="font-size: 78px; line-height: 1">{emoji}</span>',
        unsafe_allow_html=True,
    )


icon("🏎️")

st.subheader("Groq Chat Streamlit App", divider="rainbow", anchor=False)

client = Groq(
    api_key='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',
)

# Initialize chat history and selected model
if "messages" not in st.session_state:
    st.session_state.messages = []

if "selected_model" not in st.session_state:
    st.session_state.selected_model = None

# Define model details
models = {
    "gemma-7b-it": {"name": "Gemma-7b-it", "tokens": 8192, "developer": "Google"},
    "llama2-70b-4096": {"name": "LLaMA2-70b-chat", "tokens": 4096, "developer": "Meta"},
    "llama3-70b-8192": {"name": "LLaMA3-70b-8192", "tokens": 8192, "developer": "Meta"},
    "llama3-8b-8192": {"name": "LLaMA3-8b-8192", "tokens": 8192, "developer": "Meta"},
    "mixtral-8x7b-32768": {"name": "Mixtral-8x7b-Instruct-v0.1", "tokens": 32768, "developer": "Mistral"},
}

# Layout for model selection and max_tokens slider
col1, col2 = st.columns(2)

with col1:
    model_option = st.selectbox(
        "Choose a model:",
        options=list(models.keys()),
        format_func=lambda x: models[x]["name"],
        index=4  # Default to mixtral
    )

# Detect model change and clear chat history if model has changed
if st.session_state.selected_model != model_option:
    st.session_state.messages = []
    st.session_state.selected_model = model_option

max_tokens_range = models[model_option]["tokens"]

with col2:
    # Adjust max_tokens slider dynamically based on the selected model
    max_tokens = st.slider(
        "Max Tokens:",
        min_value=512,  # Minimum value to allow some flexibility
        max_value=max_tokens_range,
        # Default value or max allowed if less
        value=min(32768, max_tokens_range),
        step=512,
        help=f"Adjust the maximum number of tokens (words) for the model's response. Max for selected model: {max_tokens_range}"
    )

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    avatar = '🤖' if message["role"] == "assistant" else '👨‍💻'
    with st.chat_message(message["role"], avatar=avatar):
        st.markdown(message["content"])


def generate_chat_responses(chat_completion) -> Generator[str, None, None]:
    """Yield chat response content from the Groq API response."""
    for chunk in chat_completion:
        if chunk.choices[0].delta.content:
            yield chunk.choices[0].delta.content


if prompt := st.chat_input("Enter your prompt here..."):
    st.session_state.messages.append({"role": "user", "content": prompt})

    with st.chat_message("user", avatar='👨‍💻'):
        st.markdown(prompt)

    # Fetch response from Groq API
    full_response = ""  # Initialized so the append below is safe if the call fails.
    try:
        chat_completion = client.chat.completions.create(
            model=model_option,
            messages=[
                {
                    "role": m["role"],
                    "content": m["content"]
                }
                for m in st.session_state.messages
            ],
            max_tokens=max_tokens,
            stream=True
        )

        # Use the generator function with st.write_stream
        with st.chat_message("assistant", avatar="🤖"):
            chat_responses_generator = generate_chat_responses(chat_completion)
            full_response = st.write_stream(chat_responses_generator)
    except Exception as e:
        st.error(e, icon="🚨")

    # Append the full response to session_state.messages
    if isinstance(full_response, str):
        st.session_state.messages.append(
            {"role": "assistant", "content": full_response})
    else:
        # Handle the case where full_response is not a string
        combined_response = "\n".join(str(item) for item in full_response)
        st.session_state.messages.append(
            {"role": "assistant", "content": combined_response})
```

pages/RetrievalBot.py

Lines changed: 6 additions & 0 deletions
```python
import streamlit as st


st.subheader("URL Retrieval AI Bot", divider="gray", anchor=False)
# if "messages" not in st.session_state:
#     st.session_state["messages"] = [{"role": "assistant", "content": ""}]
```

pages/SummaryBot.py

Lines changed: 6 additions & 0 deletions
```python
import streamlit as st


st.subheader("Summary AI Bot", divider="gray", anchor=False)
# if "messages" not in st.session_state:
#     st.session_state["messages"] = [{"role": "assistant", "content": ""}]
```

pages/pdfBot.py

Lines changed: 6 additions & 0 deletions
```python
import streamlit as st


st.subheader("PDF Retrieval AI Bot", divider="gray", anchor=False)
# if "messages" not in st.session_state:
#     st.session_state["messages"] = [{"role": "assistant", "content": ""}]
```
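pdfBot.py is still a placeholder in this commit. One possible direction, sketched under assumptions only (pypdf as an extra dependency, reuse of `load_model` from ChatBot.py, and naive full-text stuffing rather than real retrieval):

```python
# Hypothetical sketch; none of this is in the commit. Assumes pypdf is
# installed and reuses load_model from ChatBot.py for brevity.
import streamlit as st
from pypdf import PdfReader

from ChatBot import load_model

st.subheader("PDF Retrieval AI Bot", divider="gray", anchor=False)

uploaded = st.file_uploader("Upload a PDF", type="pdf")
question = st.chat_input("Ask a question about the PDF...")
if uploaded and question:
    # Naive approach: extract all text, stuff a truncated slice into the prompt.
    text = "\n".join(page.extract_text() or "" for page in PdfReader(uploaded).pages)
    prompt = f"Context:\n{text[:4000]}\n\nQuestion: {question}"
    st.chat_message("user").write(question)
    st.chat_message("assistant").write(load_model(prompt, "mixtral-8x7b-32768"))
```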

rag.py

Lines changed: 26 additions & 0 deletions
```python
from langchain_core.prompts import ChatPromptTemplate
from langchain_groq import ChatGroq
from dotenv import find_dotenv, load_dotenv


class Chatbot:
    def __init__(self):
        load_dotenv(find_dotenv())  # Load the .env file.
        self.llm = ChatGroq(temperature=0, model_name="mixtral-8x7b-32768")


if __name__ == "__main__":
    # chatbot = Chatbot()
    load_dotenv(find_dotenv())  # Load the .env file.
    chat = ChatGroq(temperature=0, model_name="gemma-7b-it")
    system = "You are a helpful assistant."
    human = "{text}"
    prompt = ChatPromptTemplate.from_messages([("system", system), ("human", human)])

    chain = prompt | chat
    result = chain.invoke({"text": "Do you know Ming Chuan University?"})
    print(result)
```
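The `Chatbot` class above is only a stub wrapping an LLM handle. One way it might grow to expose the same LCEL chain as the `__main__` block, sketched here (the `ask` method name and prompt wording are assumptions):

```python
# Hypothetical extension of the stub; ask() is an assumed method name.
from langchain_core.prompts import ChatPromptTemplate

from rag import Chatbot


class ChatbotSketch(Chatbot):
    def ask(self, text: str) -> str:
        prompt = ChatPromptTemplate.from_messages(
            [("system", "You are a helpful assistant."), ("human", "{text}")]
        )
        chain = prompt | self.llm  # Same prompt-pipe-model chain as __main__.
        return chain.invoke({"text": text}).content


if __name__ == "__main__":
    print(ChatbotSketch().ask("Do you know Ming Chuan University?"))
```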

requirements.txt

Lines changed: 4 additions & 0 deletions
```
streamlit
langchain
langchain-groq
python-dotenv
```
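With these installed (`pip install -r requirements.txt`), the app should start with `streamlit run ChatBot.py`; Streamlit picks up the scripts under `pages/` automatically as a multipage app.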
