Building an Intelligent Chatbot
This article walks through building an LLM chat application from scratch. You should first complete the environment setup from part 02 of this series and know basic Python syntax. If you use the OpenAI API, you need an API key; if you prefer a local model, you can run one with Ollama.
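If you go the local route, one convenient option (an assumption about your setup, not something the rest of the article depends on) is to point the same OpenAI client at Ollama's OpenAI-compatible endpoint, so the later examples work unchanged:

```python
# local_client.py - using Ollama through its OpenAI-compatible API (sketch)
from openai import OpenAI

# Assumes `ollama serve` is running and a model has been pulled, e.g. `ollama pull llama2`
client = OpenAI(base_url="http://localhost:11434/v1", api_key="ollama")  # key is ignored by Ollama

response = client.chat.completions.create(
    model="llama2",
    messages=[{"role": "user", "content": "Hello!"}]
)
print(response.choices[0].message.content)
```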
The application we will build covers basic chat, multi-turn conversation management, a web interface, and an API service.
A Minimal Chatbot
Let's start with the simplest possible implementation. Just a few lines of code are enough to call a large model and chat with it:
```python
# chatbot_v1.py - the simplest chatbot
import os
from openai import OpenAI

# One shared client for the whole script
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY", "your-api-key"))

# The model used throughout this article
MODEL = "gpt-3.5-turbo"

def chat(message: str) -> str:
    response = client.chat.completions.create(
        model=MODEL,
        messages=[{"role": "user", "content": message}]
    )
    return response.choices[0].message.content

# Quick test
print(chat("What are decorators in Python?"))
```

If you want to give the bot a specific persona or role, add a system prompt:
```python
# chatbot_v2.py - a chatbot with a persona
import os
from openai import OpenAI

class ChatBot:
    def __init__(self, system_prompt: str = "You are a helpful AI assistant"):
        self.client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
        self.model = "gpt-3.5-turbo"  # same model name throughout
        self.system_prompt = system_prompt

    def chat(self, message: str) -> str:
        try:
            response = self.client.chat.completions.create(
                model=self.model,
                messages=[
                    {"role": "system", "content": self.system_prompt},
                    {"role": "user", "content": message}
                ],
                temperature=0.7
            )
            return response.choices[0].message.content
        except Exception as e:
            return f"Error: {str(e)}"

# Create a specialized bot
coder = ChatBot("You are a Python expert; keep answers concise and accurate")
print(coder.chat("How do I implement the singleton pattern?"))
```

Multi-Turn Conversation Management
A single-turn exchange rarely meets real-world needs. Most applications must remember the conversation history so the model can understand context.
Conversation History Management
```python
# conversation.py - full conversation management
import os
import json
from datetime import datetime
from openai import OpenAI

class ConversationManager:
    def __init__(self, model="gpt-3.5-turbo", max_history=10):
        self.client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
        self.model = model
        self.max_history = max_history
        self.conversations = {}

    def create_session(self, session_id: str = None) -> str:
        """Create a new session"""
        if not session_id:
            session_id = datetime.now().strftime("%Y%m%d_%H%M%S")
        self.conversations[session_id] = []
        return session_id

    def add_message(self, session_id: str, role: str, content: str):
        """Append a message to the history"""
        if session_id not in self.conversations:
            self.create_session(session_id)
        self.conversations[session_id].append({
            "role": role,
            "content": content,
            "timestamp": datetime.now().isoformat()
        })
        # Cap the history length (each turn adds two messages)
        if len(self.conversations[session_id]) > self.max_history * 2:
            self.conversations[session_id] = \
                self.conversations[session_id][-self.max_history * 2:]

    def chat(self, session_id: str, message: str) -> str:
        """Run one turn of conversation"""
        if session_id not in self.conversations:
            self.create_session(session_id)
        # Record the user message
        self.add_message(session_id, "user", message)
        # Build the messages for the API call (strip the timestamp field)
        messages = [{"role": m["role"], "content": m["content"]}
                    for m in self.conversations[session_id]]
        # Call the API
        response = self.client.chat.completions.create(
            model=self.model,
            messages=messages
        )
        # Record the assistant reply
        reply = response.choices[0].message.content
        self.add_message(session_id, "assistant", reply)
        return reply

    def save_session(self, session_id: str, filepath: str):
        """Save a session to disk"""
        with open(filepath, 'w', encoding='utf-8') as f:
            json.dump(self.conversations.get(session_id, []), f,
                      ensure_ascii=False, indent=2)

    def load_session(self, session_id: str, filepath: str):
        """Load a session from disk"""
        with open(filepath, 'r', encoding='utf-8') as f:
            self.conversations[session_id] = json.load(f)

# Usage example
manager = ConversationManager()
session = manager.create_session()

# Multi-turn conversation
print(manager.chat(session, "I want to learn machine learning"))
print(manager.chat(session, "Recommend some introductory books"))
print(manager.chat(session, "What is the first book about?"))  # remembers the context
```
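The manager above caps history by message count, but models actually limit context by tokens. A sketch of token-based trimming using the tiktoken library (an extra dependency not used elsewhere in this article; the 3000-token budget is an arbitrary choice for illustration):

```python
# token_trim.py - trim history by token budget (sketch)
import tiktoken

def trim_by_tokens(messages: list, model: str = "gpt-3.5-turbo",
                   max_tokens: int = 3000) -> list:
    """Keep the most recent messages that fit within max_tokens."""
    enc = tiktoken.encoding_for_model(model)
    kept, total = [], 0
    for m in reversed(messages):                # walk from newest to oldest
        n = len(enc.encode(m["content"]))
        if total + n > max_tokens:
            break
        kept.append(m)
        total += n
    return list(reversed(kept))                 # restore chronological order
```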
Streaming Output

A traditional API call waits for the entire reply to be generated. To improve the user experience, you can print the content incrementally as it is generated:
```python
# streaming.py - streaming chat
import sys
from openai import OpenAI

class StreamingChatBot:
    def __init__(self):
        self.client = OpenAI()

    def stream_chat(self, message: str):
        """Generate a reply chunk by chunk"""
        stream = self.client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": message}],
            stream=True
        )
        full_response = ""
        for chunk in stream:
            if chunk.choices[0].delta.content:
                content = chunk.choices[0].delta.content
                full_response += content
                # Print each chunk as it arrives
                sys.stdout.write(content)
                sys.stdout.flush()
        return full_response

# Try streaming output
bot = StreamingChatBot()
response = bot.stream_chat("Write a poem about spring")
```

Web Interface Implementation
A command-line interface alone is not very user-friendly. Below are two ways to build a web chat interface.
Gradio Version (Quick Start)
```python
# app_gradio.py - Gradio chat interface
import gradio as gr
from conversation import ConversationManager

manager = ConversationManager()
current_session = manager.create_session()

def respond(message, history):
    """Handle a message and return the updated chat"""
    response = manager.chat(current_session, message)
    history.append((message, response))
    return "", history

def clear_history():
    """Reset the conversation"""
    global current_session
    current_session = manager.create_session()
    return []

# Build the interface
with gr.Blocks(title="LLM Chat") as demo:
    gr.Markdown("## 🤖 Intelligent Chat Assistant")
    chatbot = gr.Chatbot(height=400)
    msg = gr.Textbox(placeholder="Type a message...", label="Message", lines=2)
    with gr.Row():
        submit = gr.Button("Send", variant="primary")
        clear = gr.Button("Clear chat")

    # Wire up the events
    msg.submit(respond, [msg, chatbot], [msg, chatbot])
    submit.click(respond, [msg, chatbot], [msg, chatbot])
    clear.click(clear_history, outputs=[chatbot])

# Launch
if __name__ == "__main__":
    demo.launch(share=True, server_port=7860)
```

Gradio is very concise and well suited for rapid prototyping. If you need more customization, use Streamlit:
Streamlit Version (More Customization)
```python
# app_streamlit.py - Streamlit chat interface
import time
import streamlit as st
from conversation import ConversationManager

# Page config
st.set_page_config(page_title="LLM Chat", page_icon="🤖", layout="wide")

# Initialize session state
if 'manager' not in st.session_state:
    st.session_state.manager = ConversationManager()
    st.session_state.session_id = st.session_state.manager.create_session()
if 'messages' not in st.session_state:
    st.session_state.messages = []

# Sidebar
with st.sidebar:
    st.title("⚙️ Settings")
    model = st.selectbox("Model", ["gpt-3.5-turbo", "gpt-4", "gpt-4-turbo"])
    temperature = st.slider("Creativity", 0.0, 1.0, 0.7)
    max_tokens = st.slider("Max length", 50, 2000, 500)
    if st.button("Clear chat"):
        st.session_state.messages = []
        st.session_state.session_id = st.session_state.manager.create_session()
        st.rerun()

# Main area
st.title("🤖 LLM Chat Assistant")

# Render the message history
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Input box
if prompt := st.chat_input("Say something..."):
    # Show the user message
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Generate the reply
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""
        # True streaming could be implemented here instead
        response = st.session_state.manager.chat(
            st.session_state.session_id, prompt)
        # Simulate a streaming effect
        for char in response:
            full_response += char
            message_placeholder.markdown(full_response + "▌")
            time.sleep(0.01)
        message_placeholder.markdown(full_response)
    st.session_state.messages.append(
        {"role": "assistant", "content": full_response})
```
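One thing to note: the sidebar values (model, temperature, max_tokens) are collected but never reach the model. A hypothetical way to wire them through, sketched as a subclass of our ConversationManager:

```python
# tunable.py - forward the sidebar settings to the API call (hypothetical sketch)
from conversation import ConversationManager

class TunableConversationManager(ConversationManager):
    def chat(self, session_id: str, message: str, model: str = None,
             temperature: float = 0.7, max_tokens: int = 500) -> str:
        if session_id not in self.conversations:
            self.create_session(session_id)
        self.add_message(session_id, "user", message)
        messages = [{"role": m["role"], "content": m["content"]}
                    for m in self.conversations[session_id]]
        # Same flow as the parent class, with the generation settings applied
        response = self.client.chat.completions.create(
            model=model or self.model,
            messages=messages,
            temperature=temperature,
            max_tokens=max_tokens,
        )
        reply = response.choices[0].message.content
        self.add_message(session_id, "assistant", reply)
        return reply
```

You would then store a TunableConversationManager in st.session_state and pass the widget values into chat().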
Advanced Feature Extensions

Once the basics are stable, you can add more advanced features to enrich the application.
Multi-Model Support
```python
# multi_model.py - multi-model routing
from openai import OpenAI

class MultiModelChat:
    def __init__(self):
        self.models = {
            "openai": self._chat_openai,
            "anthropic": self._chat_anthropic,
            "local": self._chat_local
        }

    def _chat_openai(self, message: str) -> str:
        client = OpenAI()
        response = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": message}]
        )
        return response.choices[0].message.content

    def _chat_anthropic(self, message: str) -> str:
        # Anthropic Claude implementation
        from anthropic import Anthropic
        client = Anthropic()
        response = client.messages.create(
            model="claude-3-5-sonnet-20241022",
            max_tokens=1000,
            messages=[{"role": "user", "content": message}]
        )
        return response.content[0].text

    def _chat_local(self, message: str) -> str:
        # Local model (via Ollama); disable streaming to get one JSON body
        import requests
        response = requests.post(
            "http://localhost:11434/api/generate",
            json={"model": "llama2", "prompt": message, "stream": False}
        )
        return response.json()["response"]

    def chat(self, message: str, model: str = "openai") -> str:
        if model not in self.models:
            raise ValueError(f"Unsupported model: {model}")
        return self.models[model](message)
```
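A quick usage sketch for the router (it assumes the relevant API keys are set and, for the "local" route, that an Ollama server is running):

```python
router = MultiModelChat()
print(router.chat("Hello!"))                  # defaults to "openai"
print(router.chat("Hello!", model="local"))   # routed to the local Ollama model
```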
Plugin System

```python
# plugins.py - feature plugins
from typing import Dict
from openai import OpenAI

class ChatPlugin:
    """Base class for chat plugins"""
    def process(self, message: str, context: Dict) -> str:
        raise NotImplementedError

class CodeExecutor(ChatPlugin):
    """Code execution plugin"""
    def process(self, message: str, context: Dict) -> str:
        if "```python" in message:
            # Extract the code
            code = message.split("```python")[1].split("```")[0]
            # Execute it (a real application needs a sandbox!)
            try:
                exec_globals = {}
                exec(code, exec_globals)
                return "Code executed successfully"
            except Exception as e:
                return f"Execution error: {e}"
        return message

class WebSearch(ChatPlugin):
    """Web search plugin"""
    def process(self, message: str, context: Dict) -> str:
        if "search:" in message:
            query = message.split("search:")[1].strip()
            # Call a search API here
            return "Search results: [relevant content]"
        return message

class PluggableChatBot:
    def __init__(self):
        self.client = OpenAI()
        self.plugins = []

    def add_plugin(self, plugin: ChatPlugin):
        """Register a plugin"""
        self.plugins.append(plugin)

    def chat(self, message: str) -> str:
        # Plugin pre-processing
        context = {}
        for plugin in self.plugins:
            message = plugin.process(message, context)
        # Call the LLM
        response = self.client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": message}]
        )
        result = response.choices[0].message.content
        # Plugin post-processing
        for plugin in reversed(self.plugins):
            result = plugin.process(result, context)
        return result
```
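Plugins are registered on the bot and applied in order. For example:

```python
bot = PluggableChatBot()
bot.add_plugin(WebSearch())
bot.add_plugin(CodeExecutor())
print(bot.chat("search: latest Python release"))
```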
Performance Optimization

Common ways to improve performance include caching, batching, and asynchronous calls:
```python
# optimization.py - performance optimization techniques
import asyncio
import hashlib
from functools import lru_cache
from openai import OpenAI

class OptimizedChatBot:
    def __init__(self):
        self.client = OpenAI()
        self.cache = {}

    @lru_cache(maxsize=100)
    def _get_embedding(self, text: str):
        """Cache embedding computations"""
        response = self.client.embeddings.create(
            model="text-embedding-ada-002",
            input=text
        )
        return response.data[0].embedding

    def _cache_key(self, message: str, model: str) -> str:
        """Build a cache key"""
        return hashlib.md5(f"{model}:{message}".encode()).hexdigest()

    async def chat_async(self, message: str, model: str = "gpt-3.5-turbo"):
        """Asynchronous call"""
        # Check the cache first
        cache_key = self._cache_key(message, model)
        if cache_key in self.cache:
            return self.cache[cache_key]
        # Run the blocking API call in a worker thread
        response = await asyncio.to_thread(
            self.client.chat.completions.create,
            model=model,
            messages=[{"role": "user", "content": message}]
        )
        result = response.choices[0].message.content
        self.cache[cache_key] = result
        return result

    async def batch_chat(self, messages: list):
        """Process a batch of messages concurrently"""
        tasks = [self.chat_async(msg) for msg in messages]
        return await asyncio.gather(*tasks)

# Usage example
async def main():
    bot = OptimizedChatBot()
    # Batch processing
    questions = ["What is Python?", "How do I learn programming?",
                 "Recommend some books"]
    responses = await bot.batch_chat(questions)
    for q, r in zip(questions, responses):
        print(f"Q: {q}\nA: {r}\n")

# asyncio.run(main())
```

Deployment and Testing
Once development is done, the application needs to be deployed to production. Below are a few common deployment approaches.
Serving as an API
```python
# api_server.py - FastAPI service
from typing import Optional
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
import uvicorn
from conversation import ConversationManager

app = FastAPI(title="LLM Chat API")

class ChatRequest(BaseModel):
    message: str
    session_id: Optional[str] = None
    model: str = "gpt-3.5-turbo"

class ChatResponse(BaseModel):
    response: str
    session_id: str

manager = ConversationManager()

@app.post("/chat", response_model=ChatResponse)
async def chat(request: ChatRequest):
    try:
        session_id = request.session_id or manager.create_session()
        response = manager.chat(session_id, request.message)
        return ChatResponse(response=response, session_id=session_id)
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.get("/health")
async def health():
    return {"status": "healthy"}

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)
```
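Once the server is running, any HTTP client can exercise it. A minimal example with requests, using the default host and port from above:

```python
import requests

r = requests.post("http://localhost:8000/chat",
                  json={"message": "Hello, who are you?"})
data = r.json()
print(data["response"])

# Reuse the returned session_id to continue the same conversation
r2 = requests.post("http://localhost:8000/chat",
                   json={"message": "What did I just say?",
                         "session_id": data["session_id"]})
print(r2.json()["response"])
```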
Docker Deployment

```dockerfile
# Dockerfile
FROM python:3.10-slim
WORKDIR /app
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
COPY . .
EXPOSE 7860
CMD ["python", "app_gradio.py"]
```

```yaml
# docker-compose.yml
version: '3.8'
services:
  chatbot:
    build: .
    ports:
      - "7860:7860"
    environment:
      - OPENAI_API_KEY=${OPENAI_API_KEY}
    volumes:
      - ./data:/app/data
    restart: unless-stopped
```

Testing
Before going to production, the key functionality should be tested:
```python
# test_chat.py - test cases
import unittest
from chatbot_v2 import ChatBot
from conversation import ConversationManager

class TestChatBot(unittest.TestCase):
    def setUp(self):
        self.bot = ChatBot()

    def test_basic_chat(self):
        response = self.bot.chat("Hello")
        self.assertIsNotNone(response)
        self.assertIsInstance(response, str)

    def test_context_preservation(self):
        manager = ConversationManager()
        session = manager.create_session()
        manager.chat(session, "My name is Xiaoming")
        response = manager.chat(session, "What is my name?")
        self.assertIn("Xiaoming", response)

if __name__ == "__main__":
    unittest.main()
```
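Both tests above hit the real API, which makes them slow, non-deterministic, and costly in CI. A common alternative is to mock the client; here is a minimal sketch with unittest.mock (the patch target assumes the module layout used in this article):

```python
# test_chat_mocked.py - offline test via mocking (sketch)
import unittest
from unittest.mock import MagicMock, patch
from chatbot_v2 import ChatBot

class TestChatBotMocked(unittest.TestCase):
    @patch("chatbot_v2.OpenAI")
    def test_chat_returns_model_output(self, mock_openai):
        # Build a fake response object shaped like the SDK's
        fake = MagicMock()
        fake.choices[0].message.content = "mocked reply"
        mock_openai.return_value.chat.completions.create.return_value = fake

        bot = ChatBot()
        self.assertEqual(bot.chat("Hello"), "mocked reply")

if __name__ == "__main__":
    unittest.main()
```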
Directions for Future Improvement

Once the basic application works, you can keep extending it. Common improvements include:
- Add voice input and output to make the application easier to use
- Integrate a RAG system for knowledge-base support (a minimal taste of this is sketched after the list)
- Add multilingual support so the application can serve an international audience
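As a small taste of the RAG direction: embed the knowledge-base entries, pick the one most similar to the question, and prepend it to the prompt. Everything below is illustrative; a real system would use a vector database and proper chunking:

```python
# rag_sketch.py - minimal RAG flavor (illustrative only)
import numpy as np
from openai import OpenAI

client = OpenAI()

def embed(text: str) -> np.ndarray:
    response = client.embeddings.create(
        model="text-embedding-ada-002", input=text)
    return np.array(response.data[0].embedding)

# A toy two-entry "knowledge base"
docs = ["Decorators wrap functions to extend their behavior.",
        "Generators produce values lazily with yield."]
doc_vectors = [embed(d) for d in docs]

def answer_with_context(question: str) -> str:
    q = embed(question)
    # Pick the document most similar to the question (cosine similarity)
    scores = [float(q @ v / (np.linalg.norm(q) * np.linalg.norm(v)))
              for v in doc_vectors]
    best_doc = docs[int(np.argmax(scores))]
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "system",
                   "content": f"Answer using this context: {best_doc}"},
                  {"role": "user", "content": question}])
    return response.choices[0].message.content
```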
These features will be discussed in more detail in later articles in this series.