- Module 1: Dashboard for cardiovascular disease data visualization
- Module 2: Machine learning predictor with Flask API
- Module 3: Voice assistant with DeepSeek and CosyVoice integration
- Add .gitignore for proper file exclusion
- Update requirements and documentation

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
47 lines
1.3 KiB
Python
import os

from langchain_openai import ChatOpenAI
from dotenv import load_dotenv

# Load environment variables from a local .env file
load_dotenv()
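# A minimal .env sketch this script assumes sits alongside it; the variable
# names base_url1 and DEEPSEEK_API_KEY1 match the os.getenv() calls below,
# while the values here are placeholders, not real credentials:
#
#   base_url1=https://api.deepseek.com
#   DEEPSEEK_API_KEY1=sk-xxxxxxxxxxxxxxxx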


def get_llm():
    """
    Initialize and return a DeepSeek LLM instance.
    """
    # Note: ChatOpenAI is used here as an OpenAI-compatible client class
    # to connect to the DeepSeek API.
    llm = ChatOpenAI(
        base_url=os.getenv("base_url1"),
        api_key=os.getenv("DEEPSEEK_API_KEY1"),
        model="deepseek-chat",
        temperature=0,  # Set to 0 to keep results stable and deterministic
    )
    return llm


# 1. Initialize the LLM
llm = get_llm()

# 2. User-level prompt
user_prompt = "Who are you? Please introduce yourself in detail."

print(f"--- User question: {user_prompt} ---")
print("--- LLM streaming reply: start ---")

# 3. **Core change: use the .stream() method for a streaming call.**
# .stream() returns an iterator; looping over it yields chunked output.
response_stream = llm.stream(user_prompt)
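# For comparison, the non-streaming equivalent would be a single blocking
# call through the standard LangChain .invoke() interface (a reference
# sketch, not part of the original flow):
#
#   response = llm.invoke(user_prompt)
#   print(response.content)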

# 4. Iterate over the stream and print each chunk
full_response = ""
for chunk in response_stream:
    # chunk.content holds the text of the current stream fragment;
    # end="" suppresses the newline so the output reads as a live stream
    print(chunk.content, end="", flush=True)
    full_response += chunk.content

print("\n--- LLM streaming reply: end ---")

# 5. Optional: print the full accumulated reply
# print(f"\nFull reply: {full_response}")
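# A sketch of the same streaming call with an explicit system message layered
# on top of the user-level prompt above (SystemMessage and HumanMessage are
# standard langchain_core types; this block is an illustration, not part of
# the original script):
#
#   from langchain_core.messages import HumanMessage, SystemMessage
#
#   messages = [
#       SystemMessage(content="You are a concise assistant."),
#       HumanMessage(content=user_prompt),
#   ]
#   for chunk in llm.stream(messages):
#       print(chunk.content, end="", flush=True)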