update
2025.11.02_langchain/.env (new file, 2 lines)
@@ -0,0 +1,2 @@
OPENAI_API_KEY=xxx
DASHSCOPE_BASE_URL=https://dashscope.aliyuncs.com/compatible-mode/v1
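The three scripts added below read these two variables through python-dotenv. A minimal sanity check, assuming python-dotenv is installed and the script is run from the directory containing this .env, might look like:

import os
import dotenv

# load_dotenv() reads key=value pairs from .env into the process environment
dotenv.load_dotenv()

assert os.getenv("OPENAI_API_KEY"), "OPENAI_API_KEY is missing"
print(os.getenv("DASHSCOPE_BASE_URL"))  # the DashScope compatible-mode endpoint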
2025.11.02_langchain/langchain_example.py (new file, 34 lines)
@@ -0,0 +1,34 @@
import langchain_openai
from langchain_core.prompts import ChatPromptTemplate
import dotenv
import os

# Load environment variables (including the API key)
dotenv.load_dotenv()

# Create the chat model
llm = langchain_openai.ChatOpenAI(
    api_key=os.getenv("OPENAI_API_KEY"),  # read the API key from the environment
    base_url=os.getenv("DASHSCOPE_BASE_URL"),  # point at the configured API endpoint
    model="qwen-plus",  # use the Qwen (Tongyi Qianwen) Plus model
    temperature=0.7,  # controls randomness (0-1; higher is more creative)
    streaming=True,  # enable streaming mode
)

# Create a simple prompt template
prompt = ChatPromptTemplate.from_messages([
    ("system", "你是一个友好的聊天助手。"),  # system role: "You are a friendly chat assistant."
    ("human", "{question}")  # placeholder for the user input
])

# Build the processing chain
chain = prompt | llm  # the pipe operator composes the components

# Stream the reply with stream()
for chunk in chain.stream({"question": "你好"}):  # "Hello"
    print(chunk.content, end="", flush=True)
print()  # newline

# # Non-streaming output
# response = chain.invoke({"question": "你好"})
# print(response.content)
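The chain built above also exposes an async streaming interface. As a sketch only, assuming the same `chain` object as in the script above, the equivalent with astream() would be:

import asyncio

async def main():
    # astream() is the async counterpart of stream(); chunks arrive as they are generated
    async for chunk in chain.astream({"question": "你好"}):
        print(chunk.content, end="", flush=True)
    print()

asyncio.run(main())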
2025.11.02_langchain/langchain_example_with_memory.py (new file, 61 lines)
@@ -0,0 +1,61 @@
import os
from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_community.chat_message_histories import ChatMessageHistory

# Load the API key and related settings from .env
load_dotenv()

# Initialize the LLM
llm = ChatOpenAI(
    api_key=os.getenv("OPENAI_API_KEY"),
    base_url=os.getenv("DASHSCOPE_BASE_URL"),
    model="qwen-plus",
    temperature=0.7
)

# Define a prompt template that includes the chat history
prompt = ChatPromptTemplate.from_messages([
    ("system", "你是一个乐于助人的助手。"),  # system role: "You are a helpful assistant."
    MessagesPlaceholder("history"),  # placeholder for prior messages
    ("human", "{input}")  # the current user input
])

# Build the base chain
chain = prompt | llm

# In-memory store: a dict simulating per-session histories (demo only)
store = {}

def get_session_history(session_id: str):
    if session_id not in store:
        store[session_id] = ChatMessageHistory()
    return store[session_id]

# Wrap the chain with message history
chatbot = RunnableWithMessageHistory(
    chain,
    get_session_history,
    input_messages_key="input",
    history_messages_key="history",
)

def chat_with_agent(input_message, session_id):
    print(f"用户: {input_message}")  # "User: ..."
    print("助手: ", end="", flush=True)  # "Assistant: "
    for chunk in chatbot.stream(
        {"input": input_message},
        config={"configurable": {"session_id": session_id}}  # reuse the same session_id for multi-turn dialogue
    ):
        print(chunk.content, end="", flush=True)
    print("\n\n---\n")

chat_with_agent(input_message='一句话解释下人工智能。', session_id="user_001")  # "Explain artificial intelligence in one sentence."

chat_with_agent(input_message='我们都聊了什么?', session_id="user_001")  # "What have we talked about?"

chat_with_agent(input_message='我们都聊了什么?', session_id="user_002")  # new session: no shared history

chat_with_agent(input_message='我们都聊了什么?', session_id="user_001")
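Since `store` is a plain dict of ChatMessageHistory objects, the accumulated histories can be printed to confirm that user_001 and user_002 are kept separate. A small sketch, assuming the calls above have already run:

for session_id, history in store.items():
    print(f"--- {session_id} ---")
    for message in history.messages:
        # each entry is a BaseMessage; message.type is "human" or "ai"
        print(f"{message.type}: {message.content[:60]}")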
2025.11.02_langchain/langchain_example_with_tool.py (new file, 57 lines)
@@ -0,0 +1,57 @@
import os
import dotenv
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.tools import tool
from langchain.agents import create_openai_tools_agent, AgentExecutor

# Load environment variables
dotenv.load_dotenv()

# Define the tools
@tool
def get_current_time() -> str:
    """获取当前日期和时间"""  # "Get the current date and time" (the docstring is the tool description shown to the LLM)
    from datetime import datetime
    return datetime.now().strftime("%Y-%m-%d %H:%M:%S")

@tool
def add_numbers(a: float, b: float) -> float:
    """将两个数字相加"""  # "Add two numbers"
    return a + b

# Note: more tools can be added, e.g. weather lookup or web search

tools = [get_current_time, add_numbers]

# Create the LLM (it must support function calling)
llm = ChatOpenAI(
    api_key=os.getenv("OPENAI_API_KEY"),
    base_url=os.getenv("DASHSCOPE_BASE_URL"),
    model="qwen-plus",
    temperature=0.7,
    streaming=True,
)

# Build the prompt template (LangChain injects the tool information automatically)
prompt = ChatPromptTemplate.from_messages([
    ("system", "你是一个智能助手,可以使用工具来回答问题。"),  # "You are a smart assistant that can use tools to answer questions."
    ("human", "{input_message}"),
    ("placeholder", "{agent_scratchpad}"),  # this placeholder is required
])

# Create an OpenAI-tools agent (compatible with function calling)
agent = create_openai_tools_agent(llm, tools, prompt)

# Create the executor
agent_executor = AgentExecutor(
    agent=agent,
    tools=tools,
    verbose=True,  # print intermediate steps (optional)
    handle_parsing_errors=True,
)

# Non-streaming call (AgentExecutor's streaming support is limited, especially around tool calls)
response = agent_executor.invoke({"input_message": "现在几点了?然后把 123 和 456 加起来。"})  # "What time is it? Then add 123 and 456."
print('\n---\n')
print(response["output"])
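Extending the agent with another capability only takes one more decorated function registered in `tools` before create_openai_tools_agent is called. A hypothetical multiply_numbers tool (not part of this commit) would follow the same pattern:

from langchain_core.tools import tool

@tool
def multiply_numbers(a: float, b: float) -> float:
    """Multiply two numbers and return their product."""
    return a * b

# register the new tool alongside the existing ones before building the agent
tools = [get_current_time, add_numbers, multiply_numbers]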