Compare commits

3 Commits: a75792f64a ... d1df40c1cd

| Author | SHA1 | Date |
|---|---|---|
| | d1df40c1cd | |
| | 7bcf9f0c87 | |
| | 01a35075d2 | |
```diff
@@ -1,7 +1,7 @@
 [metadata]
 # replace with your username:
 name = guan
-version = 0.1.190
+version = 0.1.193
 author = guanjihuan
 author_email = guanjihuan@163.com
 description = An open source python package
```
```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: guan
-Version: 0.1.190
+Version: 0.1.193
 Summary: An open source python package
 Home-page: https://py.guanjihuan.com
 Author: guanjihuan
```
```diff
@@ -218,6 +218,125 @@ def langchain_chat_with_memory(prompt="你好", model="qwen-plus", temperature=0
         print()
     return response
 
+# Chat with tools via LangChain (requires an API Key)
+def langchain_chat_with_tools(prompt="你好", model="qwen-plus", temperature=0.7, system_message=None, tools=None, print_show=1, load_env=1):
+    import guan
+    if tools==None:
+        response = guan.langchain_chat_without_memory(prompt=prompt, model=model, temperature=temperature, system_message=system_message, print_show=print_show, load_env=load_env)
+    else:
+        import os
+        from langchain_openai import ChatOpenAI
+        from langchain_core.prompts import ChatPromptTemplate
+        from langchain.agents import create_openai_tools_agent, AgentExecutor
+        if load_env:
+            import dotenv
+            from pathlib import Path
+            import inspect
+            caller_frame = inspect.stack()[1]
+            caller_dir = Path(caller_frame.filename).parent
+            env_path = caller_dir / ".env"
+            if env_path.exists():
+                dotenv.load_dotenv(env_path)
+        llm = ChatOpenAI(
+            api_key=os.getenv("OPENAI_API_KEY"),
+            base_url=os.getenv("DASHSCOPE_BASE_URL"),
+            model=model,
+            temperature=temperature,
+            streaming=False,
+        )
+        if system_message == None:
+            prompt_template = ChatPromptTemplate.from_messages([
+                ("human", "{input_message}"),
+                ("placeholder", "{agent_scratchpad}"),
+            ])
+        else:
+            prompt_template = ChatPromptTemplate.from_messages([
+                ("system", system_message),
+                ("human", "{input_message}"),
+                ("placeholder", "{agent_scratchpad}"),
+            ])
+        agent = create_openai_tools_agent(llm, tools, prompt_template)
+        agent_executor = AgentExecutor(
+            agent=agent,
+            tools=tools,
+            verbose=bool(print_show),
+            handle_parsing_errors=True,
+        )
+        response_result = agent_executor.invoke({"input_message": prompt})
+        response = response_result["output"]
+        if print_show:
+            print('\n'+response)
+    return response
+
+# Chat with tools and memory via LangChain (memory is kept temporarily on a function attribute; requires an API Key)
+def langchain_chat_with_tools_and_memory(prompt="你好", model="qwen-plus", temperature=0.7, system_message=None, tools=None, session_id="default", print_show=1, load_env=1):
+    import guan
+    if tools==None:
+        response = guan.langchain_chat_with_memory(prompt=prompt, model=model, temperature=temperature, system_message=system_message, session_id=session_id, print_show=print_show, load_env=load_env)
+    else:
+        import os
+        from langchain_openai import ChatOpenAI
+        from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
+        from langchain_core.runnables.history import RunnableWithMessageHistory
+        from langchain_community.chat_message_histories import ChatMessageHistory
+        from langchain.agents import create_openai_tools_agent, AgentExecutor
+        if load_env:
+            import dotenv
+            from pathlib import Path
+            import inspect
+            caller_frame = inspect.stack()[1]
+            caller_dir = Path(caller_frame.filename).parent
+            env_path = caller_dir / ".env"
+            if env_path.exists():
+                dotenv.load_dotenv(env_path)
+        llm = ChatOpenAI(
+            api_key=os.getenv("OPENAI_API_KEY"),
+            base_url=os.getenv("DASHSCOPE_BASE_URL"),
+            model=model,
+            temperature=temperature,
+            streaming=False,
+        )
+        if system_message == None:
+            prompt_template = ChatPromptTemplate.from_messages([
+                MessagesPlaceholder("history"),
+                ("human", "{input_message}"),
+                ("placeholder", "{agent_scratchpad}"),
+            ])
+        else:
+            prompt_template = ChatPromptTemplate.from_messages([
+                ("system", system_message),
+                MessagesPlaceholder("history"),
+                ("human", "{input_message}"),
+                ("placeholder", "{agent_scratchpad}"),
+            ])
+
+        if not hasattr(langchain_chat_with_tools_and_memory, "store"):
+            langchain_chat_with_tools_and_memory.store = {}
+
+        def get_session_history(sid: str):
+            if sid not in langchain_chat_with_tools_and_memory.store:
+                langchain_chat_with_tools_and_memory.store[sid] = ChatMessageHistory()
+            return langchain_chat_with_tools_and_memory.store[sid]
+
+        agent = create_openai_tools_agent(llm, tools, prompt_template)
+        agent_executor = AgentExecutor(
+            agent=agent,
+            tools=tools,
+            verbose=bool(print_show),
+            handle_parsing_errors=True,
+        )
+        agent_with_chat_history = RunnableWithMessageHistory(
+            agent_executor,
+            get_session_history,
+            input_messages_key="input_message",
+            history_messages_key="history",
+        )
+        response_result = agent_with_chat_history.invoke({"input_message": prompt}, config={"configurable": {"session_id": session_id}})
+        response = response_result["output"]
+        if print_show:
+            print('\n'+response)
+    return response
+
 # Chat with a local Ollama model (requires Ollama running and the corresponding model downloaded)
 def ollama_chat(prompt='你好/no_think', model="qwen3:0.6b", temperature=0.8, print_show=1):
     import ollama
```
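For context, a minimal usage sketch of the two functions added above. The `get_current_time` tool, the prompts, and the `.env` contents are illustrative assumptions, not part of the commit; as the diff shows, the functions read `OPENAI_API_KEY` and `DASHSCOPE_BASE_URL` from the environment (or from a `.env` file next to the calling script when `load_env=1`).

```python
# Hypothetical usage sketch, not part of the commit.
# Assumes a .env file next to this script, e.g.:
#   OPENAI_API_KEY=sk-...
#   DASHSCOPE_BASE_URL=https://...
import guan
from langchain_core.tools import tool

@tool
def get_current_time() -> str:
    """Return the current local time."""  # docstring doubles as the tool description
    import datetime
    return datetime.datetime.now().isoformat()

# One-shot call: the agent may invoke get_current_time before answering.
guan.langchain_chat_with_tools("What time is it?", tools=[get_current_time])

# Memory variant: calls sharing session_id="demo" share chat history,
# since per-session histories live on the function's `store` attribute.
guan.langchain_chat_with_tools_and_memory("My name is Ming.", tools=[get_current_time], session_id="demo")
guan.langchain_chat_with_tools_and_memory("What is my name?", tools=[get_current_time], session_id="demo")
```

Note that when `tools` is omitted, both functions simply fall back to the existing memory-free and memory-backed chat helpers in the package.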
```diff
@@ -80,6 +80,69 @@ def loop_calculation_with_three_parameters(function_name, parameter_array_1, par
         i1 += 1
     return result_array
 
+# Text diff
+def word_diff(a, b, print_show=1):
+    import difflib
+    import jieba
+    import logging
+    jieba.setLogLevel(logging.ERROR)
+    a_words = jieba.lcut(a)
+    b_words = jieba.lcut(b)
+    sm = difflib.SequenceMatcher(None, a_words, b_words)
+    result = []
+    for tag, i1, i2, j1, j2 in sm.get_opcodes():
+        if tag == "equal":
+            result.extend(a_words[i1:i2])
+        elif tag == "delete":
+            result.append("\033[9;91m" + "".join(a_words[i1:i2]) + "\033[0m")
+        elif tag == "insert":
+            result.append("\033[92m" + "".join(b_words[j1:j2]) + "\033[0m")
+        elif tag == "replace":
+            result.append("\033[9;91m" + "".join(a_words[i1:i2]) + "\033[0m")
+            result.append(" ")
+            result.append("\033[92m" + "".join(b_words[j1:j2]) + "\033[0m")
+    diff_result = "".join(result)
+    if print_show:
+        print(diff_result)
+    return diff_result
+
+# Text diff (written to an HTML file)
+def word_diff_to_html(a, b, filename='diff_result', write_file=1):
+    import difflib
+    from html import escape
+    import jieba
+    import logging
+    jieba.setLogLevel(logging.ERROR)
+    a_words = jieba.lcut(a)
+    b_words = jieba.lcut(b)
+    sm = difflib.SequenceMatcher(None, a_words, b_words)
+    html_parts = []
+    for tag, i1, i2, j1, j2 in sm.get_opcodes():
+        if tag == "equal":
+            html_parts.append("".join(map(escape, a_words[i1:i2])))
+        elif tag == "delete":
+            html_parts.append(f"<span style='background:#e74c3c;color:white;padding:1px 2px;border-radius:2px;text-decoration:line-through;'>"
+                + "".join(map(escape, a_words[i1:i2]))
+                + "</span>")
+        elif tag == "insert":
+            html_parts.append(f"<span style='background:#2ecc71;color:white;padding:1px 2px;border-radius:2px;'>"
+                + "".join(map(escape, b_words[j1:j2]))
+                + "</span>")
+        elif tag == "replace":
+            html_parts.append(f"<span style='background:#e74c3c;color:white;padding:1px 2px;border-radius:2px;text-decoration:line-through;'>"
+                + "".join(map(escape, a_words[i1:i2]))
+                + "</span>")
+            html_parts.append(" ")
+            html_parts.append(f"<span style='background:#2ecc71;color:white;padding:1px 2px;border-radius:2px;'>"
+                + "".join(map(escape, b_words[j1:j2]))
+                + "</span>")
+    diff_result = "".join(html_parts)
+    diff_result = diff_result.replace("\n", "<br>")
+    if write_file:
+        with open(filename+'.html', 'w', encoding='UTF-8') as f:
+            f.write(diff_result)
+    return diff_result
+
 # Print an array
 def print_array(array, line_break=0):
     if line_break == 0:
```
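A minimal usage sketch for the two diff helpers added above (the sample strings are illustrative). Both segment their inputs with `jieba`, so Chinese text is compared word by word rather than character by character; `word_diff` prints to the terminal with ANSI colors, while `word_diff_to_html` emits styled `<span>` markup.

```python
# Hypothetical usage sketch, not part of the commit.
import guan

a = "LangChain is a framework for building LLM applications."
b = "LangChain is an open-source framework for building LLM apps."

# Terminal output: deletions in strikethrough red, insertions in green.
guan.word_diff(a, b)

# Writes diff_result.html with the same comparison as colored <span> tags.
html = guan.word_diff_to_html(a, b, filename='diff_result')
```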