Compare commits

...

2 Commits

SHA1        Message   Date
a75792f64a  0.1.190   2025-12-03 11:40:26 +08:00
639f04868f  update    2025-12-02 11:11:09 +08:00
3 changed files with 82 additions and 21 deletions

View File

@@ -1,7 +1,7 @@
 [metadata]
 # replace with your username:
 name = guan
-version = 0.1.188
+version = 0.1.190
 author = guanjihuan
 author_email = guanjihuan@163.com
 description = An open source python package

View File

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: guan
-Version: 0.1.188
+Version: 0.1.190
 Summary: An open source python package
 Home-page: https://py.guanjihuan.com
 Author: guanjihuan

View File

@@ -1,6 +1,6 @@
 # Module: AI_chat
 
-# AI chat
+# AI chat (current default model is 'hunyuan-lite', no memory)
 def chat(prompt='你好', model=1, stream=1, stream_label=0):
     import requests
     url = "http://api.guanjihuan.com/chat"
@@ -27,7 +27,7 @@ def chat(prompt='你好', model=1, stream=1, stream_label=0):
         print('\n--- End Chat Stream Message ---\n')
     return response
 
-# AI chat with the function's source code attached
+# AI chat with the function's source code attached (current default model is 'hunyuan-lite', no memory)
 def chat_with_function_code(function_name, prompt='', model=1, stream=1):
     import guan
     function_source = guan.get_source(function_name)
@@ -37,7 +37,7 @@ def chat_with_function_code(function_name, prompt='', model=1, stream=1):
         response = guan.chat(prompt=function_source+'\n\n'+prompt, model=model, stream=stream)
     return response
 
-# Automatic bot-to-bot chat
+# Automatic bot-to-bot chat (current default model is 'hunyuan-lite', no memory)
 def auto_chat(prompt='你好', round=2, model=1, stream=1):
     import guan
     response0 = prompt
@@ -48,7 +48,7 @@ def auto_chat(prompt='你好', round=2, model=1, stream=1):
         print('机器人 2: ')
         response0 = guan.chat(prompt=response1, model=model, stream=stream)
 
-# Automatic bot-to-bot chat (guided)
+# Automatic bot-to-bot chat (guided) (current default model is 'hunyuan-lite', no memory)
 def auto_chat_with_guide(prompt='你好', guide_message='回答字数少于30个字最后反问我一个问题', round=5, model=1, stream=1):
     import guan
     response0 = prompt
@@ -59,8 +59,46 @@ def auto_chat_with_guide(prompt='你好', guide_message='回答字数少于30
         print('机器人 2: ')
         response0 = guan.chat(prompt=response1+guide_message, model=model, stream=stream)
 
-# Chat via LangChain without memory (requires an API Key)
-def langchain_chat_without_memory(prompt="你好", temperature=0.7, system_message=None, print_show=1, load_env=1):
+# Chat via the OpenAI framework (requires an API Key)
+def openai_chat(prompt="你好", model="qwen-plus", temperature=0.7, system_message=None, history=[], print_show=1, load_env=1):
+    import os
+    from openai import OpenAI
+    if load_env:
+        import dotenv
+        from pathlib import Path
+        import inspect
+        caller_frame = inspect.stack()[1]
+        caller_dir = Path(caller_frame.filename).parent
+        env_path = caller_dir / ".env"
+        if env_path.exists():
+            dotenv.load_dotenv(env_path)
+    client = OpenAI(
+        api_key=os.getenv("OPENAI_API_KEY"),
+        base_url=os.getenv("DASHSCOPE_BASE_URL"),
+    )
+    if system_message == None:
+        messages = history+[{"role": "user", "content": prompt}]
+    else:
+        messages = [{"role": "system", "content": system_message}]+history+[{"role": "user", "content": prompt}]
+    completion = client.chat.completions.create(
+        model=model,
+        messages=messages,
+        temperature=temperature,
+        stream=True,
+    )
+    response = ''
+    for chunk in completion:
+        response += chunk.choices[0].delta.content
+        if print_show:
+            print(chunk.choices[0].delta.content, end="", flush=True)
+    if print_show:
+        print()
+    history.append({"role": "user", "content": prompt})
+    history.append({"role": "assistant", "content": response})
+    return response, history
+
+# Load a model via LangChain (requires an API Key)
+def load_langchain_model(model="qwen-plus", temperature=0.7, load_env=1):
     from langchain_openai import ChatOpenAI
     from langchain_core.prompts import ChatPromptTemplate
     import os
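
The new openai_chat helper streams the reply through an OpenAI-compatible client and also returns the updated history list, so multi-turn use can look roughly like the sketch below. It assumes the function is exposed as guan.openai_chat like the other helpers in this module, and that a .env file next to the calling script defines OPENAI_API_KEY and DASHSCOPE_BASE_URL (as read in the function body); the prompts are only illustrative.

import guan

history = []
# first turn: the reply is streamed to stdout and appended to history in place
response, history = guan.openai_chat(prompt="你好", history=history)
# second turn reuses the accumulated history, so the model sees the earlier exchange
response, history = guan.openai_chat(prompt="请继续", history=history)
print(len(history))  # 4 entries: two user messages and two assistant replies
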
@@ -76,7 +114,30 @@ def langchain_chat_without_memory(prompt="你好", temperature=0.7, system_messa
     llm = ChatOpenAI(
         api_key=os.getenv("OPENAI_API_KEY"),
         base_url=os.getenv("DASHSCOPE_BASE_URL"),
-        model="qwen-plus",
+        model=model,
+        temperature=temperature,
+        streaming=True,
+    )
+    return llm
+
+# Chat via LangChain without memory (requires an API Key)
+def langchain_chat_without_memory(prompt="你好", model="qwen-plus", temperature=0.7, system_message=None, print_show=1, load_env=1):
+    from langchain_openai import ChatOpenAI
+    from langchain_core.prompts import ChatPromptTemplate
+    import os
+    if load_env:
+        import dotenv
+        from pathlib import Path
+        import inspect
+        caller_frame = inspect.stack()[1]
+        caller_dir = Path(caller_frame.filename).parent
+        env_path = caller_dir / ".env"
+        if env_path.exists():
+            dotenv.load_dotenv(env_path)
+    llm = ChatOpenAI(
+        api_key=os.getenv("OPENAI_API_KEY"),
+        base_url=os.getenv("DASHSCOPE_BASE_URL"),
+        model=model,
         temperature=temperature,
         streaming=True,
     )
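
load_langchain_model now wraps the ChatOpenAI construction so the same streaming client can be reused outside the chat helpers. A minimal sketch, assuming the same environment variables and that the function is re-exported as guan.load_langchain_model:

import guan

llm = guan.load_langchain_model(model="qwen-plus", temperature=0.5)
# llm is a langchain_openai ChatOpenAI instance, so it can be invoked directly
print(llm.invoke("你好").content)
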
@@ -100,7 +161,7 @@ def langchain_chat_without_memory(prompt="你好", temperature=0.7, system_messa
     return response
 
 # Chat via LangChain with memory (memory is kept temporarily on a function attribute; requires an API Key)
-def langchain_chat_with_memory(prompt="你好", temperature=0.7, system_message=None, session_id="default", print_show=1, load_env=1):
+def langchain_chat_with_memory(prompt="你好", model="qwen-plus", temperature=0.7, system_message=None, session_id="default", print_show=1, load_env=1):
     from langchain_openai import ChatOpenAI
     from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
     from langchain_core.runnables.history import RunnableWithMessageHistory
@@ -118,7 +179,7 @@ def langchain_chat_with_memory(prompt="你好", temperature=0.7, system_message=
     llm = ChatOpenAI(
         api_key=os.getenv("OPENAI_API_KEY"),
         base_url=os.getenv("DASHSCOPE_BASE_URL"),
-        model="qwen-plus",
+        model=model,
         temperature=temperature,
         streaming=True,
     )
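
With the model parameter threaded through, both LangChain chat helpers can now target a model other than the previously hard-coded "qwen-plus". A rough example (the model name and prompts are illustrative, environment variables as above):

import guan

# memoryless helper returns the streamed response text
reply = guan.langchain_chat_without_memory(prompt="你好", model="qwen-turbo")
# memory-backed helper keeps the conversation under the given session_id
guan.langchain_chat_with_memory(prompt="你好", model="qwen-turbo", session_id="demo")
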
@@ -181,14 +242,14 @@ def ollama_chat(prompt='你好/no_think', model="qwen3:0.6b", temperature=0.8, p
         print()
     return response
 
-# Load a local model and tokenizer via ModelScope (load only once)
+# Load a local model and tokenizer via ModelScope (load only once; requires local model files)
 def load_modelscope_model(model_name="D:/models/Qwen/Qwen3-0.6B"):
     from modelscope import AutoModelForCausalLM, AutoTokenizer
     tokenizer = AutoTokenizer.from_pretrained(model_name)
     model = AutoModelForCausalLM.from_pretrained(model_name)
     return model, tokenizer
 
-# Chat with a local ModelScope model
+# Chat with a local ModelScope model (requires local model files)
 def modelscope_chat(model, tokenizer, prompt='你好 /no_think', history=[], temperature=0.7, top_p=0.8, print_show=1):
     from threading import Thread
     from transformers import TextIteratorStreamer
@@ -218,13 +279,13 @@ def modelscope_chat(model, tokenizer, prompt='你好 /no_think', history=[], tem
         response += new_text
     if print_show:
         print()
-    new_history = history + [
+    history += [
         {"role": "user", "content": prompt},
         {"role": "assistant", "content": response}
     ]
-    return response, new_history
+    return response, history
 
-# Load a local model with LLaMA (load only once)
+# Load a local model with LLaMA (load only once; requires local model files)
 def load_llama_model(model_path="D:/models/Qwen/Qwen3-0.6B-GGUF/Qwen3-0.6B-Q8_0.gguf"):
     from llama_cpp import Llama
     llm = Llama(
@@ -236,11 +297,11 @@ def load_llama_model(model_path="D:/models/Qwen/Qwen3-0.6B-GGUF/Qwen3-0.6B-Q8_0.
     )
     return llm
 
-# Chat with a local LLaMA model
+# Chat with a local LLaMA model (requires local model files)
 def llama_chat(llm, prompt='你好 /no_think', history=[], temperature=0.7, top_p=0.8, print_show=1):
-    new_history = history + [{"role": "user", "content": prompt}]
+    history += [{"role": "user", "content": prompt}]
     llm_response = llm.create_chat_completion(
-        messages=new_history,
+        messages=history,
         temperature=temperature,
         top_p=top_p,
         repeat_penalty=1.5,
@@ -256,5 +317,5 @@ def llama_chat(llm, prompt='你好 /no_think', history=[], temperature=0.7, top_
             print(token, end="", flush=True)
     if print_show:
         print()
-    new_history.append({"role": "assistant", "content": response})
-    return response, new_history
+    history.append({"role": "assistant", "content": response})
+    return response, history
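
Note the behavioural tweak in modelscope_chat and llama_chat: the passed-in history list is now extended in place (history += ... and history.append(...)) instead of being copied into a new_history, and the same list is returned. A rough usage sketch, assuming a local GGUF model at the default path and that the helpers are exposed on the guan package:

import guan

llm = guan.load_llama_model()  # loads the GGUF model once
history = []                   # pass an explicit list; it is mutated in place across turns
response, history = guan.llama_chat(llm, prompt='你好 /no_think', history=history)
response, history = guan.llama_chat(llm, prompt='请继续 /no_think', history=history)
# because of the in-place update, reassigning history is optional, but avoid relying on the
# mutable default history=[], since that list would be shared between separate conversations
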