Compare commits

...

60 Commits

Author SHA1 Message Date
5b00715f84 Create one-dimensional_infinite_square_well_using_finite_difference_method.py 2025-08-27 17:24:32 +08:00
b411789fd3 update 2025-08-26 17:30:04 +08:00
e58a4612ef Update parallel_calculations_with_python_using_Pool.py 2025-08-15 16:21:34 +08:00
62b8b4a11b Create example_of_parallel_calculation.py 2025-08-15 16:11:23 +08:00
cb35e77c38 Update parallel_calculations_with_python_using_Pool.py 2025-08-14 17:59:34 +08:00
5e77dc87e6 update 2025-08-14 17:32:27 +08:00
b3bfab1569 Update beamer.tex 2025-08-09 21:03:03 +08:00
8131c17258 update 2025-07-29 10:37:06 +08:00
6d7da71eda Update setup.py 2025-07-28 11:14:52 +08:00
00f46a79c0 Update setup.py 2025-07-28 10:51:54 +08:00
e5f835f5de update 2025-07-28 09:16:46 +08:00
15764c8052 Create python_for_loop_time.py 2025-07-25 12:50:14 +08:00
dcf1f324ce update 2025-07-24 17:31:48 +08:00
8423da7ddc update 2025-07-11 17:56:39 +08:00
ef1f0522f4 update 2025-07-10 16:33:55 +08:00
0db8b55a3f update 2025-06-20 21:31:38 +08:00
35c87df603 Update markdown_example.md 2025-05-29 03:46:20 +08:00
984fdf71df update 2025-04-22 02:25:15 +08:00
f3fa228754 Update simple_command.sh 2025-04-16 10:28:02 +08:00
a8bb557a65 update 2025-04-16 04:50:31 +08:00
71f3e4b758 Create one_letter.sh 2025-04-16 04:01:44 +08:00
848f34ef89 update 2025-04-13 12:13:32 +08:00
5327c384ee update 2025-04-04 01:17:00 +08:00
02a3f2b279 update 2025-04-03 23:46:51 +08:00
8d726fe9e6 update 2025-04-03 01:48:49 +08:00
fe7bc6db82 Update matrix_memory.py 2025-03-29 08:56:28 +08:00
07daddb196 Create matrix_memory.py 2025-03-29 08:47:45 +08:00
388be36ab1 Update example_of_timer.py 2025-03-24 18:07:24 +08:00
1a5d874ac0 Update example_of_time_logging.py 2025-03-24 18:02:45 +08:00
b85ae814c3 update 2025-03-24 17:52:33 +08:00
7b07bd90bc Create numba_example.py 2025-03-23 22:16:38 +08:00
2bb0a1ab9b update 2025-03-20 05:29:42 +08:00
ede8d08443 Update LICENSE 2025-03-19 17:22:25 +08:00
652b32fdde Create eta_test_for_integral_of_DOS.py 2025-03-17 16:23:18 +08:00
90d2b600e4 update 2025-03-15 00:00:04 +08:00
7635346f96 update 2025-03-12 00:20:50 +08:00
3776c2c12a update 2025-03-11 18:46:42 +08:00
7fabcff909 Create convert_class_object_to_dict.py 2025-03-10 20:27:38 +08:00
cc7c4200f5 update 2025-03-10 15:52:38 +08:00
4ea5c623cb update 2025-03-10 01:01:23 +08:00
43a9c1a2eb Update matrix_running_time_for_different_num_of_cpu_cores.py 2025-03-09 15:18:41 +08:00
eb11e84063 update 2025-03-09 01:54:07 +08:00
65d9777440 update 2025-03-09 00:09:41 +08:00
249d292bd8 Update matrix_running_time_for_different_num_of_cpu_cores.py 2025-03-08 20:35:02 +08:00
cd9d66c857 Update matrix_running_time_for_different_num_of_cpu_cores.py 2025-03-08 19:59:52 +08:00
ef45071a38 update 2025-03-08 19:33:10 +08:00
a68778dd2e Update matrix_running_time.py 2025-03-08 01:53:34 +08:00
e7e929ff08 Create matrix_running_time.py 2025-03-08 01:06:34 +08:00
33e1dc5118 update 2025-03-07 05:41:59 +08:00
0bcd8ce52e update 2025-03-07 04:15:02 +08:00
cbedb87697 update 2025-03-06 09:20:06 +08:00
60b9410584 update 2025-03-06 07:02:40 +08:00
9cad8f4a9d update 2025-03-01 23:57:16 +08:00
5f3d81fc6c update 2025-02-25 21:41:39 +08:00
ef6bb4e9d2 update 2025-02-25 08:42:31 +08:00
7265410df6 update 2025-02-23 00:01:25 +08:00
e3c4a49292 update 2025-02-19 22:03:38 +08:00
d6d7b6d6a1 Update ollama_with_python.py 2025-01-28 00:59:25 +08:00
3cde4d784f Create KMeans_example.py 2025-01-13 08:10:04 +08:00
8a83969b07 Update python_example.py 2025-01-12 10:53:37 +08:00
135 changed files with 6224 additions and 774 deletions

View File

@@ -9,14 +9,14 @@
2. 列表内容
3. 列表内容
无序列表:用 + - * 任何一种都可以。为了不和其他记号重复,个人倾向于+
无序列表:用 + - * 任何一种都可以。这里推荐使用 - 。
+ 列表内容
+ 嵌套前面加几个空格。为了保险起见个人倾向于用四个空格或一个Tab
+ 列表内容
+ 列表嵌套
+ 列表嵌套
+ 列表嵌套
- 列表内容
- 嵌套前面加几个空格。这里推荐使用四个空格缩进,兼容性最好
- 列表内容
- 列表嵌套
- 列表嵌套
- 列表嵌套
*倾斜:前后一个星号*
@@ -30,15 +30,18 @@
print('hello world')
```
分割线:三个或者三个以上的 - 或 * 。为了不和其他记号重复,个人倾向于用 --- 。
分割线:三个或者三个以上的 - 或 * 。这里推荐使用 --- 。
---
在Markdown中空一行可采用以下符号。该符号为HTML中的符号,在Markdown中也是支持的。
换行有:
<br />
- 通用场景:优先用两个换行符,即按两次回车
- 行内换行:用两个空格,
加上换行符
- 复杂排版:必要时用 <br> 或其他 HTML 标签
以下是表格的书写形式。其中,第二行用一个横杆也是可以。为了保险起见,个人倾向于用三个横杆。
以下是表格的书写形式。其中,第二行用一个横杆也是可以。这里推荐使用三个横杆。
| 右对齐 | 居中对齐 | 左对齐 |
| ---: | :---: | :--- |

View File

@@ -9,7 +9,7 @@
\author{作者名字\inst{1},作者名字\inst{2}} %作者
\institute{\inst{1}第一个单位\and\inst{2}第二个单位} %这里的\and有换行的效果
\date{\today} %时间(默认也会显示)
\logo{\includegraphics[height=1.0cm]{1.jpg}} %右下角的小log
% \logo{\includegraphics[height=1.0cm]{1.jpg}} %右下角的小log
\begin{document} %正文开始
\begin{frame} %相当于ppt里的一页

View File

@@ -0,0 +1,31 @@
"""
This code is supported by the website: https://www.guanjihuan.com
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/4536
"""
import multiprocessing
import os
import time
def run_proc(name): # 要执行的代码
start_time = time.perf_counter()
time.sleep(2)
end_time = time.perf_counter()
print ('Process id running on %s = %s' % (name, os.getpid()), '; running time = %s' % (end_time-start_time))
if __name__ == '__main__':
start_time = time.perf_counter()
# 循环创建进程
processes = []
for i in range(4):
p = multiprocessing.Process(target=run_proc, args=(f'job{i}',))
processes.append(p)
p.start()
# 等待所有进程完成
for p in processes:
p.join()
end_time = time.perf_counter()
print('运行时间(s)=', (end_time-start_time))

View File

@@ -0,0 +1,23 @@
"""
This code is supported by the website: https://www.guanjihuan.com
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/4536
"""
import multiprocessing
import os
import time
def run_proc(name):
start_time = time.perf_counter()
time.sleep(2)
end_time = time.perf_counter()
print ('Process id running on %s = %s' % (name, os.getpid()), '; running time = %s' % (end_time-start_time))
return name
if __name__ == '__main__':
start_time = time.perf_counter()
with multiprocessing.Pool() as pool:
results = pool.map(run_proc, [f"task {i}" for i in range(64)])
end_time = time.perf_counter()
print(results)
print(end_time - start_time)

View File

@@ -1,42 +0,0 @@
import numpy as np
from numba import jit
import time

def for_sum(numpy_array):
    # Plain-Python loop sum (slow baseline). Note: local name 'sum' shadows the builtin.
    sum = 0
    for number in numpy_array:
        sum += number
    return sum

@jit
def numba_for_sum(numpy_array):
    # Same loop, JIT-compiled by numba; the first call includes compilation time.
    sum = 0
    for number in numpy_array:
        sum += number
    return sum

numpy_array = np.arange(0,1e8,1)  # 1e8 float64 elements

# Time the builtin sum() over the numpy array (element-by-element, slow).
start = time.time()
result = sum(numpy_array)
end = time.time()
print('\nresult:', result)
print('python中sum()函数求和时间:\n', end - start)

# Time numpy's vectorized np.sum().
start = time.time()
result = np.sum(numpy_array)
end = time.time()
print('\nresult:', result)
print('numpy.sum()函数求和时间:\n', end - start)

# Time the plain-Python loop.
start = time.time()
result = for_sum(numpy_array)
end = time.time()
print('\nresult:', result)
print('for循环求和numpy数组的时间\n', end - start)

# Time the numba-compiled loop (timing includes JIT compilation on this first call).
start = time.time()
result = numba_for_sum(numpy_array)
end = time.time()
print('\nresult:', result)
print('numba加速for循环求和numpy数组的时间\n', end - start, '\n')

View File

@@ -0,0 +1,78 @@
from numba import jit
import numpy as np
import time

# Benchmark setup: 1e5-element float64 array; each timing repeats the call 1000 times,
# so JIT compilation cost (first call) is amortized across the loop.
numpy_array = np.arange(0,1e5,1)
times = 1000

def for_sum(numpy_array):
    # Plain-Python loop sum (slow baseline). Note: local 'sum' shadows the builtin.
    sum = 0
    for number in numpy_array:
        sum += number
    return sum

start = time.time()
for _ in range(times):
    result = for_sum(numpy_array)
end = time.time()
print('for循环求和时间', end - start)

start = time.time()
for _ in range(times):
    result = sum(numpy_array)
end = time.time()
print('sum()函数求和时间:', end - start)

start = time.time()
for _ in range(times):
    result = np.sum(numpy_array)
end = time.time()
print('numpy.sum()函数求和时间:', end - start)
print()

@jit
def numba_for_sum(numpy_array):
    # Loop sum JIT-compiled by numba.
    sum = 0
    for number in numpy_array:
        sum += number
    return sum

@jit
def numba_np_sum(numpy_array):
    # np.sum under plain @jit (lazy compilation, fallback behavior allowed).
    result = np.sum(numpy_array)
    return result

@jit(nopython=True)
def numba_nopython_np_sum(numpy_array):
    # np.sum compiled in nopython mode (no Python-object fallback).
    result = np.sum(numpy_array)
    return result

@jit(nopython=True, parallel=True)
def numba_nopython_parallel_np_sum(numpy_array):
    # np.sum in nopython mode with automatic parallelization enabled.
    result = np.sum(numpy_array)
    return result

start = time.time()
for _ in range(times):
    result = numba_for_sum(numpy_array)
end = time.time()
print('numba + for循环求和时间', end - start)

start = time.time()
for _ in range(times):
    result = numba_np_sum(numpy_array)
end = time.time()
print('numba + numpy.sum()函数求和时间:', end - start)

start = time.time()
for _ in range(times):
    result = numba_nopython_np_sum(numpy_array)
end = time.time()
print('numba(nopython) + numpy.sum()函数求和时间:', end - start)

start = time.time()
for _ in range(times):
    result = numba_nopython_parallel_np_sum(numpy_array)
end = time.time()
print('numba(nopython,parallel) + numpy.sum()函数求和时间:', end - start)

View File

@@ -0,0 +1,58 @@
from numba import jit
from numba import prange
import time
import numpy as np

# Benchmark setup: 1e5-element float64 array; each timing repeats the call 1000 times.
numpy_array = np.arange(0,1e5,1)
times = 1000

def for_sum(numpy_array):
    # Plain-Python loop sum (baseline). Note: local 'sum' shadows the builtin.
    sum = 0
    for number in numpy_array:
        sum += number
    return sum

@jit
def numba_for_sum_1(numpy_array):
    # Loop sum under plain @jit (lazy compilation mode).
    sum = 0
    for number in numpy_array:
        sum += number
    return sum

@jit(nopython=True)
def numba_for_sum_2(numpy_array):
    # Loop sum in nopython mode (no Python-object fallback).
    sum = 0
    for number in numpy_array:
        sum += number
    return sum

@jit(nopython=True, parallel=True)
def numba_for_sum_3(numpy_array):
    # prange splits iterations across threads; numba turns 'sum' into a parallel reduction.
    sum = 0
    for i in prange(len(numpy_array)):
        sum += numpy_array[i]
    return sum

start = time.time()
for _ in range(times):
    result = for_sum(numpy_array)
end = time.time()
print('for循环时间', end - start)

start = time.time()
for _ in range(times):
    result = numba_for_sum_1(numpy_array)
end = time.time()
print('@jit时间', end - start)

start = time.time()
for _ in range(times):
    result = numba_for_sum_2(numpy_array)
end = time.time()
print('@jit(nopython=True)时间:', end - start)

start = time.time()
for _ in range(times):
    result = numba_for_sum_3(numpy_array)
end = time.time()
print('@jit(nopython=True, parallel=True)时间:', end - start)

View File

@@ -0,0 +1,20 @@
import numpy as np
import time

# Benchmark setup: 1e5-element float64 array; the call is repeated 1000 times.
numpy_array = np.arange(0,1e5,1)
times = 1000

from numba import jit
from numba import prange

@jit(nopython=True, parallel=True)
def numba_example(numpy_array):
    # Parallel loop sum: prange distributes iterations across threads and
    # numba turns the 'sum' accumulation into a reduction.
    sum = 0
    for i in prange(len(numpy_array)):
        sum += numpy_array[i]
    return sum

start = time.time()
for _ in range(times):
    result = numba_example(numpy_array)
end = time.time()
# Timing includes JIT compilation on the first of the 1000 calls.
print(f'运行时间:{end - start}')

View File

@@ -3,9 +3,9 @@ import pickle
data = [1, 2, 3]
# 保存为文件
with open('a.txt', 'wb') as f:
with open('a.pkl', 'wb') as f:
pickle.dump(data, f)
with open('a.txt', 'rb') as f:
with open('a.pkl', 'rb') as f:
data_load_from_file = pickle.load(f)
print(data_load_from_file)
print()

View File

@@ -1,9 +1,60 @@
a = [1, 2] # 数组
a = [1, 2] # list列表
print(a)
print(len(a)) # 数组长度
a.append(3) # 增加元素
print(type(a)) # 对象类型
print(id(a)) # 对象唯一标识符
print(len(a)) # 列表长度
a.append(3) # list列表增加元素
print(a)
b = range(5) # 数列从0开始
print(b)
for i0 in b:
print(i0)
print(sum(a)) # 求和
print(max(a)) # 最大值
print(min(a)) # 最小值
print(abs(-3.14)) # 绝对值
b1 = [2, -1, 3]
b2 = sorted(b1) # 排序,不改变原数列
print(b1)
print(b2)
b3 = list(reversed(b1)) # 反向并转为list列表
print(b1)
print(b3)
c = range(5) # 数列从0开始
print(c)
for i0 in c:
print(i0)
d1 = [1, 2, 3, 3, 2, 1, 1]
d2 = set(d1) # 转成集合,去除重复元素
print(d1)
print(d2)
print(list(d2))
print()
dict_data = {"name": "张三", "age": 30, "city": "北京"} # dict字典
print(dict_data)
print(type(dict_data))
print(dict_data.items())
for key, value in dict_data.items():
print(f'打印字典内容 {key} {value}')
print() # 打印空一行
print(all([True, True, False])) # 所有元素为真,结果为真
print(all([1, 2, True]))
print(any([True, True, False])) # 有一个是真,结果为真
print(any([0, None, ""]))
print()
e = 'abc'
print(e)
print(hash(e)) # 哈希值(如果是多次运行,对于相同的对象,返回的哈希值是不同的)
print(hash(e)) # 哈希值(同一个运行中多次调用 hash(),对于相同的对象,返回的哈希值是相同的)
print()
for i0 in range(3):
exec(f'''
a{i0} = {i0}
print(a{i0})
''') # 执行动态创建的代码
f = eval('1+2') # 执行表达式并返回值
print(f)
f = open('a.txt', 'w') # 打开文件
f.write('test') # 写入文件
f.close() # 关闭文件

View File

@@ -1,2 +1,2 @@
parameter=0
parameter = 0
print(f'hello world {parameter}')

View File

@@ -2,6 +2,10 @@ import guan
parameter_array = [1, 2, 3, 4]
guan.make_sh_file_for_bsub(sh_filename='a', command_line='python a.py', cpu_num=1, task_name='task', queue_name='score', cd_dir=0)
sh_filename = 'a'
task_name = 'task'
py_filename = 'a'
guan.copy_py_sh_file_and_bsub_task(parameter_array, py_filename='a', old_str_in_py='parameter=0', new_str_in_py='parameter=', sh_filename='a', bsub_task_name='task')
guan.make_sh_file_for_bsub(sh_filename=sh_filename, command_line=f'python {py_filename}.py', cpu_num=1, task_name=task_name, queue_name='score', cd_dir=0)
guan.copy_py_sh_file_and_bsub_task(parameter_array, py_filename=py_filename, old_str_in_py='parameter = 0', new_str_in_py='parameter = ', sh_filename=sh_filename, task_name=task_name)

View File

@@ -1,2 +1,2 @@
parameter=0
parameter = 0
print(f'hello world {parameter}')

View File

@@ -2,6 +2,10 @@ import guan
parameter_array = [1, 2, 3, 4]
guan.make_sh_file_for_qsub(sh_filename='a', command_line='python a.py', cpu_num=1, task_name='task', cd_dir=0)
sh_filename = 'a'
task_name = 'task'
py_filename = 'a'
guan.copy_py_sh_file_and_qsub_task(parameter_array=parameter_array, py_filename='a', old_str_in_py='parameter=0', new_str_in_py='parameter=', sh_filename='a', qsub_task_name='task')
guan.make_sh_file_for_qsub(sh_filename=sh_filename, command_line=f'python {py_filename}.py', cpu_num=1, task_name=task_name, cd_dir=0)
guan.copy_py_sh_file_and_qsub_task(parameter_array=parameter_array, py_filename=py_filename, old_str_in_py='parameter = 0 ', new_str_in_py='parameter = ', sh_filename=sh_filename, task_name=task_name)

View File

@@ -1,5 +1,4 @@
#!/bin/sh
#PBS -N task
#PBS -l nodes=1:ppn=1
#PBS -q bigmem
python a.py

View File

@@ -29,8 +29,6 @@ for parameter_str in parameter_str_array:
with open('a'+str(index)+'.py', 'w') as f: # 写入
f.write(content)
# 以下处理任务上传文件
old_file = 'a.sh'
new_file = 'a'+str(index)+'.sh'
@@ -50,7 +48,5 @@ for parameter_str in parameter_str_array:
with open('a'+str(index)+'.sh', 'w') as f: # 写入
f.write(content)
# 提交任务
os.system('qsub '+new_file)

View File

@@ -10,9 +10,9 @@ print(title)
print(stock_data[0])
num = 30
date_array = stock_data[0:num, 0]
opening_array = stock_data[0:num, 1]
closing_array = stock_data[0:num, 2]
high_array = stock_data[0:num, 3]
low_array = stock_data[0:num, 4]
opening_array = stock_data[0:num, 2]
closing_array = stock_data[0:num, 3]
high_array = stock_data[0:num, 4]
low_array = stock_data[0:num, 5]
guan.plot(date_array, closing_array, style='o-', xlabel='date', ylabel='price')
guan.plot_stock_line(date_array, opening_array, closing_array, high_array, low_array)

View File

@@ -44,7 +44,8 @@ stock_symbols_30 = []
for stock_symbol in stock_symbols:
find_300 = re.findall(r'^300', stock_symbol)
find_301 = re.findall(r'^301', stock_symbol)
if find_300 != [] or find_301 != []:
find_302 = re.findall(r'^302', stock_symbol)
if find_300 != [] or find_301 != [] or find_302 != []:
stock_symbols_30.append(stock_symbol)
num_stocks_30 = len(stock_symbols_30)
print('创业板股票数量:', num_stocks_30)
@@ -61,21 +62,19 @@ num_stocks_68= len(stock_symbols_68)
print('科创板股票数量:', num_stocks_68)
# print(stock_symbols_68)
# 新三板以及北交所
# 北交所和新三板
stock_symbols_8_4_9 = []
for stock_symbol in stock_symbols:
find_82 = re.findall(r'^82', stock_symbol)
find_83 = re.findall(r'^83', stock_symbol)
find_87 = re.findall(r'^87', stock_symbol)
find_88 = re.findall(r'^88', stock_symbol)
find_430 = re.findall(r'^430', stock_symbol)
find_420 = re.findall(r'^420', stock_symbol)
find_400 = re.findall(r'^400', stock_symbol)
find_920 = re.findall(r'^920', stock_symbol)
if find_82 != [] or find_83 != [] or find_87 != [] or find_88 != [] or find_430 != [] or find_420 != [] or find_400 != [] or find_920 != []:
if find_83 != [] or find_87 != [] or find_430 != [] or find_420 != [] or find_400 != [] or find_920 != []:
stock_symbols_8_4_9.append(stock_symbol)
num_stocks_8_4_9= len(stock_symbols_8_4_9)
print('新三板以及北交所股票数量:', num_stocks_8_4_9)
print('北交所和新三板股票数量:', num_stocks_8_4_9)
# print(stock_symbols_8_4)
print('所有股票数量:', num_stocks_60+num_stocks_00+num_stocks_30+num_stocks_68+num_stocks_8_4_9)

View File

@@ -1,4 +1,4 @@
# 数组分割示例
# 数组分割
import numpy as np
import guan
task_num = 4

View File

@@ -0,0 +1,7 @@
# Write a variable to a file with guan, then load it back and print it.
import guan
import numpy as np
data = np.array([1, 2, 3])
guan.dump_data(data, filename='a')  # NOTE(review): file extension/format is decided by guan — confirm in guan docs
loaded_data = guan.load_data(filename='a')
print(loaded_data)

View File

@@ -0,0 +1,16 @@
# Check whether a matrix is Hermitian (guan.is_hermitian; relative tolerance presumably 1e-5 — confirm in guan docs).
import guan
# matrix1: off-diagonal pair differs by 1e-5 in the real part — expected to pass the tolerance check.
matrix1 = [
    [2, 1.00001-1j],
    [1+1j, 1]
]
print(guan.is_hermitian(matrix1))
# matrix2: off-diagonal pair differs by 2e-5 — expected to fail the tolerance check.
matrix2 = [
    [2, 1.00002-1j],
    [1+1j, 1]
]
print(guan.is_hermitian(matrix2))

View File

@@ -0,0 +1,33 @@
# Loop-over-parameters calculation helpers from guan, for 1, 2 and 3 parameters.
import guan
import numpy as np

def test_1(x):
    # One-parameter test function.
    return 2*x

x_array = np.arange(0, 5, 1)
result_array = guan.loop_calculation_with_one_parameter(test_1, x_array)
print(result_array)
guan.plot(x_array, result_array)
print()

def test_2(x, y):
    # Two-parameter test function.
    return x+y

x_array = np.arange(0, 5, 1)
y_array = np.arange(0, 3, 1)
result_array = guan.loop_calculation_with_two_parameters(test_2, x_array, y_array)
print(result_array)
guan.plot_contour(x_array, y_array, result_array)
print()

def test_3(x, y, z):
    # Three-parameter test function.
    return x+y+z

x_array = np.arange(0, 5, 1)
y_array = np.arange(0, 3, 1)
z_array = np.arange(0, 2, 1)
result_array = guan.loop_calculation_with_three_parameters(test_3, x_array, y_array, z_array)
print(result_array)
# NOTE(review): the last axis index 4 presumably selects x_array[4] (a y-z slice at fixed x);
# the axis ordering of the returned array is defined by guan — confirm against guan docs.
guan.plot_contour(y_array, z_array, result_array[:, :, 4])

View File

@@ -0,0 +1,15 @@
# Parallel calculation via guan's multiprocessing.Pool wrapper.
import guan
import time
import os

def run_proc(name):
    # Worker: sleep 5 seconds, report PID and elapsed time, return a tagged name.
    start_time = time.time()
    time.sleep(5)
    end_time = time.time()
    print ('Process id running on name %s = %s' % (name, os.getpid()), '; running time = %s' % (end_time-start_time))
    return f'name_{name}'

if __name__ == '__main__':
    # Runs run_proc over 32 arguments in a process pool; show_time=1 presumably prints timing — confirm in guan docs.
    result_array = guan.parallel_calculation_with_multiprocessing_Pool(func=run_proc, args_list=range(32), show_time=1)
    print(result_array)

View File

@@ -0,0 +1,4 @@
# Print an array element-by-element with guan.print_array.
import guan
a = [1, 2, 3, 'a', 'b', 'c']
guan.print_array(a)

View File

@@ -0,0 +1,7 @@
# Running-time log: write timestamped log entries with guan.
import guan
import time
guan.logging_with_day_and_time(content='start')
for i in range(3):
    time.sleep(5)
    guan.logging_with_day_and_time(f'end_of_{i}')

View File

@@ -0,0 +1,25 @@
# Function timer examples: decorator form and direct-call form.
import guan

@guan.timer_decorator
def test1(a, b):
    # Decorated: guan reports the elapsed time of each call.
    import time
    print(a)
    time.sleep(1)
    print(b)
    print('Run finished.')

for _ in range(2):
    test1(10, b=20)
print()

def test2(a, b):
    # Undecorated version of the same body, timed via guan.timer below.
    import time
    print(a)
    time.sleep(1)
    print(b)
    print('Run finished.')

# guan.timer(func, *args, **kwargs) times a single call without decorating the function.
for _ in range(2):
    guan.timer(test2, 100, b=200)

View File

@@ -0,0 +1,22 @@
# Exception-swallowing helpers from guan; 'bug_code' is an intentional undefined
# name (NameError) used to demonstrate the error handling.
import guan

@guan.try_decorator
def test1(a, b):
    print(a)
    bug_code  # intentional NameError: execution stops here, decorator catches it
    print(b)
    return 'return_message1'

# Because the exception is raised before the return, result1 is presumably None — confirm guan's behavior.
result1 = test1(10, b=20)
print(result1)
print()

def test2(a, b):
    print(a)
    bug_code  # intentional NameError
    print(b)
    return 'return_message2'

# Direct-call form: guan.try_except(func, *args, **kwargs) wraps the call in try/except.
result2 = guan.try_except(test2, 100, b=200)
print(result2)

View File

@@ -0,0 +1,6 @@
# Write a matrix to files in Markdown and LaTeX table formats for inspection.
import guan
import numpy as np
matrix = np.random.rand(5, 5)
guan.write_matrix_in_markdown_format(matrix=matrix, filename='markdown_matrix')
guan.write_matrix_in_latex_format(matrix=matrix, filename='latex_matrix')

View File

@@ -1,4 +1,4 @@
# 能带图计算示例
# 能带图计算
import guan
import numpy as np
k_array = np.linspace(-np.pi, np.pi, 100)

View File

@@ -1,4 +1,4 @@
# 陈数和Wilson loop计算示例
# 陈数和Wilson loop计算
import guan
import numpy as np
chern_number = guan.calculate_chern_number_for_square_lattice_with_efficient_method(guan.hamiltonian_of_one_QAH_model, precision=100)

View File

@@ -1,4 +1,4 @@
# 电导和散射矩阵的计算示例
# 电导和散射矩阵的计算
import guan
import numpy as np

View File

@@ -1,4 +1,4 @@
# 使用格林函数计算态密度示例
# 使用格林函数计算态密度
import guan
import numpy as np

View File

@@ -1,4 +1,4 @@
# 波函数规范的选取示例
# 波函数规范的选取
import numpy as np
import cmath
import guan

View File

@@ -1,4 +1,4 @@
# 实空间哈密顿量的示例
# 实空间哈密顿量
import guan
print('\n', guan.hamiltonian_of_finite_size_system_along_one_direction(3), '\n')
print(guan.hamiltonian_of_finite_size_system_along_two_directions_for_square_lattice(2, 2), '\n')

View File

@@ -1,11 +0,0 @@
# 函数的计时器
import guan
@guan.timer_decorator
def my_function():
import time
time.sleep(2)
print('Run finished')
for _ in range(3):
my_function()

View File

@@ -0,0 +1,9 @@
MIT License
Copyright (c) 2025 Ji-Huan Guan
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@@ -0,0 +1,3 @@
## chat.guanjihuan.com
本仓库记录这篇博文中的代码:https://www.guanjihuan.com/archives/38502

View File

@@ -0,0 +1,15 @@
# pip install --upgrade huggingface_hub
# Download the full ChatGLM3-6B-32k model snapshot into a local directory.
from huggingface_hub import snapshot_download
snapshot_download(repo_id="THUDM/chatglm3-6b-32k", local_dir = './THUDM/chatglm3-6b-32k', local_dir_use_symlinks=False)
# # Selective download: first skip all *.bin weights, then fetch the shards one at a time
# # (useful for resuming or limiting bandwidth per run).
# snapshot_download(repo_id="THUDM/chatglm3-6b-32k", local_dir = './THUDM/chatglm3-6b-32k', ignore_patterns='*.bin', local_dir_use_symlinks=False)
# snapshot_download(repo_id="THUDM/chatglm3-6b-32k", local_dir = './THUDM/chatglm3-6b-32k', allow_patterns='*00001-of-00007.bin', local_dir_use_symlinks=False)
# snapshot_download(repo_id="THUDM/chatglm3-6b-32k", local_dir = './THUDM/chatglm3-6b-32k', allow_patterns='*00002-of-00007.bin', local_dir_use_symlinks=False)
# snapshot_download(repo_id="THUDM/chatglm3-6b-32k", local_dir = './THUDM/chatglm3-6b-32k', allow_patterns='*00003-of-00007.bin', local_dir_use_symlinks=False)
# snapshot_download(repo_id="THUDM/chatglm3-6b-32k", local_dir = './THUDM/chatglm3-6b-32k', allow_patterns='*00004-of-00007.bin', local_dir_use_symlinks=False)
# snapshot_download(repo_id="THUDM/chatglm3-6b-32k", local_dir = './THUDM/chatglm3-6b-32k', allow_patterns='*00005-of-00007.bin', local_dir_use_symlinks=False)
# snapshot_download(repo_id="THUDM/chatglm3-6b-32k", local_dir = './THUDM/chatglm3-6b-32k', allow_patterns='*00006-of-00007.bin', local_dir_use_symlinks=False)
# snapshot_download(repo_id="THUDM/chatglm3-6b-32k", local_dir = './THUDM/chatglm3-6b-32k', allow_patterns='*00007-of-00007.bin', local_dir_use_symlinks=False)

View File

@@ -0,0 +1,58 @@
"""
This code is supported by the website: https://www.guanjihuan.com
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/38502
"""
import streamlit as st
import ollama
import streamlit as st
st.set_page_config(
page_title="Chat",
layout='wide'
)
model_name = 'llama3.2'
prompt = st.chat_input("在这里输入您的内容")
def clear_all():
st.session_state.messages = []
st.session_state.ai_response = []
if 'messages' not in st.session_state:
st.session_state.messages = []
if 'ai_response' not in st.session_state:
st.session_state.ai_response = []
for ai_response in st.session_state.ai_response:
with st.chat_message(ai_response["role"], avatar=ai_response.get("avatar")):
st.markdown(ai_response["content"])
prompt_placeholder = st.chat_message("user", avatar='user')
with st.chat_message("robot", avatar="assistant"):
message_placeholder = st.empty()
def response_of_chat(prompt):
st.session_state.messages.append({'role': 'user', 'content': prompt})
response = ollama.chat(model=model_name, messages=st.session_state.messages, stream=True)
full_content = ''
for part in response:
full_content += part['message']['content']
message_placeholder.markdown(full_content)
if stop_button:
break
st.session_state.messages.append({'role': 'assistant',
'content': full_content})
st.session_state.ai_response.append({"role": "robot", "content": full_content, "avatar": "assistant"})
return full_content
if prompt:
prompt_placeholder.markdown(prompt)
st.session_state.ai_response.append({"role": "user", "content": prompt, "avatar": 'user'})
stop = st.empty()
stop_button = stop.button('停止', key='break_response')
response = response_of_chat(prompt)
stop.empty()
button_clear = st.button("清空", on_click=clear_all, key='clear')

View File

@@ -0,0 +1,105 @@
"""
This code is supported by the website: https://www.guanjihuan.com
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/38502
"""
import streamlit as st
st.set_page_config(
page_title="Chat",
layout='wide'
)
choose_load_method = 1 # 选择加载模型的方式
if choose_load_method == 0:
# 默认加载需要13G显存
@st.cache_resource
def load_model_chatglm3():
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm3-6b-32k", trust_remote_code=True)
model = AutoModel.from_pretrained("THUDM/chatglm3-6b-32k",trust_remote_code=True).half().cuda()
model = model.eval()
return model, tokenizer
model_chatglm3, tokenizer_chatglm3 = load_model_chatglm3()
elif choose_load_method == 1:
# 量化加载需要6G显存
@st.cache_resource
def load_model_chatglm3():
from transformers import AutoTokenizer, BitsAndBytesConfig, AutoModelForCausalLM
tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm3-6b-32k", trust_remote_code=True)
nf4_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_quant_type="nf4",
)
model = AutoModelForCausalLM.from_pretrained("THUDM/chatglm3-6b-32k", trust_remote_code=True, quantization_config=nf4_config)
model = model.eval()
return model, tokenizer
model_chatglm3, tokenizer_chatglm3 = load_model_chatglm3()
elif choose_load_method == 2:
# 在CPU上加载需要25G内存对话速度会比较慢不推荐
@st.cache_resource
def load_model_chatglm3():
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm3-6b-32k", trust_remote_code=True)
model = AutoModel.from_pretrained("THUDM/chatglm3-6b-32k",trust_remote_code=True).float()
model = model.eval()
return model, tokenizer
model_chatglm3, tokenizer_chatglm3 = load_model_chatglm3()
with st.sidebar:
with st.expander('参数', expanded=True):
max_length = 409600
top_p = st.slider('top_p', 0.01, 1.0, step=0.01, value=0.8, key='top_p_session')
temperature = st.slider('temperature', 0.51, 1.0, step=0.01, value=0.8, key='temperature_session')
def reset_parameter():
st.session_state['top_p_session'] = 0.8
st.session_state['temperature_session'] = 0.8
reset_parameter_button = st.button('重置', on_click=reset_parameter)
prompt = st.chat_input("在这里输入您的命令")
def chat_response_chatglm3(prompt):
history, past_key_values = st.session_state.history_ChatGLM3, st.session_state.past_key_values_ChatGLM3
for response, history, past_key_values in model_chatglm3.stream_chat(tokenizer_chatglm3, prompt, history,
past_key_values=past_key_values,
max_length=max_length, top_p=top_p,
temperature=temperature,
return_past_key_values=True):
message_placeholder_chatglm3.markdown(response)
if stop_button:
break
st.session_state.ai_response.append({"role": "robot", "content": response, "avatar": "assistant"})
st.session_state.history_ChatGLM3 = history
st.session_state.past_key_values_ChatGLM3 = past_key_values
return response
def clear_all():
st.session_state.history_ChatGLM3 = []
st.session_state.past_key_values_ChatGLM3 = None
st.session_state.ai_response = []
if 'history_ChatGLM3' not in st.session_state:
st.session_state.history_ChatGLM3 = []
if 'past_key_values_ChatGLM3' not in st.session_state:
st.session_state.past_key_values_ChatGLM3 = None
if 'ai_response' not in st.session_state:
st.session_state.ai_response = []
for ai_response in st.session_state.ai_response:
with st.chat_message(ai_response["role"], avatar=ai_response.get("avatar")):
st.markdown(ai_response["content"])
prompt_placeholder = st.chat_message("user", avatar='user')
with st.chat_message("robot", avatar="assistant"):
message_placeholder_chatglm3 = st.empty()
if prompt:
prompt_placeholder.markdown(prompt)
st.session_state.ai_response.append({"role": "user", "content": prompt, "avatar": 'user'})
stop = st.empty()
stop_button = stop.button('停止', key='break_response')
chat_response_chatglm3(prompt)
stop.empty()
button_clear = st.button("清空", on_click=clear_all, key='clear')

View File

@@ -0,0 +1,33 @@
# basic requirements
protobuf>=4.25.2
transformers>=4.36.2
tokenizers>=0.15.0
cpm_kernels>=1.0.11
torch>=2.1.0
gradio>=4.14.0
sentencepiece>=0.1.99
sentence_transformers>=2.2.2
accelerate>=0.26.1
streamlit>=1.30.0
fastapi>=0.109.0
loguru~=0.7.2
mdtex2html>=1.2.0
latex2mathml>=3.77.0
# for openai demo
openai>=1.7.2
zhipuai>=2.0.0
pydantic>=2.5.3
sse-starlette>=1.8.2
uvicorn>=0.25.0
timm>=0.9.12
tiktoken>=0.5.2
# for langchain demo
langchain>=0.1.0
langchainhub>=0.1.14
arxiv>=2.1.0

View File

@@ -0,0 +1,112 @@
"""
This code is supported by the website: https://www.guanjihuan.com
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/38502
"""
import streamlit as st
st.set_page_config(
page_title="Chat",
layout='wide'
)
@st.cache_resource
def load_model_internlm_7B():
# internlm需要 7G 显存)
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
nf4_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_quant_type="nf4",
)
model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True, quantization_config=nf4_config)
tokenizer = AutoTokenizer.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True, torch_dtype=torch.bfloat16)
model = model.eval()
return model, tokenizer
model_internlm_7B, tokenizer_internlm_7B = load_model_internlm_7B()
with st.sidebar:
with st.expander('参数', expanded=True):
max_length = 409600
top_p = st.slider('top_p', 0.01, 1.0, step=0.01, value=0.8, key='top_p_session')
temperature = st.slider('temperature', 0.51, 1.0, step=0.01, value=0.8, key='temperature_session')
def reset_parameter():
st.session_state['top_p_session'] = 0.8
st.session_state['temperature_session'] = 0.8
reset_parameter_button = st.button('重置', on_click=reset_parameter)
prompt = st.chat_input("在这里输入您的命令")
from tools.transformers.interface import GenerationConfig, generate_interactive
def prepare_generation_config():
generation_config = GenerationConfig(max_length=max_length, top_p=top_p, temperature=temperature)
return generation_config
def combine_history(prompt, messages):
total_prompt = ""
for message in messages:
cur_content = message["content"]
if message["role"] == "user":
cur_prompt = user_prompt.replace("{user}", cur_content)
elif message["role"] == "robot":
cur_prompt = robot_prompt.replace("{robot}", cur_content)
else:
raise RuntimeError
total_prompt += cur_prompt
total_prompt = total_prompt + cur_query_prompt.replace("{user}", prompt)
return total_prompt
user_prompt = "<|User|>:{user}<eoh>\n"
robot_prompt = "<|Bot|>:{robot}<eoa>\n"
cur_query_prompt = "<|User|>:{user}<eoh>\n<|Bot|>:"
generation_config = prepare_generation_config()
if "messages_internlm_7B" not in st.session_state:
st.session_state.messages_internlm_7B = []
from dataclasses import asdict
def chat_response_internlm_7B(prompt):
real_prompt = combine_history(prompt, messages = st.session_state.messages_internlm_7B)
st.session_state.messages_internlm_7B.append({"role": "user", "content": prompt, "avatar": 'user'})
for cur_response in generate_interactive(
model=model_internlm_7B,
tokenizer=tokenizer_internlm_7B,
prompt=real_prompt,
additional_eos_token_id=103028,
**asdict(generation_config),
):
message_placeholder_internlm_7B.markdown(cur_response + "")
if stop_button:
break
message_placeholder_internlm_7B.markdown(cur_response)
st.session_state.messages_internlm_7B.append({"role": "robot", "content": cur_response, "avatar": "assistant"})
st.session_state.ai_response.append({"role": "robot", "content": cur_response, "avatar": "assistant"})
return cur_response
def clear_all():
    """Reset both conversation histories kept in the Streamlit session."""
    for state_key in ("messages_internlm_7B", "ai_response"):
        st.session_state[state_key] = []
# (Re)initialize session histories so reruns never hit a missing key.
if 'messages_internlm_7B' not in st.session_state:
    st.session_state.messages_internlm_7B = []
if 'ai_response' not in st.session_state:
    st.session_state.ai_response = []
# Replay the stored conversation so it survives Streamlit reruns.
for ai_response in st.session_state.ai_response:
    with st.chat_message(ai_response["role"], avatar=ai_response.get("avatar")):
        st.markdown(ai_response["content"])
# Placeholders for the upcoming user message and the streamed bot reply.
prompt_placeholder = st.chat_message("user", avatar='user')
with st.chat_message("robot", avatar="assistant"):
    message_placeholder_internlm_7B = st.empty()
if prompt:
    prompt_placeholder.markdown(prompt)
    st.session_state.ai_response.append({"role": "user", "content": prompt, "avatar": 'user'})
    # Show a '停止' (stop) button only while a response is being generated.
    stop = st.empty()
    stop_button = stop.button('停止', key='break_response')
    chat_response_internlm_7B(prompt)
    stop.empty()
# '清空' (clear) button wipes both history lists.
button_clear = st.button("清空", on_click=clear_all, key='clear')

View File

@@ -0,0 +1,111 @@
本目录提供辅助模型训练的一些工具,文件结构如下所示:
```bash
├── transformers # 适配hugging face的transformers的一些工具
│ ├── configuration_internlm.py # config适配工具
│ ├── modeling_internlm.py # model适配工具
│ ├── tokenization_internlm.py # tokenizer适配工具
│ └── convert2hf.py # 模型适配hugging face工具
└── tokenizer.py # 将原始数据转换成bin和meta文件的工具
```
# tokenizer.py
生成原始数据的`bin`和`meta`文件需要使用`tokenizer`,我们通过在`tools/tokenizer.py`中指定模型参数路径的方式来导入tokenizer模型。目前我们提供了`V7_sft.model`来生成tokens。若想使用不同的模型可直接修改`tokenizer.py`中的模型参数路径。
可以运行以下命令生成原始数据对应的`bin``meta`文件,其中参数`text_input_path`表示原始文本数据路径,目前支持`txt``json``jsonl`三种输入格式,`bin_output_path`表示生成的`bin`文件的保存路径。
```bash
$ python tools/tokenizer.py --text_input_path your_input_text_path --bin_output_path your_output_bin_path
```
下面是一个数据处理的例子:
给定一个包含原始数据集的文件`raw_data.txt`,原始数据集如下所示:
```bash
感恩生活中的每一个细节,才能真正体会到幸福的滋味。
梦想是人生的动力源泉,努力追逐,才能实现自己的目标。
学会宽容和理解,才能建立真正和谐的人际关系。
```
可以通过运行以下命令来生成`bin``meta`文件:
```bash
$ python tools/tokenizer.py --text_input_path raw_data.txt --bin_output_path cn/output.bin
```
需要注意的是,生成的`bin`文件需要保存在`cn`或者`en`或者`code`或者`ja`或者`ar`或者`kaoshi`这五个目录下,以区分数据集的类型。
其中,`cn`表示中文数据集;`en`表示英文数据集;`code`表示代码数据集;`ja`表示日语数据集;`ar`表示阿拉伯语数据集;`kaoshi`表示考试数据集。
生成的bin文件的格式如下
```python
{"tokens": [73075, 75302, 69522, 69022, 98899, 67713, 68015, 81269, 74637, 75445, 99157]}
{"tokens": [69469, 60355, 73026, 68524, 60846, 61844, 98899, 67775, 79241, 98899, 67713, 67800, 67453, 67838, 99157]}
{"tokens": [68057, 79017, 60378, 68014, 98899, 67713, 67990, 68015, 70381, 67428, 61003, 67622, 99157]}
```
`bin`文件中的每一行均对应原始数据集中的每一个句子,表示每个句子的`token`下文将用sequence指定
生成的`meta`文件的格式如下:
```bash
(0, 11), (90, 15), (208, 13)
```
`meta`文件中,每个元组对应着`bin`文件中每一个`sequence`的元信息。其中,元组的第一个元素表示每个`sequence`在所有`sequence`中的`starting index`,第二个元素表示每个`sequence`中有多少个`tokens`
例如,对于第一个`sequence``starting index`为 0有 11 个`tokens`;对于第二个`sequence`,由于第一个`sequence`转换为`string`后的长度为`89`,因此它的`starting index`为 90有 15 个`tokens`
`json``jsonl`类型的文件的`bin``meta`文件格式和`txt`一致,此处不再赘叙。
# pal_inference.py
在 [GSM8K](https://huggingface.co/datasets/gsm8k) 数据集上使用 [PAL](https://github.com/reasoning-machines/pal) 范式推理,使模型编写代码并通过 Python 解释器执行来解决数学问题。其用法如下:
```python
# 用法:
python pal_inference.py <model> <out_dir> [--dataset <dataset>] [--max_length <length>] [--top_p <threshold>] [--eoh <end token>] [--eoa <end token>] [--eos <end token>] [--temperature <temp>] [--time_out <time>] [--verbose, -v] [--append, -a]
# 参数:
# <model> 用于推理的模型的路径。
# <out_dir> 生成代码将保存在指定的输出文件夹中。
# 可选参数:
# --dataset <dataset> 用于代码生成的数据集名称默认gsm8k
# --max_length <length> 模型最大输入 token 长度默认2048
# --top_p <threshold> 候选 token 相加的概率阈值默认0.8)。
# --eoh <end token> 用户输入结束标识符 (默认: "") 。
# --eoa <end token> 模型输入结束标识符 (默认: "") 。
# --eos <end token> 系统输入结束标识符. (默认: "") 。
# --temperature -t <temp> 生成过程中的采样温度默认1.0)。
# --time_out <time> 执行生成的代码的最大时间默认100
# --verbose, -v 打印代码错误信息(可选)。
# --append, -a 将输出追加到历史结果中(可选)。
```
以下是使用示例:
```bash
python tools/pal_inference.py internlm/internlm-chat-7k ./output -v
```
其输出文件每一行包括输入的问题,正确答案,执行答案,得分,以及模型生成的 Python 代码块:
````json
{
"question": "Janet\u2019s ducks lay 16 eggs per day. She eats three for breakfast every morning and bakes muffins for her friends every day with four. She sells the remainder at the farmers' market daily for $2 per fresh duck egg. How much in dollars does she make every day at the farmers' market?",
"target": 18.0,
"answer": 18.0,
"score": 1,
"generation": ["```python\ndef solution():\n eggs_per_day = 16\n eggs_per_breakfast = 3\n eggs_per_muffin = 4\n eggs_used = eggs_per_day - eggs_per_breakfast - eggs_per_muffin\n eggs_sold = eggs_used\n price_per_egg = 2\n eggs_made = eggs_sold * price_per_egg\n result = eggs_made\n return result\n```"]
}
````
InternLM 在 GSM8K 数据集中带工具和不带工具的性能表现:
| Method | **InternLM-Chat-7B** |
| -------- | -------------------- |
| w/o tool | 34.5 |
| w tool | 39.2 |

View File

@@ -0,0 +1,109 @@
This directory provide some tools for model training with the following file structure.
```bash
├── transformers # tools for adapting Hugging Face's transformers
│ ├── configuration_internlm.py # tools for adapting config
│ ├── modeling_internlm.py # tools for adapting model
│ └── tokenization_internlm.py # tools for adapting tokenizer
│ └── convert2hf.py # tools for adapting models to Hugging Face's format
└── tokenizer.py # tools for generating `bin` and `meta` file for raw data
```
# tokenizer.py
We need to use a `tokenizer` to generate `bin` and `meta` files for raw data. We import the tokenizer model by specifying the model weight path in `tools/tokenizer.py`. Currently, we provide `V7.model` to generate tokens. If you want to use a different model, you can modify the model weight path in `tokenizer.py` directly.
We can run the following command to generate `bin` and `meta` files corresponding to the original data. The parameter `text_input_path` represents the path of the original text data, currently supporting `txt`, `json`, and `jsonl` formats, while `bin_output_path` represents the save path of the generated `bin` files.
```bash
$ python tools/tokenizer.py --text_input_path your_input_text_path --bin_output_path your_output_bin_path
```
An example of data processing in `txt` format is given here:
Given a file `raw_data.txt` containing raw data with the following content.
```bash
Appreciate every detail in life to truly taste the flavor of happiness.
Dreams are the source of lifes motivation. Pursue them diligently to achieve your goals.
Learn to be tolerant and understanding to establish truly harmonious interpersonal relationships.
```
Next, we can run the following command to generate `bin` and `meta` files for raw data.
```bash
$ python tools/tokenizer.py --text_input_path your_input_text_path --bin_output_path your_output_bin_path
```
It should be noted that the generated `bin` files should be placed in one of the following directories to clarify the data type: `cn`(Chinese), `en`(English), `code`(code data), `ja`(Japanese), `ar`(Arabic) and `kaoshi`(kaoshi data).
The format of generated `bin` file is as follows.
```python
{"tokens": [98655, 2317, 2922, 6649, 1595, 7856, 435, 2424, 442, 9556, 12807, 410, 17313, 446, 23331, 95746]}
{"tokens": [98655, 302, 1383, 269, 657, 410, 2687, 446, 2424, 98667, 269, 25220, 281, 523, 1874, 492, 1248, 38127, 4563, 442, 11227, 829, 8980, 95746]}
{"tokens": [98655, 24190, 442, 517, 15013, 649, 454, 8793, 442, 5849, 9556, 17917, 1369, 1084, 29890, 12021, 95746]}
```
In the generated `bin` file, each line (`sequence`) corresponds to the `tokens` for each sentence in the raw data.
The format of generated `meta` file in as follows.
```bash
(0, 16), (110, 24), (262, 17)
```
Each tuple in the `meta` file represents the meta information of each `sequence` where the first element in the tuple indicates the `starting index` of each `sequence` among all `sequences` and the second element indicates the amount of `tokens` for each `sequence`.
For example, the `starting index` is 0 for the first `sequence` with 16 `tokens`. Since the length of the first `sequence` in `string` format is 109, the `starting index` of the second `sequence` is 110, and it has 24 `tokens`.
The `bin` and `meta` file formats for `json` and `jsonl` type files are the same as for `txt`, so we won't go over them here.
# pal_inference.py
Perform reasoning using [PAL](https://github.com/reasoning-machines/pal) on the [GSM8K](https://huggingface.co/datasets/gsm8k) dataset, allowing the model to generate code and solve mathematical problems through Python interpretation. Here's how you can use it:
```bash
# Usage:
python pal_inference.py <model> <out_dir> [--dataset <dataset>] [--max_length <length>] [--top_p <threshold>] [--eoh <end token>] [--eoa <end token>] [--eos <end token>] [--temperature <temp>] [--time_out <time>] [--verbose, -v] [--append, -a]
# Parameters:
# <model> Path to the model used for inference.
# <out_dir> Generated code will be saved in the specified output folder.
# Optional arguments:
# --dataset <dataset> Dataset name used for code generation (default: gsm8k).
# --max_length <length> Model's maximum input token length (default: 2048).
# --top_p <threshold> Probability threshold for candidate tokens (default: 0.8).
# --eoh <end token> End of human (user) token. (default: "").
# --eoa <end token> End of assistant (bot) token. (default: "").
# --eos <end token> End of system token. (default: "").
# --temperature, -t <temp> Sampling temperature during generation (default: 1.0).
# --time_out <time> Maximum time (in seconds) for executing the generated code (default: 100).
# --verbose, -v Print code error messages (optional).
#   --append, -a           Append the output to historical results (optional).
```
Below is an example of usage:
```bash
python tools/pal_inference.py internlm/internlm-chat-7k ./output -v
```
The output file contains each line with the input question, the correct answer, the executed answer, the score, and the Python code block generated by the model:
````json
{
"question": "Janet\u2019s ducks lay 16 eggs per day. She eats three for breakfast every morning and bakes muffins for her friends every day with four. She sells the remainder at the farmers' market daily for $2 per fresh duck egg. How much in dollars does she make every day at the farmers' market?",
"target": 18.0,
"answer": 18.0,
"score": 1,
"generation": ["```python\ndef solution():\n eggs_per_day = 16\n eggs_per_breakfast = 3\n eggs_per_muffin = 4\n eggs_used = eggs_per_day - eggs_per_breakfast - eggs_per_muffin\n eggs_sold = eggs_used\n price_per_egg = 2\n eggs_made = eggs_sold * price_per_egg\n result = eggs_made\n return result\n```"]
}
````
InternLM performance in the GSM8K dataset with and without tools:
| Method | **InternLM-Chat-7B** |
| -------- | -------------------- |
| w/o tool | 34.5 |
| w tool | 39.2 |

View File

@@ -0,0 +1,164 @@
import argparse
import json
import os.path as osp
from pathlib import Path
import numpy as np
import sentencepiece as spm
from tqdm import tqdm
def process(dataset_path, sp_model):
    """Process data sample from input dataset

    Args:
        dataset_path (str): Path of dataset json file.
        sp_model (str): Path of tokenizer.

    Yields:
        tuple: dumped processed data sample and length of tokens.
    """
    # Fix: close the dataset file deterministically instead of leaking the
    # handle from the previous `json.load(open(dataset_path))`.
    with open(dataset_path) as dataset_file:
        dataset = json.load(dataset_file)
    for data in dataset:
        yield tokenize(get_chat_format_data(data), sp_model)
def get_chat_format_data(ori_data):
    """Format original data

    Args:
        ori_data (dict): input data sample.

    Returns:
        dict: data sample with chat format.
    """
    instruction_str = ori_data["instruction"]
    input_str = ori_data["input"]
    # A non-empty free-form input is appended on a new line after the instruction.
    if input_str != "":
        user_turn = f"<|User|>:{instruction_str}\n{input_str}"
    else:
        user_turn = f"<|User|>:{instruction_str}"
    return {
        "user": user_turn,
        "bot": f"<|Bot|>:{ori_data['output']}",
    }
def tokenize(sample, sp_model):
    """Tokenize input dataset

    Args:
        sample (dict): Input data sample.
        sp_model (str): Path of tokenizer.

    Returns:
        tuple: dumped processed data sample and length of tokens.
    """
    special_tokens_map = {"<eoh>": 103167, "<eoa>": 103166, "nl_id": 13}
    user_text = sample["user"]
    bot_text = sample["bot"]

    # Human-turn token ids are negated — presumably so downstream loss
    # masking can skip them; confirm against the training pipeline.
    human_ids = sp_model.encode(user_text) + [special_tokens_map["<eoh>"], special_tokens_map["nl_id"]]
    masked_human_ids = [-tid for tid in human_ids]

    # The "<|Bot|>:" template prefix is masked the same way; only the reply
    # text itself stays positive.
    masked_template_ids = [-tid for tid in sp_model.encode("<|Bot|>:")]
    bot_ids = (
        masked_template_ids
        + sp_model.encode(bot_text[8:])  # drop the literal "<|Bot|>:" prefix (8 chars)
        + [special_tokens_map["<eoa>"], special_tokens_map["nl_id"]]
    )

    token_ids = [sp_model.bos_id()] + masked_human_ids + bot_ids
    # Truncate so that, with the trailing EOS, a line never exceeds 2048 ids.
    if len(token_ids) > 2047:
        token_ids = token_ids[:2047]
    token_ids.append(sp_model.eos_id())

    line = (json.dumps({"tokens": token_ids}) + "\n").encode()
    return line, len(token_ids)
def dump_bin_meta_bin(samples, path, split_ratio=0.1):
    """Dump processed dataset

    Args:
        samples (list): (serialized line, token count) tuples, as produced
            by :func:`tokenize`.
        path (str): Path for output dataset.
        split_ratio (float): Ratio for validation dataset splitting.
            Default to: 0.1.

    Returns:
        tuple: number of train/valid tokens of processed dataset,
            number of train/valid samples of processed dataset.
    """
    train_dir = Path(osp.join(path, "train/en/"))
    valid_dir = Path(osp.join(path, "valid/en/"))
    train_dir.mkdir(exist_ok=True, parents=True)
    valid_dir.mkdir(exist_ok=True, parents=True)

    train_tokens = 0
    valid_tokens = 0
    last_train_position = 0
    last_valid_position = 0
    train_samples = 0
    valid_samples = 0
    train_meta = []
    valid_meta = []

    # Deterministic validation pick (seeded). NOTE: np.random.choice samples
    # with replacement, so the realized validation fraction can be slightly
    # below split_ratio when duplicates are drawn.
    sample_length = len(samples)
    np.random.seed(0)
    valid_indices = np.random.choice(range(sample_length), int(sample_length * split_ratio)).tolist()

    # Fix: use context managers so both bin files are closed even if a write fails.
    with open(train_dir.joinpath("dataset.bin"), "wb") as train_f, open(
        valid_dir.joinpath("dataset.bin"), "wb"
    ) as valid_f:
        count = -1
        for line, token_num in samples:
            count += 1
            if count in valid_indices:
                valid_tokens += token_num
                valid_f.write(line)
                # meta entry: (byte offset of this line, token count)
                valid_meta.append((last_valid_position, token_num))
                last_valid_position += len(line)
                valid_samples += 1
            else:
                train_tokens += token_num
                train_f.write(line)
                train_meta.append((last_train_position, token_num))
                last_train_position += len(line)
                train_samples += 1

    with open(train_dir.joinpath("dataset.bin.meta"), "wb") as train_meta_f:
        np.save(train_meta_f, train_meta)
    with open(valid_dir.joinpath("dataset.bin.meta"), "wb") as valid_meta_f:
        np.save(valid_meta_f, valid_meta)

    return train_tokens, valid_tokens, train_samples, valid_samples
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("dataset_path", type=str, help="path of dataset json file")
    parser.add_argument("output_path", type=str, help="path of processed dataset")
    parser.add_argument("tokenizer_path", type=str, help="path of tokenizer")
    parser.add_argument("--split_ratio", type=float, default=0.1, help="ratio for validation dataset splitting")
    args = parser.parse_args()
    sp_model = spm.SentencePieceProcessor(model_file=args.tokenizer_path)
    # NOTE(review): `split_ratio` below is assigned but unused;
    # `args.split_ratio` is passed directly to dump_bin_meta_bin.
    split_ratio = args.split_ratio
    samples = []
    # Materialize the generator so dump_bin_meta_bin can call len() on it.
    dataset = process(args.dataset_path, sp_model)
    for sample in tqdm(dataset):
        samples.append(sample)
    train_tokens, valid_tokens, train_samples, valid_samples = dump_bin_meta_bin(
        samples, args.output_path, args.split_ratio
    )
    print(f"number of train dataset: {train_samples}, number of train dataset token: {train_tokens}")
    print(f"number of validation dataset: {valid_samples}, number of validation dataset token: {valid_tokens}")

View File

@@ -0,0 +1,320 @@
# This file is modified from:
# https://github.com/reasoning-machines/pal/blob/main/pal/core/interface.py
#
# Copyright 2022 PAL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import copy
import json
import os
from dataclasses import asdict
from typing import Any, Dict, List
import torch
import tqdm
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from tools.transformers.interface import GenerationConfig, generate_interactive
from internlm.utils.timeout import Timeout
def parse_args():
    """Build and parse the command-line arguments for PAL inference."""
    parser = argparse.ArgumentParser(description="PAL Inference")

    # Required positionals.
    parser.add_argument("model", type=str, help="Path to the pre-trained LLM used for inference.")
    parser.add_argument(
        "out_dir", type=str, help="Name of the output folder where generated code snippets will be saved."
    )

    # Optional generation / execution settings.
    parser.add_argument("--dataset", default="gsm8k", type=str, help="Name of the dataset used for code generation.")
    parser.add_argument(
        "--max_length", default=2048, type=int, help="Maximum input token length for the natural language description."
    )
    parser.add_argument(
        "--top_p", default=0.8, type=float, help="Probability threshold to choose sample tokens during generation."
    )
    parser.add_argument("--eoh", default="", type=str, help="End of human (user) token.")
    parser.add_argument("--eoa", default="", type=str, help="End of assistant (bot) token.")
    parser.add_argument("--eos", default="", type=str, help="End of system token.")
    parser.add_argument(
        "--temperature", "-t", default=1.0, type=float, help="Temperature of token sampling during generation."
    )
    parser.add_argument(
        "--time_out", default=100, type=float, help="Maximum time allowed for executing generated code."
    )
    parser.add_argument(
        "--verbose",
        "-v",
        action="store_true",
        help="Print code error information when executing generated code (optional).",
    )
    parser.add_argument("--append", "-a", action="store_true", help="Append output to the history results (optional).")

    return parser.parse_args()
class GenericRuntime:
    """Minimal runtime that executes generated Python in its own namespace.

    Adapted from https://github.com/reasoning-machines/pal

    NOTE(review): code runs through plain ``exec``/``eval`` and is NOT
    sandboxed — only feed it model output you are prepared to trust.
    """

    GLOBAL_DICT: dict = {}
    LOCAL_DICT = None
    HEADERS: List = []

    def __init__(self):
        self._global_vars = copy.copy(self.GLOBAL_DICT)
        self._local_vars = copy.copy(self.LOCAL_DICT) if self.LOCAL_DICT else None
        # Pre-load any header snippets so later code can rely on them.
        for header_code in self.HEADERS:
            self.exec_code(header_code)

    def exec_code(self, code_piece: str) -> None:
        """Execute a code snippet inside the runtime's global namespace."""
        exec(code_piece, self._global_vars)

    def eval_code(self, expr: str) -> Any:
        """Evaluate an expression inside the runtime's global namespace."""
        return eval(expr, self._global_vars)

    def inject(self, var_dict: Dict[str, Any]) -> None:
        """Copy the given variables into the runtime's global namespace."""
        self._global_vars.update(var_dict)

    @property
    def answer(self):
        # Convention: executed code stores its result in a variable `answer`.
        return self._global_vars["answer"]
class PALInterface:
    """PAL interface wrapping :func:`generate_interactive` to extract and
    execute generated code.

    Adapted from https://github.com/reasoning-machines/pal

    Args:
        model (AutoModelForCausalLM)
        tokenizer (AutoTokenizer)
        generation_config (GenerationConfig): Decode strategies
        additional_eos_token_id (int): End of sentence token id, default: 103028
        get_answer_expr (str): The function name of generated code, default: "solution()"
        verbose (bool): Print error information
    """

    def __init__(
        self,
        model: AutoModelForCausalLM,
        tokenizer: AutoTokenizer,
        generation_config: GenerationConfig,
        additional_eos_token_id: int = 103028,
        get_answer_expr: str = "solution()",
        verbose: bool = False,
    ):
        self.runtime = GenericRuntime()
        self.history: List = []
        self.model = model
        self.tokenizer = tokenizer
        self.generation_config = generation_config
        self.additional_eos_token_id = additional_eos_token_id
        self.answer_expr = get_answer_expr
        self.verbose = verbose

    def generate(self, prompt):
        """Stream a completion for *prompt* and return the extracted code lines.

        The API yields the partial response word by word; only the last
        (complete) generation is kept as the final result.
        """
        # NOTE(review): if the generator yields nothing, `cur_gen` is unbound
        # below — assumed impossible with the current generation settings.
        for cur_gen in generate_interactive(
            model=self.model,
            tokenizer=self.tokenizer,
            prompt=prompt,
            additional_eos_token_id=self.additional_eos_token_id,
            **asdict(self.generation_config),
        ):
            continue
        # Get final response
        self.history.append(cur_gen)
        # Extract code block
        code = self.process_generation_to_code(cur_gen)
        return code

    def process_generation_to_code(self, gens: str):
        """Strip the markdown fence (```python ... ```) and split into lines."""
        if "```python" in gens:
            gens = gens.split("```python")[1].split("```")[0]
        elif "```" in gens:
            gens = gens.split("```")[1].split("```")[0]
        code = gens.split("\n")
        return code

    def run(self, prompt, time_out: float = 100):
        """Generate code for *prompt*, execute it, and return the result.

        Returns:
            The value of ``self.answer_expr`` evaluated in the runtime, or an
            empty string when execution fails.  (Fix: previously a failed
            execution fell through to ``return exec_result`` with the name
            never bound, raising ``UnboundLocalError`` instead of returning.)
        """
        code = self.generate(prompt)
        exec_result = ""
        with Timeout(time_out):
            try:
                exec_result = self.execute(code)
            except Exception as e:
                if self.verbose:
                    print(e)
        return exec_result

    def execute(self, code: List[str]):
        """Run the code lines in the runtime and evaluate the answer expression."""
        self.runtime.exec_code("\n".join(code))
        return self.runtime.eval_code(self.answer_expr)

    def clear_history(self):
        """Forget all recorded generations."""
        self.history = []
def load_model(args):
    """Load the causal LM and tokenizer from ``args.model``.

    The model is cast to bfloat16 and moved to the CUDA device.
    ``trust_remote_code=True`` is required because InternLM ships custom
    modeling code with its checkpoints.
    """
    model = AutoModelForCausalLM.from_pretrained(args.model, trust_remote_code=True).to(torch.bfloat16).cuda()
    tokenizer = AutoTokenizer.from_pretrained(args.model, trust_remote_code=True)
    return model, tokenizer
def load_data(args):
    """Load the evaluation split of the requested dataset from Hugging Face.

    Only ``gsm8k`` is supported; each item becomes a dict with the question
    text and the numeric gold answer parsed from the "#### <value>" suffix.
    """
    if args.dataset != "gsm8k":
        raise NotImplementedError
    test_split = load_dataset(path=args.dataset, name="main")["test"]
    input_data = []
    for record in test_split:
        gold = float(record["answer"].split("#")[-1].replace(",", ""))
        input_data.append({"question": record["question"], "target": gold})
    return input_data
PROMPT = """<|System|>:You are a helpful assistant which use tools to solve mathematical reasoning questions. The tools you can use are:
PythonExecutor: It can execute Python code. The code must be a function, and the function name must be 'solution'. The example format is as follows:
```python
def solution():
variable_names_with_real_meaning = func(variable)
return variable_names_with_real_meaning
```{eos}
<|User|>:Olivia has $23. She bought five bagels for $3 each. How much money does she have left?{eoh}
<|Bot|>:
```python
def solution():
money_initial = 23
bagels = 5
bagel_cost = 3
money_spent = bagels * bagel_cost
money_left = money_initial - money_spent
result = money_left
return result
```{eoa}
<|User|>:Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday, he lost 2 more. How many golf balls did he have at the end of wednesday?{eoh}
<|Bot|>:
```python
def solution():
golf_balls_initial = 58
golf_balls_lost_tuesday = 23
golf_balls_lost_wednesday = 2
golf_balls_left = golf_balls_initial - golf_balls_lost_tuesday - golf_balls_lost_wednesday
result = golf_balls_left
return result
```{eoa}
<|User|>:There were nine computers in the server room. Five more computers were installed each day, from monday to thursday. How many computers are now in the server room?{eoh}
<|Bot|>:
```python
def solution():
computers_initial = 9
computers_per_day = 5
num_days = 4 # 4 days between monday and thursday
computers_added = computers_per_day * num_days
computers_total = computers_initial + computers_added
result = computers_total
return result
```{eoa}
<|System|>:How about this question?{eos}
<|User|>:{question}{eoh}
<|Bot|>:""".strip()
def main():
    """Entry point: load model and data, run PAL inference, report accuracy."""
    args = parse_args()

    print("load model begin.")
    model, tokenizer = load_model(args)
    print("load model end.")

    generation_config = GenerationConfig(max_length=args.max_length, top_p=args.top_p, temperature=args.temperature)

    verbose = args.verbose
    interface = PALInterface(model=model, tokenizer=tokenizer, generation_config=generation_config, verbose=verbose)

    if not os.path.exists(args.out_dir):
        os.makedirs(args.out_dir)
    savepath = os.path.join(args.out_dir, args.dataset + ".json")

    # Load from history results
    if args.append and os.path.exists(savepath):
        # Fix: close the history file instead of leaking the handle.
        with open(savepath) as history_file:
            lines = history_file.readlines()
        num_skip_exps = len(lines)
        scores = [x["score"] for x in map(json.loads, lines)]
    else:
        num_skip_exps = 0
        scores = []

    examples = load_data(args)
    with open(savepath, "a" if args.append else "w") as f:
        pbar = tqdm.tqdm(examples[num_skip_exps:], initial=num_skip_exps, total=len(examples))
        for x in pbar:
            question = x["question"]
            result = copy.copy(x)

            try:
                answer = interface.run(
                    prompt=PROMPT.format(question=question, eoh=args.eoh, eoa=args.eoa, eos=args.eos),
                    time_out=args.time_out,
                )
                answer = float(answer)
                score = 1 if abs(answer - x["target"]) < 1e-3 else 0
            except Exception as e:
                if verbose:
                    print(e)
                # Any failure (generation, execution, or float parsing) counts as wrong.
                answer = ""
                score = 0
            scores.append(score)
            result["answer"] = answer
            result["score"] = score
            result["generation"] = interface.history
            f.write(json.dumps(result) + "\n")
            interface.clear_history()
            f.flush()

    # Fix: guard against ZeroDivisionError when no examples were evaluated.
    if scores:
        print(f"{args.model}: Accuracy - {sum(scores) / len(scores)}")
    torch.cuda.empty_cache()


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,142 @@
import argparse
import json
import os
import sys

import numpy as np

# Resolve paths relative to this file so the script works from any CWD.
current_dir = os.path.dirname(os.path.abspath(__file__))
model_path = os.path.join(current_dir, "V7_sft.model")
# Make the bundled `transformers` helper package importable.
sys.path.append(os.path.join(current_dir, "transformers"))
from tokenization_internlm import InternLMTokenizer

# Module-level tokenizer shared by write_bin(); loaded once at import time.
tokenizer = InternLMTokenizer(vocab_file=model_path)
def write_bin(context: str, bin_file) -> None:
    """
    Write bin file based on the context.

    Args:
        context (str): the context of raw file.
        bin_file (file handler): the opened bin file.

    Example:
        >>> write_bin("今天天气晴朗适合出门散步", "out.bin") # the output file format is 'txt'
        >>> out.bin
        >>> {"tokens": [67577, 69095, 63010, 61770, 67783, 69301, 74732]}
    """
    # Tokenize the context, wrap the ids in {"tokens": [...]}, serialize as a
    # single JSON line, and append it to the bin file as UTF-8 bytes.
    token_ids = tokenizer.encode(context)
    serialized = json.dumps(dict(tokens=token_ids)) + "\n"
    bin_file.write(serialized.encode())
def prepare_meta(bin_output_path: str):
    """
    Prepare metadata for the given bin file.

    Args:
        bin_output_path (str): Output bin file path.
    """
    meta = []
    cur = 0
    with open(bin_output_path, "rb") as bin_file:
        # Each meta entry is (starting byte offset, token count) for one line.
        for line in bin_file:
            meta.append((cur, len(json.loads(line)["tokens"])))
            cur += len(line)
    # Save the meta information next to the bin file as a numpy int32 array.
    with open(bin_output_path + ".meta", "wb") as meta_file:
        np.save(meta_file, np.array(meta, dtype=np.int32))
def text2bin(text_input_path: str, bin_output_path: str):
    """
    Read content from the input file and write to bin file.
    Currently support 3 input formats: 'txt', 'json' and 'jsonl'.

    Args:
        text_input_path (str): txt file path.
        bin_output_path (str): output bin file path.

    Raises:
        FileNotFoundError: if the input file does not exist.
        AssertionError: if the input file extension is unsupported.
    """
    # Check if the input file exists
    if not os.path.isfile(text_input_path):
        raise FileNotFoundError(f"{text_input_path} does not exist.")

    file_format = text_input_path.split(".")[-1]
    # Fix: the previous assert used `print(...)` as its message, which
    # attached `None` to the AssertionError; use a plain string instead.
    assert file_format in ["txt", "json", "jsonl"], (
        "Invalid input file type. Currently support `txt`, `json` and `jsonl`."
    )

    # NOTE: the output is opened in append mode ("ab"), so repeated runs with
    # the same bin_output_path accumulate lines.
    with open(text_input_path, "r") as text_file, open(bin_output_path, "ab") as bin_file:
        if file_format == "txt":
            for line in text_file:
                # Strip any leading/trailing whitespace
                stripped_line = line.strip()
                if stripped_line:
                    # Pass each line to the write_bin function
                    write_bin(stripped_line, bin_file)
        elif file_format == "json":
            data = json.load(text_file)
            # assuming data is a list of dictionaries
            for record in data:
                # the type of record is dict; serialize it back to a JSON
                # string before tokenizing
                context = json.dumps(record)
                write_bin(context, bin_file)
        elif file_format == "jsonl":
            for line in text_file:
                # each line is already a JSON string; tokenize it as-is
                write_bin(line, bin_file)
def parse_args():
    """Parse the command-line arguments for the text-to-bin conversion."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--text_input_path", type=str, required=True, help="Path to the input text file.")
    parser.add_argument("--bin_output_path", type=str, required=True, help="Path to the output bin file.")
    return parser.parse_args()
def main():
    """CLI entry point: convert the input text file to bin + meta files."""
    # parse arguments
    args = parse_args()

    text2bin(args.text_input_path, args.bin_output_path)
    print(f"Successfully converted {args.text_input_path} to {args.bin_output_path}")

    # To avoid potential read/write errors, the metadata preparation follows after creating the .bin file.
    prepare_meta(args.bin_output_path)
    print(f"Successfully generated {args.bin_output_path}.meta")


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,25 @@
# InternLM Transformers
[English](./README.md) |
[简体中文](./README-zh-Hans.md)
该文件夹下包含了 transformers 格式的 `InternLM` 模型。
## 权重转换
`convert2hf.py` 可以将训练保存的权重一键转换为 transformers 格式。在仓库根目录运行以下命令:
```bash
python tools/transformers/convert2hf.py --src_folder origin_ckpt/ --tgt_folder hf_ckpt/ --tokenizer ./tools/V7_sft.model
```
然后可以使用 `from_pretrained` 接口加载:
```python
>>> from transformers import AutoTokenizer, AutoModel
>>> model = AutoModel.from_pretrained("hf_ckpt/", trust_remote_code=True).cuda()
```
`intern_moss_example.py` 展示了如何使用 LoRA 来在 `fnlp/moss-moon-002-sft` 数据集上进行微调的样例。

View File

@@ -0,0 +1,23 @@
# InternLM Transformers
[English](./README.md) |
[简体中文](./README-zh-Hans.md)
This folder contains the `InternLM` model in transformers format.
## Weight Conversion
`convert2hf.py` can convert saved training weights into the transformers format with a single command. Execute the command in the root directory of repository:
```bash
python tools/transformers/convert2hf.py --src_folder origin_ckpt/ --tgt_folder hf_ckpt/ --tokenizer ./tools/V7_sft.model
```
Then, you can load it using the `from_pretrained` interface:
```python
>>> from transformers import AutoTokenizer, AutoModel
>>> model = AutoModel.from_pretrained("hf_ckpt/", trust_remote_code=True).cuda()
```
`intern_moss_example.py` demonstrates an example of how to use LoRA for fine-tuning on the `fnlp/moss-moon-002-sft` dataset.

View File

@@ -0,0 +1,120 @@
# coding=utf-8
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" InternLM model configuration"""
from transformers.utils import logging
from transformers.configuration_utils import PretrainedConfig
# Module-level logger following the transformers convention.
logger = logging.get_logger(__name__)

# Mapping of pretrained model ids to config URLs (none published yet).
INTERNLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class InternLMConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`InternLMModel`]. It is used to instantiate an InternLM
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the InternLM-7B.
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
    Args:
        vocab_size (`int`, *optional*, defaults to 103168):
            Vocabulary size of the InternLM model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`InternLMModel`]
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 11008):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-6):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings(`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings
        bias (`bool`, *optional*, defaults to `True`):
            Whether the q/k/v/o attention projections carry bias terms.
    Example:
    ```python
    >>> from transformers import InternLMModel, InternLMConfig
    >>> # Initializing a InternLM internlm-7b style configuration
    >>> configuration = InternLMConfig()
    >>> # Initializing a model from the internlm-7b style configuration
    >>> model = InternLMModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = "internlm"
    _auto_class = "AutoConfig"

    def __init__(
        self,
        vocab_size=103168,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        bias=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.bias = bias
        # Token ids and embedding tying are handled by the PretrainedConfig base.
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

View File

@@ -0,0 +1,175 @@
import argparse
import math
import json
import os
import re
import tempfile
import torch
from modeling_internlm import InternLMConfig, InternLMForCausalLM
from tokenization_internlm import InternLMTokenizer
NUM_SHARDS = {
"7B": 1,
}
def convert2hf(model_config, states_tp_pps):
    """Convert merged InternLM training shards into a HF config + causal-LM model.

    Args:
        model_config: dict with at least ``hidden_size``, ``num_attention_heads``,
            ``num_layers`` and ``vocab_size``.
        states_tp_pps: nested ``[tp][pp]`` list of state dicts as saved by training.

    Returns:
        ``(InternLMConfig, InternLMForCausalLM)`` ready for ``save_pretrained``.

    Raises:
        KeyError: if no known embedding weight key is present in the checkpoint.
    """
    with tempfile.TemporaryDirectory() as folder:
        states = merge_pp(states_tp_pps)[0]
        if "embedding.word_embeddings.weight" in states:
            embedding_key = "embedding.word_embeddings.weight"
        elif "embedding.weight" in states:
            embedding_key = "embedding.weight"
        else:
            print("Check embedding states'names in below:", flush=True)
            print(list(states.keys()), flush=True)
            # Bug fix: the original fell through and later hit an
            # UnboundLocalError on `embedding_key`; fail explicitly instead.
            raise KeyError("no known embedding weight key found in checkpoint states")
        # Rotary inverse frequencies, re-attached per layer below.
        dims_per_head = model_config["hidden_size"] // model_config["num_attention_heads"]
        base = 10000.0
        inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
        current_states = {}
        current_states["model.embed_tokens.weight"] = states.pop(embedding_key)
        current_states["model.norm.weight"] = states.pop("norm.weight")
        current_states["lm_head.weight"] = states.pop("head.weight")
        for i in range(model_config["num_layers"]):
            states.pop(f"blocks.{i}.mixer.rotary_emb.inv_freq", None)
            # Split the fused QKV projection into separate q/k/v tensors (per head).
            wqkv = states.pop(f"blocks.{i}.mixer.Wqkv.weight").reshape(
                3, model_config["num_attention_heads"], -1, model_config["hidden_size"]
            )
            bqkv = states.pop(f"blocks.{i}.mixer.Wqkv.bias").reshape(3, model_config["num_attention_heads"], -1)
            current_states[f"model.layers.{i}.self_attn.q_proj.weight"] = wqkv[0].reshape(
                -1, model_config["hidden_size"]
            )
            current_states[f"model.layers.{i}.self_attn.q_proj.bias"] = bqkv[0].reshape(-1)
            current_states[f"model.layers.{i}.self_attn.k_proj.weight"] = wqkv[1].reshape(
                -1, model_config["hidden_size"]
            )
            current_states[f"model.layers.{i}.self_attn.k_proj.bias"] = bqkv[1].reshape(-1)
            current_states[f"model.layers.{i}.self_attn.v_proj.weight"] = wqkv[2].reshape(
                -1, model_config["hidden_size"]
            )
            current_states[f"model.layers.{i}.self_attn.v_proj.bias"] = bqkv[2].reshape(-1)
            current_states[f"model.layers.{i}.self_attn.o_proj.weight"] = states.pop(
                f"blocks.{i}.mixer.out_proj.weight"
            )
            current_states[f"model.layers.{i}.self_attn.o_proj.bias"] = states.pop(f"blocks.{i}.mixer.out_proj.bias")
            # MLP naming: training's w1/w3/w2 map to HF's gate/down/up projections.
            current_states[f"model.layers.{i}.mlp.gate_proj.weight"] = states.pop(f"blocks.{i}.mlp.w1.weight")
            current_states[f"model.layers.{i}.mlp.down_proj.weight"] = states.pop(f"blocks.{i}.mlp.w3.weight")
            current_states[f"model.layers.{i}.mlp.up_proj.weight"] = states.pop(f"blocks.{i}.mlp.w2.weight")
            current_states[f"model.layers.{i}.input_layernorm.weight"] = states.pop(f"blocks.{i}.norm1.weight")
            current_states[f"model.layers.{i}.post_attention_layernorm.weight"] = states.pop(f"blocks.{i}.norm2.weight")
            current_states[f"model.layers.{i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        config = InternLMConfig(
            hidden_size=model_config["hidden_size"],
            intermediate_size=compute_intermediate_size(model_config["hidden_size"]),
            num_attention_heads=model_config["num_attention_heads"],
            num_hidden_layers=model_config["num_layers"],
            rms_norm_eps=1e-06,
            bias=True,
        )
        if model_config["vocab_size"] != -1:
            config.vocab_size = model_config["vocab_size"]
        # Round-trip through from_pretrained so the weights are validated.
        config.save_pretrained(folder)
        torch.save(current_states, os.path.join(folder, "pytorch_model.bin"))
        model = InternLMForCausalLM.from_pretrained(folder, torch_dtype=torch.float16)
        del model.config._name_or_path
        return config, model
def compute_intermediate_size(n):
    """Round 8*n/3 up to the next multiple of 256 (LLaMA-style MLP width)."""
    multiple = 256
    raw = int(math.ceil(n * 8 / 3))
    return ((raw + multiple - 1) // multiple) * multiple
def merge_pp(states_tp_pp):
    """Merge pipeline-parallel shards into one state dict per tensor-parallel rank.

    Layer indices embedded in keys (e.g. ``blocks.3.norm1.weight``) are shifted
    so each pipeline stage's layers follow the previous stage's. A leading
    ``model.`` prefix is stripped from all keys.

    Args:
        states_tp_pp: nested ``[tp][pp]`` list of state dicts.

    Returns:
        list of merged state dicts, one per tensor-parallel rank.
    """
    max_tp = len(states_tp_pp)
    max_pp = len(states_tp_pp[0])
    # Bug fix: the pattern was a non-raw string ("\.\d+\."), an invalid escape
    # sequence that is a SyntaxWarning/error on newer Pythons; also hoisted and
    # compiled once instead of re-parsed per key.
    layer_re = re.compile(r"\.\d+\.")
    full_states = []
    for tp in range(max_tp):
        layer_shift = 0
        tp_states = {}
        for pp in range(max_pp):
            _layer_shift = 0
            states = states_tp_pp[tp][pp]
            for key in list(states.keys()):
                match = layer_re.search(key)
                if match is not None:
                    s, e = match.span()
                    # Shift this stage's local layer index by the layers already seen.
                    layer_idx = int(key[s + 1 : e - 1]) + layer_shift
                    _layer_shift = max(_layer_shift, int(key[s + 1 : e - 1]))
                    name = key[:s] + f".{layer_idx}." + key[e:]
                    tp_states[name] = states[key]
                else:
                    tp_states[key] = states[key]
            layer_shift += _layer_shift + 1
        full_states.append({(key[6:] if key.startswith("model.") else key): value for key, value in tp_states.items()})
    return full_states
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--src_folder', type=str, default='~/test/')  # folder with the checkpoint to convert to HF format
    parser.add_argument('--tgt_folder', type=str, default='~/output/')  # destination folder for the converted checkpoint
    parser.add_argument('--tokenizer', type=str, default='~/test/tokenizer.model')  # path to the tokenizer model file
    args = parser.parse_args()

    def load(fp):
        # Load a torch checkpoint onto CPU regardless of where it was saved.
        with open(fp, "rb") as f:
            pt_data = torch.load(f, map_location="cpu")
        return pt_data

    folder = args.src_folder
    target_folder = args.tgt_folder
    model_config = load(os.path.join(folder, "model_config.pt"))
    # Collect model shard files, skipping md5 sidecar files.
    fns = list(os.listdir(folder))
    model_fns = []
    for fn in fns:
        if fn.startswith("model_t") and not fn.endswith("md5"):
            model_fns.append(fn)
    # Infer the tensor/pipeline parallel grid from filenames "model_tp{i}_pp{j}.pt".
    max_tp, max_pp = -1, -1
    for fn in model_fns:
        _, tp, pp = os.path.splitext(fn)[0].split("_")
        max_pp = max(max_pp, int(pp[2:]) + 1)
        max_tp = max(max_tp, int(tp[2:]) + 1)
    # NOTE(review): only tensor-parallel rank 0 is loaded below; max_tp is
    # computed but never used, so tp > 1 checkpoints are silently ignored — confirm.
    states_tp_pps = [[]]
    for pp in range(max_pp):
        model_name = f"model_tp0_pp{pp}.pt"
        states = load(os.path.join(folder, model_name))
        states_tp_pps[0].append(states)
    config, model = convert2hf(model_config, states_tp_pps)
    os.makedirs(target_folder, exist_ok=True)
    model.save_pretrained(target_folder, max_shard_size="20GB")
    # TODO There should be a better way to add this.
    # Patch the saved config so AutoModel resolves to the custom class.
    with open(os.path.join(target_folder, "config.json")) as fp:
        config_dict = json.load(fp)
        config_dict["auto_map"]["AutoModel"] = "modeling_internlm.InternLMForCausalLM"
    with open(os.path.join(target_folder, "config.json"), "w") as fp:
        json.dump(config_dict, fp, indent=2)
    tokenizer = InternLMTokenizer(args.tokenizer)
    tokenizer.save_pretrained(target_folder)

View File

@@ -0,0 +1,137 @@
import copy
import warnings
from dataclasses import dataclass
from typing import Callable, List, Optional
import torch
from torch import nn
from transformers import AutoModel, AutoTokenizer
from transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList
from transformers.utils import logging
logger = logging.get_logger(__name__)
@dataclass
class GenerationConfig:
    """Sampling configuration for `generate_interactive`.

    Fields left as `None` fall back to the model's own generation config
    after `generation_config.update(**kwargs)` is applied.
    """

    max_length: Optional[int] = None  # total length cap (prompt + generated tokens)
    top_p: Optional[float] = None  # nucleus-sampling cumulative-probability cutoff
    temperature: Optional[float] = None  # softmax temperature for sampling
    do_sample: Optional[bool] = True  # sample (True) vs greedy argmax (False)
    repetition_penalty: Optional[float] = 1.0  # >1.0 penalizes repeated tokens
@torch.inference_mode()
def generate_interactive(
    model,
    tokenizer,
    prompt,
    generation_config: Optional[GenerationConfig] = None,
    logits_processor: Optional[LogitsProcessorList] = None,
    stopping_criteria: Optional[StoppingCriteriaList] = None,
    prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,
    additional_eos_token_id: Optional[int] = None,
    **kwargs,
):
    """Stream text generation: yields the decoded response-so-far after each new token.

    Moves the tokenized prompt to CUDA, then runs a manual sampling loop
    (mirroring `model.generate`'s sample path) so partial output can be
    yielded token by token.

    Args:
        model: a causal LM exposing HF generation internals
            (`_get_logits_processor`, `prepare_inputs_for_generation`, ...).
        tokenizer: tokenizer used to encode `prompt` and decode outputs.
        prompt: the input text.
        generation_config: overrides the model's config when given.
        logits_processor / stopping_criteria / prefix_allowed_tokens_fn:
            standard HF generation hooks.
        additional_eos_token_id: extra token id treated as end-of-sequence.
        **kwargs: forwarded into `generation_config.update`.

    Yields:
        str: the decoded response generated so far (prompt excluded).
    """
    inputs = tokenizer([prompt], padding=True, return_tensors="pt")
    input_length = len(inputs["input_ids"][0])
    # Model is assumed to live on CUDA; inputs are moved to match.
    for k, v in inputs.items():
        inputs[k] = v.cuda()
    input_ids = inputs["input_ids"]
    # NOTE(review): batch_size and bos_token_id below are computed but unused.
    batch_size, input_ids_seq_length = input_ids.shape[0], input_ids.shape[-1]
    if generation_config is None:
        generation_config = model.generation_config
    # Copy so per-call kwargs never mutate the model's shared config.
    generation_config = copy.deepcopy(generation_config)
    model_kwargs = generation_config.update(**kwargs)
    bos_token_id, eos_token_id = generation_config.bos_token_id, generation_config.eos_token_id
    if isinstance(eos_token_id, int):
        eos_token_id = [eos_token_id]
    if additional_eos_token_id is not None:
        eos_token_id.append(additional_eos_token_id)
    has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
    if has_default_max_length and generation_config.max_new_tokens is None:
        warnings.warn(
            f"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. "
            "This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we"
            " recommend using `max_new_tokens` to control the maximum length of the generation.",
            UserWarning,
        )
    elif generation_config.max_new_tokens is not None:
        # `max_new_tokens` takes precedence over `max_length`.
        generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length
        if not has_default_max_length:
            # NOTE(review): `logger.warn` is deprecated and, unlike `warnings.warn`,
            # takes no category argument — the trailing UserWarning becomes a
            # %-format arg and will trigger a logging format error — confirm.
            logger.warn(
                f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(="
                f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. "
                "Please refer to the documentation for more information. "
                "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)",
                UserWarning,
            )
    if input_ids_seq_length >= generation_config.max_length:
        input_ids_string = "input_ids"
        logger.warning(
            f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to"
            f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
            " increasing `max_new_tokens`."
        )
    # 2. Set generation parameters if not already defined
    logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
    stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
    logits_processor = model._get_logits_processor(
        generation_config=generation_config,
        input_ids_seq_length=input_ids_seq_length,
        encoder_input_ids=input_ids,
        prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
        logits_processor=logits_processor,
    )
    stopping_criteria = model._get_stopping_criteria(
        generation_config=generation_config, stopping_criteria=stopping_criteria
    )
    logits_warper = model._get_logits_warper(generation_config)
    # 1 = still generating, 0 = finished (per batch element).
    unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1)
    scores = None
    while True:
        model_inputs = model.prepare_inputs_for_generation(input_ids, **model_kwargs)
        # forward pass to get next token
        outputs = model(
            **model_inputs,
            return_dict=True,
            output_attentions=False,
            output_hidden_states=False,
        )
        next_token_logits = outputs.logits[:, -1, :]
        # pre-process distribution
        next_token_scores = logits_processor(input_ids, next_token_logits)
        next_token_scores = logits_warper(input_ids, next_token_scores)
        # sample
        probs = nn.functional.softmax(next_token_scores, dim=-1)
        if generation_config.do_sample:
            next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
        else:
            next_tokens = torch.argmax(probs, dim=-1)
        # update generated ids, model inputs, and length for next step
        input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
        model_kwargs = model._update_model_kwargs_for_generation(
            outputs, model_kwargs, is_encoder_decoder=False
        )
        # NOTE(review): min() over a generator of bool tensors only works for a
        # single-element batch; comparing multi-element tensors raises. Confirm
        # batch size 1 is an invariant of this interactive path.
        unfinished_sequences = unfinished_sequences.mul((min(next_tokens != i for i in eos_token_id)).long())
        # Decode only the newly generated portion (prompt excluded).
        output_token_ids = input_ids[0].cpu().tolist()
        output_token_ids = output_token_ids[input_length:]
        # Strip a single trailing EOS token before decoding, if present.
        for each_eos_token_id in eos_token_id:
            if output_token_ids[-1] == each_eos_token_id:
                output_token_ids = output_token_ids[:-1]
        response = tokenizer.decode(output_token_ids)
        yield response
        # stop when each sentence is finished, or if we exceed the maximum length
        if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores):
            break

View File

@@ -0,0 +1,69 @@
import torch
from torch.utils.data import DataLoader
from peft import get_peft_model, LoraConfig, TaskType
from transformers import get_linear_schedule_with_warmup
from transformers import AutoModelForCausalLM, AutoTokenizer
from tqdm import tqdm
from moss_002_sft import get_dataset, collate_fn
# ---- Hyperparameters ----
model_path = "model_path"  # path or hub id of the base model
data_dir = "moss_002_sft"  # cache dir for the tokenized dataset
data_num = -1  # -1 = use the full dataset
test_size = 10  # number of validation samples
train_batch_size = 1
epochs = 5
val_per_steps = 1000  # run validation generation every N steps
lr = 9e-6
peft_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM, r=32, lora_alpha=32, lora_dropout=0.1,
    target_modules=["gate_proj", "down_proj", "up_proj", "q_proj", "k_proj", "v_proj", "o_proj"]
)

# model
model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = get_peft_model(model, peft_config)
model.cuda()

# dataset
train_dataset, val_dataset = get_dataset(tokenizer, data_dir, num=data_num, test_size=test_size)
train_dataloader = DataLoader(train_dataset, batch_size=train_batch_size, shuffle=True, collate_fn=lambda x: collate_fn(x, tokenizer))
optimizer = torch.optim.AdamW(model.parameters(), lr)
scheduler = get_linear_schedule_with_warmup(
    optimizer, 1000, epochs * len(train_dataloader)
)

# train
# Bug fix: the log file was opened with open() and never closed; a context
# manager guarantees it is flushed and closed even on an exception.
model.train()
with open("output", "w") as fp:
    for epoch in tqdm(range(epochs), desc="Training Epoch"):  # fixed "Traning" typo
        batch_bar = tqdm(train_dataloader, desc="Training Batch")
        for step, batch in enumerate(batch_bar):
            batch = {k: v.cuda() for k, v in batch.items()}
            with torch.amp.autocast(device_type="cuda", dtype=torch.bfloat16):
                output = model(**batch)
                loss = output.loss
            loss.backward()
            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()
            batch_bar.set_postfix({"loss": loss.item()})
            if (step + 1) % val_per_steps == 0:
                fp.write(f"Epoch {epoch} Batch {step}: Loss={loss.item()}\n")
                # Bug fix: switch to eval mode (disables dropout) for validation
                # generation; the original generated while still in train mode.
                model.eval()
                for i in tqdm(range(len(val_dataset)), desc="Generating"):
                    data, label = val_dataset[i]
                    prefix = tokenizer.decode(data.tolist(), skip_special_tokens=True)
                    try:
                        generate = model.generate(input_ids=data.unsqueeze(0).cuda(), temperature=0.7, top_k=50, do_sample=True, repetition_penalty=1.02, max_new_tokens=100, top_p=0.9)
                        text = tokenizer.decode(generate[0].tolist(), skip_special_tokens=True)
                        text = text.replace(prefix, "")
                        fp.write(f"Prefix: {prefix}\nGenerated: {text}" + "\n---------------------------------\n")
                    except Exception as e:
                        fp.write(f"Prefix: {prefix}\nError: {e}" + "\n---------------------------------\n")
                fp.write("\n==============================\n")
                fp.flush()  # make validation output visible during long runs
                model.train()
                torch.cuda.empty_cache()

View File

@@ -0,0 +1,105 @@
import os
import copy
import torch
from torch.utils.data import Dataset
from datasets import load_dataset, Dataset as HFDataset
class SFTDataset(Dataset):
    # https://github.com/OpenLMLab/MOSS/blob/main/finetune_moss.py
    """Wraps tokenized samples, masking no-loss spans in the labels with -100."""

    def __init__(self, dataset):
        super().__init__()
        self.dataset = dataset

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        # Each sample provides token ids plus (start, end) spans to exclude
        # from the loss (e.g. the instruction prefix).
        sample = self.dataset[index]
        input_ids = torch.tensor(copy.deepcopy(sample["input_ids"]), dtype=torch.long)
        labels = input_ids.clone()
        for start, end in sample["no_loss_spans"]:
            labels[start:end] = -100
        return input_ids, labels
def collate_fn(batch, tokenizer):
    """Pad a batch of (input_ids, labels) pairs to a uniform length.

    Sequences are right-padded with the tokenizer's EOS id; labels are padded
    with -100 so padding positions never contribute to the loss.

    Args:
        batch: iterable of (input_ids, labels) 1-D LongTensor pairs.
        tokenizer: object exposing ``eos_token_id`` (used as the pad id).

    Returns:
        dict with "input_ids", "attention_mask" (1 = attend, 0 = ignore)
        and "labels" tensors.
    """
    batch_input_ids, batch_labels = [], []
    for input_ids, label in batch:
        batch_input_ids.append(input_ids)
        batch_labels.append(label)
    batch_input_ids = torch.nn.utils.rnn.pad_sequence(batch_input_ids, batch_first=True, padding_value=tokenizer.eos_token_id)
    batch_labels = torch.nn.utils.rnn.pad_sequence(batch_labels, batch_first=True, padding_value=-100)
    return {
        "input_ids": batch_input_ids,
        # Bug fix: the mask was inverted (`== eos`), attending ONLY to padding.
        # Since EOS doubles as the pad token, genuine EOS tokens are masked
        # too — the best available with this padding scheme.
        "attention_mask": (batch_input_ids != tokenizer.eos_token_id).long(),
        "labels": batch_labels
    }
def process(sample, tokenizer, max_len):
    """Tokenize one moss-002-sft sample into input ids plus no-loss spans.

    The instruction prefix is always encoded and marked as a no-loss span;
    dialogue turns are appended until adding one would exceed `max_len`.
    Returns empty lists when no turn fits.
    """
    turns = sample["plain_text"].split("<eoa>")[:-1]
    num_turns = sample["num_turns"]
    meta_instruction = sample["prefix"]
    # Encode the system instruction; no loss is computed over it.
    instruction_ids = tokenizer.encode(meta_instruction)
    assert isinstance(instruction_ids, list), instruction_ids
    assert len(instruction_ids) > 0, len(instruction_ids)
    input_ids = copy.deepcopy(instruction_ids)
    no_loss_spans = [(0, len(instruction_ids))]
    for turn_index in range(num_turns):
        turn_ids = tokenizer.encode(turns[turn_index] + "<eoa>")
        # Stop once this turn would blow the length budget.
        if len(input_ids) + len(turn_ids) > max_len:
            break
        input_ids.extend(turn_ids)
    if len(input_ids) == len(instruction_ids):
        # Every turn was dropped: nothing trainable in this sample.
        return {"input_ids": [], "no_loss_spans": []}
    return {"input_ids": input_ids, "no_loss_spans": no_loss_spans}
def load_data(save_dir, tokenizer, max_len, num=-1) -> HFDataset:
    """Load (or build and cache) the tokenized moss-002-sft dataset.

    On first use the raw dataset is downloaded, tokenized with `process`,
    filtered of empty samples and saved to `save_dir`; later calls reload
    the cached copy.

    Args:
        save_dir: on-disk cache directory for the tokenized dataset.
        tokenizer: tokenizer passed through to `process`.
        max_len: maximum token length per sample.
        num: if not -1, keep only the first `num` samples.
    """
    if os.path.exists(save_dir):
        print(f"Loading moss-002-sft from {save_dir}")
    else:
        # Fixed an f-string with no placeholders (message unchanged).
        print("Loading moss-002-sft from datasets")
        moss_sft = load_dataset("fnlp/moss-002-sft-data", split="train")
        moss_sft = moss_sft.map(lambda x: process(x, tokenizer, max_len), num_proc=10)
        # Drop samples where no dialogue turn fit within max_len.
        moss_sft = moss_sft.filter(lambda x: len(x["input_ids"]) != 0)
        moss_sft.save_to_disk(save_dir)
    moss_sft = HFDataset.load_from_disk(save_dir)
    if num != -1:
        moss_sft = moss_sft.select(range(num))
    print(f"Load successfully, total {len(moss_sft)} samples.")
    return moss_sft
def get_dataset(tokenizer, save_dir, max_len=1024, num=-1, test_size=0.1):
    """Build (train, validation) SFTDatasets from the cached moss-002-sft data."""
    tokenized = load_data(save_dir, tokenizer, max_len, num)
    split = tokenized.train_test_split(test_size=test_size)
    return SFTDataset(split["train"]), SFTDataset(split["test"])

View File

@@ -0,0 +1,998 @@
# coding=utf-8
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch InternLM model."""
import math
from typing import List, Optional, Tuple, Union
import threading, queue
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.generation.streamers import BaseStreamer
from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from configuration_internlm import InternLMConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "InternLMConfig"
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(
input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
):
"""
Make causal mask used for bi-directional self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device)
mask_cond = torch.arange(mask.size(-1), device=device)
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
# Copied from transformers.models.bart.modeling_bart._expand_mask
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
class InternLMRMSNorm(nn.Module):
    """Root-mean-square LayerNorm (no mean subtraction); equivalent to T5LayerNorm."""

    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # Compute the variance in fp32 for numerical stability.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        normed = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # Cast back down when the parameters are half precision.
        if self.weight.dtype in (torch.float16, torch.bfloat16):
            normed = normed.to(self.weight.dtype)
        return self.weight * normed
class InternLMRotaryEmbedding(torch.nn.Module):
    """Rotary position embedding with a lazily-grown cos/sin cache."""

    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
        super().__init__()
        # Inverse frequency for each pair of hidden dims.
        inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim))
        self.register_buffer("inv_freq", inv_freq)
        # Build the cache eagerly so `torch.jit.trace` works.
        self.max_seq_len_cached = max_position_embeddings
        positions = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
        freqs = torch.einsum("i,j->ij", positions, self.inv_freq)
        # Duplicated rather than interleaved — a different permutation than the
        # paper, but it yields the same computation downstream.
        table = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer("cos_cached", table.cos()[None, None, :, :], persistent=False)
        self.register_buffer("sin_cached", table.sin()[None, None, :, :], persistent=False)

    def forward(self, x, seq_len=None):
        # x: [bs, num_attention_heads, seq_len, head_size]
        # Rebuild the cache if a longer sequence than ever seen comes in.
        if seq_len > self.max_seq_len_cached:
            self.max_seq_len_cached = seq_len
            positions = torch.arange(self.max_seq_len_cached, device=x.device, dtype=self.inv_freq.dtype)
            freqs = torch.einsum("i,j->ij", positions, self.inv_freq)
            table = torch.cat((freqs, freqs), dim=-1).to(x.device)
            self.register_buffer("cos_cached", table.cos()[None, None, :, :], persistent=False)
            self.register_buffer("sin_cached", table.sin()[None, None, :, :], persistent=False)
        return (
            self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
            self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
        )
def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    half = x.shape[-1] // 2
    first, second = x[..., :half], x[..., half:]
    return torch.cat((-second, first), dim=-1)
def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
    """Apply rotary embeddings to query/key tensors at the given positions."""
    # cos/sin arrive as [1, 1, seq_len, dim]; drop the broadcast dims, gather
    # the rows selected by `position_ids`, and re-insert a head axis.
    cos = cos.squeeze(1).squeeze(0)[position_ids].unsqueeze(1)  # [bs, 1, seq_len, dim]
    sin = sin.squeeze(1).squeeze(0)[position_ids].unsqueeze(1)  # [bs, 1, seq_len, dim]
    rotated_q = (q * cos) + (rotate_half(q) * sin)
    rotated_k = (k * cos) + (rotate_half(k) * sin)
    return rotated_q, rotated_k
class InternLMMLP(nn.Module):
    """Gated feed-forward block: down_proj(act(gate_proj(x)) * up_proj(x))."""

    def __init__(
        self,
        hidden_size: int,
        intermediate_size: int,
        hidden_act: str,
    ):
        super().__init__()
        self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
        self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)
        self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
        # Activation resolved by name from the transformers registry.
        self.act_fn = ACT2FN[hidden_act]

    def forward(self, x):
        gated = self.act_fn(self.gate_proj(x)) * self.up_proj(x)
        return self.down_proj(gated)
class InternLMAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: InternLMConfig):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.max_position_embeddings = config.max_position_embeddings
        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(
                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
                f" and `num_heads`: {self.num_heads})."
            )
        # Separate q/k/v/o projections; `config.bias` controls their bias terms.
        self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.bias)
        self.k_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.bias)
        self.v_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.bias)
        self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.bias)
        self.rotary_emb = InternLMRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        # Reshape [bsz, seq, hidden] -> [bsz, heads, seq, head_dim].
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Compute self-attention with rotary embeddings and an optional KV cache.

        Returns (attn_output, attn_weights or None, past_key_value or None).
        """
        bsz, q_len, _ = hidden_states.size()
        # Project and split into heads: [bsz, heads, q_len, head_dim].
        query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        # Total key length includes any cached past keys; the rotary tables are
        # sized for it, while `position_ids` selects the rows actually applied.
        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            kv_seq_len += past_key_value[0].shape[-2]
        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
        # [bsz, nh, t, hd]
        if past_key_value is not None:
            # reuse k, v, self_attention
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)
        past_key_value = (key_states, value_states) if use_cache else None
        # Scaled dot-product scores.
        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
        if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
                f" {attn_weights.size()}"
            )
        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights + attention_mask
            # Clamp so additive masking cannot underflow below the dtype minimum.
            attn_weights = torch.max(attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min))
        # upcast attention to fp32
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
        attn_output = torch.matmul(attn_weights, value_states)
        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )
        # Merge heads back: [bsz, q_len, hidden].
        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
        attn_output = self.o_proj(attn_output)
        if not output_attentions:
            attn_weights = None
        return attn_output, attn_weights, past_key_value
class InternLMDecoderLayer(nn.Module):
    """One pre-norm transformer decoder layer: self-attention + gated MLP."""

    def __init__(self, config: InternLMConfig):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = InternLMAttention(config=config)
        self.mlp = InternLMMLP(
            hidden_size=self.hidden_size,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act,
        )
        # Pre-norm: RMSNorm is applied before each sub-layer, not after.
        self.input_layernorm = InternLMRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = InternLMRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
        """
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        # Self Attention
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
        )
        hidden_states = residual + hidden_states
        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states
        # Output tuple grows with the requested optional values.
        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)
        if use_cache:
            outputs += (present_key_value,)
        return outputs
INTERNLM_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`InternLMConfig`]):
Model configuration class with all the parameters of the model. Initializing with a config file does not
load the weights associated with the model, only the configuration. Check out the
[`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
@add_start_docstrings(
    "The bare InternLM Model outputting raw hidden-states without any specific head on top.",
    INTERNLM_START_DOCSTRING,
)
class InternLMPreTrainedModel(PreTrainedModel):
    """Base class hooking InternLM modules into the HF `PreTrainedModel` machinery
    (config binding, weight init, gradient-checkpointing toggle)."""

    config_class = InternLMConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["InternLMDecoderLayer"]
    _keys_to_ignore_on_load_unexpected = [r"decoder\.version"]

    def _init_weights(self, module):
        """Initialize Linear/Embedding weights from N(0, initializer_range); zero biases and padding rows."""
        std = self.config.initializer_range
        is_linear = isinstance(module, nn.Linear)
        if not is_linear and not isinstance(module, nn.Embedding):
            return  # other module types keep their own initialization
        module.weight.data.normal_(mean=0.0, std=std)
        if is_linear:
            if module.bias is not None:
                module.bias.data.zero_()
        elif module.padding_idx is not None:
            # Embedding: the padding row must stay zero.
            module.weight.data[module.padding_idx].zero_()

    def _set_gradient_checkpointing(self, module, value=False):
        """Enable/disable gradient checkpointing on the wrapped `InternLMModel`."""
        if isinstance(module, InternLMModel):
            module.gradient_checkpointing = value
INTERNLM_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
information on the default strategy.
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.n_positions - 1]`.
[What are position IDs?](../glossary#position-ids)
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare InternLM Model outputting raw hidden-states without any specific head on top.",
    INTERNLM_START_DOCSTRING,
)
class InternLMModel(InternLMPreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`InternLMDecoderLayer`]
    Args:
        config: InternLMConfig
    """
    _auto_class = "AutoModel"

    def __init__(self, config: InternLMConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size
        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList([InternLMDecoderLayer(config) for _ in range(config.num_hidden_layers)])
        # Final RMSNorm applied after the last decoder layer.
        self.norm = InternLMRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
    def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
        """Combine the causal mask with the user-provided padding mask into one additive 4-D mask."""
        # create causal mask
        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
        combined_attention_mask = None
        if input_shape[-1] > 1:
            # Only needed when decoding more than one token at a time.
            combined_attention_mask = _make_causal_mask(
                input_shape,
                inputs_embeds.dtype,
                device=inputs_embeds.device,
                past_key_values_length=past_key_values_length,
            )
        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
                inputs_embeds.device
            )
            combined_attention_mask = (
                expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
            )
        return combined_attention_mask

    @add_start_docstrings_to_model_forward(INTERNLM_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        """
        Embed the inputs, run them through every decoder layer, apply the final norm, and
        return the last hidden state plus (optionally) the KV cache, all hidden states, and
        all attention maps — as a tuple or a `BaseModelOutputWithPast` depending on `return_dict`.
        """
        # Resolve unset flags from the config defaults.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
        elif input_ids is not None:
            batch_size, seq_length = input_ids.shape
        elif inputs_embeds is not None:
            batch_size, seq_length, _ = inputs_embeds.shape
        else:
            raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
        seq_length_with_past = seq_length
        past_key_values_length = 0
        if past_key_values is not None:
            # Cache shape is (batch, heads, cached_seq_len, head_dim); index 2 is the cached length.
            past_key_values_length = past_key_values[0][0].shape[2]
            seq_length_with_past = seq_length_with_past + past_key_values_length
        if position_ids is None:
            # Positions continue after the cached prefix.
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            position_ids = torch.arange(
                past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
            )
            position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
        else:
            position_ids = position_ids.view(-1, seq_length).long()
        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)
        # embed positions
        if attention_mask is None:
            attention_mask = torch.ones(
                (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
            )
        attention_mask = self._prepare_decoder_attention_mask(
            attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
        )
        hidden_states = inputs_embeds
        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False
        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = () if use_cache else None
        for idx, decoder_layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            past_key_value = past_key_values[idx] if past_key_values is not None else None
            if self.gradient_checkpointing and self.training:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        # None for past_key_value
                        return module(*inputs, output_attentions, None)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(decoder_layer),
                    hidden_states,
                    attention_mask,
                    position_ids,
                    None,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_value,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                )
            hidden_states = layer_outputs[0]
            if use_cache:
                # The cache is the last element of the layer tuple; its index depends on
                # whether attention weights were also returned.
                next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
            if output_attentions:
                all_self_attns += (layer_outputs[1],)
        hidden_states = self.norm(hidden_states)
        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)
        next_cache = next_decoder_cache if use_cache else None
        if not return_dict:
            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )
class InternLMForCausalLM(InternLMPreTrainedModel):
    """InternLM decoder (`InternLMModel`) with a linear language-modeling head on top,
    plus `chat`/`stream_chat` convenience helpers for the InternLM chat prompt format."""

    _auto_class = "AutoModelForCausalLM"

    def __init__(self, config):
        super().__init__(config)
        self.model = InternLMModel(config)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @add_start_docstrings_to_model_forward(INTERNLM_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        Returns:
        Example:
        ```python
        >>> from transformers import AutoTokenizer, InternLMForCausalLM
        >>> model = InternLMForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
        >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")
        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = outputs[0]
        logits = self.lm_head(hidden_states)
        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)
        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output
        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
    ):
        """Assemble the model kwargs for one decoding step; with a cache, only the newest token is fed."""
        if past_key_values:
            input_ids = input_ids[:, -1:]
        position_ids = kwargs.get("position_ids", None)
        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -1].unsqueeze(-1)
        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}
        model_inputs.update(
            {
                "position_ids": position_ids,
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "attention_mask": attention_mask,
            }
        )
        return model_inputs

    @staticmethod
    def _reorder_cache(past_key_values, beam_idx):
        """Reorder every layer's cached key/value states along the batch dim to follow `beam_idx` (beam search)."""
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
        return reordered_past

    def build_inputs(self, tokenizer, query: str, history: Optional[List[Tuple[str, str]]] = None):
        """Render the (history, query) conversation into the InternLM chat prompt and tokenize it."""
        # Default is None instead of a mutable [] to avoid the shared-default pitfall.
        history = [] if history is None else history
        prompt = ""
        for record in history:
            prompt += f"""<s><|User|>:{record[0]}<eoh>\n<|Bot|>:{record[1]}<eoa>\n"""
        if len(prompt) == 0:
            prompt += "<s>"
        prompt += f"""<|User|>:{query}<eoh>\n<|Bot|>:"""
        return tokenizer([prompt], return_tensors="pt")

    @torch.no_grad()
    def chat(self,
             tokenizer,
             query: str,
             history: Optional[List[Tuple[str, str]]] = None,
             streamer: Optional[BaseStreamer] = None,
             max_new_tokens: int = 1024,
             do_sample: bool = True,
             temperature: float = 0.8,
             top_p: float = 0.8,
             **kwargs):
        """Run one chat turn and return `(response, updated_history)`."""
        history = [] if history is None else history
        inputs = self.build_inputs(tokenizer, query, history)
        inputs = {k: v.to(self.device) for k, v in inputs.items() if torch.is_tensor(v)}
        outputs = self.generate(**inputs,
                                streamer=streamer,
                                max_new_tokens=max_new_tokens,
                                do_sample=do_sample,
                                temperature=temperature,
                                top_p=top_p,
                                **kwargs)
        # Drop the prompt tokens; decode only the newly generated continuation.
        outputs = outputs[0].cpu().tolist()[len(inputs["input_ids"][0]):]
        response = tokenizer.decode(outputs, skip_special_tokens=True)
        response = response.split("<eoa>")[0]
        history = history + [(query, response)]
        return response, history

    @torch.no_grad()
    def stream_chat(self,
                    tokenizer,
                    query: str,
                    history: Optional[List[Tuple[str, str]]] = None,
                    max_new_tokens: int = 1024,
                    do_sample: bool = True,
                    temperature: float = 0.8,
                    top_p: float = 0.8,
                    **kwargs):
        """
        Return a generator in format: (response, history)
        Eg.
        ('你好,有什么可以帮助您的吗', [('你好', '你好,有什么可以帮助您的吗')])
        ('你好,有什么可以帮助您的吗?', [('你好', '你好,有什么可以帮助您的吗?')])
        """
        history = [] if history is None else history
        response_queue = queue.Queue(maxsize=20)

        class ChatStreamer(BaseStreamer):
            """Streamer that pushes `(partial_response, history)` tuples onto `response_queue`."""

            def __init__(self, tokenizer) -> None:
                super().__init__()
                self.tokenizer = tokenizer
                self.queue = response_queue
                self.query = query
                self.history = history
                self.response = ""
                self.received_inputs = False
                # Seed the queue so consumers see an initial (empty) state immediately.
                self.queue.put((self.response, history + [(self.query, self.response)]))

            def put(self, value):
                if len(value.shape) > 1 and value.shape[0] > 1:
                    raise ValueError("ChatStreamer only supports batch size 1")
                elif len(value.shape) > 1:
                    value = value[0]
                if not self.received_inputs:
                    # The first received value is input_ids, ignore here
                    self.received_inputs = True
                    return
                token = self.tokenizer.decode([value[-1]], skip_special_tokens=True)
                if token.strip() != "<eoa>":
                    self.response = self.response + token
                    history = self.history + [(self.query, self.response)]
                    self.queue.put((self.response, history))

            def end(self):
                # `None` is the end-of-stream sentinel consumed by `consumer` below.
                self.queue.put(None)

        def stream_producer():
            return self.chat(
                tokenizer=tokenizer,
                query=query,
                streamer=ChatStreamer(tokenizer=tokenizer),
                history=history,
                max_new_tokens=max_new_tokens,
                do_sample=do_sample,
                temperature=temperature,
                top_p=top_p,
                **kwargs
            )

        def consumer():
            producer = threading.Thread(target=stream_producer)
            producer.start()
            while True:
                res = response_queue.get()
                # BUG FIX: the sentinel check was inverted (`if res is not None: return`),
                # which made the generator exit on the first real token and never yield
                # anything. `None` (pushed by ChatStreamer.end) marks end-of-stream.
                if res is None:
                    return
                yield res

        return consumer()
@add_start_docstrings(
    """
    The InternLM Model transformer with a sequence classification head on top (linear layer).
    [`InternLMForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-2) do.
    Since it does classification on the last token, it requires to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """,
    INTERNLM_START_DOCSTRING,
)
class InternLMForSequenceClassification(InternLMPreTrainedModel):
    _keys_to_ignore_on_load_missing = [r"lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = InternLMModel(config)
        # Classification head applied to the hidden state of the last non-padding token.
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @add_start_docstrings_to_model_forward(INTERNLM_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        transformer_outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]
        logits = self.score(hidden_states)
        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]
        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
        if self.config.pad_token_id is None:
            # No pad token: fall back to the last position of each row.
            sequence_lengths = -1
        else:
            if input_ids is not None:
                # Index of the last non-padding token per row.
                sequence_lengths = (torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1).to(logits.device)
            else:
                # With inputs_embeds we cannot detect padding; use the last position.
                sequence_lengths = -1
        pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
        loss = None
        if labels is not None:
            labels = labels.to(logits.device)
            if self.config.problem_type is None:
                # Infer the problem type from num_labels and the label dtype (HF convention).
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(pooled_logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(pooled_logits, labels)
        if not return_dict:
            output = (pooled_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output
        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

View File

@@ -0,0 +1,242 @@
# coding=utf-8
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for InternLM."""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from transformers.tokenization_utils import PreTrainedTokenizer
from transformers.utils import logging
logger = logging.get_logger(__name__)
# Tokenizer asset filename expected alongside the model checkpoint.
VOCAB_FILES_NAMES = {"vocab_file": "./tokenizer.model"}
# No hub-hosted vocab mapping; the vocab file is resolved from the local checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {}
class InternLMTokenizer(PreTrainedTokenizer):
    """
    Construct a InternLM tokenizer. Based on byte-level Byte-Pair-Encoding.
    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    _auto_class = "AutoTokenizer"

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="</s>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        add_bos_token=True,
        add_eos_token=False,
        decode_with_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.add_bos_token = add_bos_token
        self.add_eos_token = add_eos_token
        self.decode_with_prefix_space = decode_with_prefix_space
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        # Lazily built by `no_prefix_space_tokens`; None until first use.
        self._no_prefix_space_tokens = None

    @property
    def no_prefix_space_tokens(self):
        if self._no_prefix_space_tokens is None:
            vocab = self.convert_ids_to_tokens(list(range(self.vocab_size)))
            # BUG FIX: this previously tested `tok.startswith("")`, which is True for
            # every string, so the set was always empty and the prefix-space logic was
            # dead. The intended check is for the SentencePiece word-boundary marker
            # "▁" (U+2581).
            # NOTE(review): the set stores *indices* while `_maybe_add_prefix_space`
            # tests a token *string* against it — looks like an upstream quirk; verify
            # against callers before changing further.
            self._no_prefix_space_tokens = {i for i, tok in enumerate(vocab) if not tok.startswith("▁")}
        return self._no_prefix_space_tokens

    @property
    def vocab_size(self):
        """Returns vocab size"""
        return self.sp_model.get_piece_size()

    @property
    def bos_token_id(self) -> Optional[int]:
        return self.sp_model.bos_id()

    @property
    def eos_token_id(self) -> Optional[int]:
        return self.sp_model.eos_id()

    def get_vocab(self):
        """Returns vocab as a dict"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        """Returns a tokenized string."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def _maybe_add_prefix_space(self, tokens, decoded):
        """Prepend a space when the first token carries a SentencePiece word boundary."""
        if tokens and tokens[0] not in self.no_prefix_space_tokens:
            return " " + decoded
        else:
            return decoded

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        out_string = self.clean_up_tokenization(out_string)
        out_string = self._maybe_add_prefix_space(tokens=tokens, decoded=out_string)
        # Strip the leading character (the artificial prefix space added above).
        return out_string[1:]

    def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """
        Save the vocabulary and special tokens file to a directory.
        Args:
            save_directory (`str`):
                The directory in which to save the vocabulary.
        Returns:
            `Tuple(str)`: Paths to the files saved.
        """
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # Source file is gone (e.g. loaded from serialized proto): dump the
            # in-memory sentencepiece model instead.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add BOS (and optionally EOS) around the concatenated sequence(s)."""
        if self.add_bos_token:
            bos_token_ids = [self.bos_token_id]
        else:
            bos_token_ids = []
        output = bos_token_ids + token_ids_0
        if token_ids_1 is not None:
            output = output + token_ids_1
        if self.add_eos_token:
            output = output + [self.eos_token_id]
        return output

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.
        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.
        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make
        use of token type ids, therefore a list of zeros is returned.
        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
        Returns:
            `List[int]`: List of zeros.
        """
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

View File

@@ -0,0 +1,109 @@
"""
This code is supported by the website: https://www.guanjihuan.com
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/38502
"""
import streamlit as st
st.set_page_config(
page_title="Chat",
layout='wide'
)
choose_load_model = 0 # 选择加载的模型Qwen-7B 或 Qwen-14B
if choose_load_model == 0:
# Qwen-7B需要8G显存
@st.cache_resource
def load_model_qwen_7B():
from transformers import AutoTokenizer, AutoModelForCausalLM
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-7B-Chat-Int4", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen-7B-Chat-Int4",
device_map="auto",
trust_remote_code=True,
).eval()
return tokenizer, model
tokenizer_qwen_7B, model_qwen_7B = load_model_qwen_7B()
elif choose_load_model == 1:
# Qwen-14B需要12G显存
@st.cache_resource
def load_model_qwen_14B():
from transformers import AutoTokenizer, AutoModelForCausalLM
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-14B-Chat-Int4", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen-14B-Chat-Int4",
device_map="auto",
trust_remote_code=True
).eval()
return tokenizer, model
tokenizer_qwen_14B, model_qwen_14B = load_model_qwen_14B()
with st.sidebar:
with st.expander('参数', expanded=True):
max_length = 409600
top_p = st.slider('top_p', 0.01, 1.0, step=0.01, value=0.8, key='top_p_session')
temperature = st.slider('temperature', 0.51, 1.0, step=0.01, value=0.8, key='temperature_session')
def reset_parameter():
st.session_state['top_p_session'] = 0.8
st.session_state['temperature_session'] = 0.8
reset_parameter_button = st.button('重置', on_click=reset_parameter)
prompt = st.chat_input("在这里输入您的命令")
from transformers.generation import GenerationConfig
if choose_load_model == 0:
config_qwen_7b = GenerationConfig.from_pretrained(
"Qwen/Qwen-7B-Chat-Int4", trust_remote_code=True, resume_download=True, max_length = max_length, top_p = top_p, temperature = temperature
)
def chat_response_qwen_7B(query):
for response in model_qwen_7B.chat_stream(tokenizer_qwen_7B, query, history=st.session_state.history_qwen, generation_config=config_qwen_7b):
message_placeholder_qwen.markdown(response)
if stop_button:
break
st.session_state.history_qwen.append((query, response))
st.session_state.ai_response.append({"role": "robot", "content": response, "avatar": "assistant"})
return response
elif choose_load_model == 1:
config_qwen_14b = GenerationConfig.from_pretrained(
"Qwen/Qwen-14B-Chat-Int4", trust_remote_code=True, resume_download=True, max_length = max_length, top_p = top_p, temperature = temperature
)
def chat_response_qwen_14B(query):
for response in model_qwen_14B.chat_stream(tokenizer_qwen_14B, query, history=st.session_state.history_qwen, generation_config=config_qwen_14b):
message_placeholder_qwen.markdown(response)
if stop_button:
break
st.session_state.history_qwen.append((query, response))
st.session_state.ai_response.append({"role": "robot", "content": response, "avatar": "assistant"})
return response
def clear_all():
st.session_state.history_qwen = []
st.session_state.ai_response = []
if 'history_qwen' not in st.session_state:
st.session_state.history_qwen = []
if 'ai_response' not in st.session_state:
st.session_state.ai_response = []
for ai_response in st.session_state.ai_response:
with st.chat_message(ai_response["role"], avatar=ai_response.get("avatar")):
st.markdown(ai_response["content"])
prompt_placeholder = st.chat_message("user", avatar='user')
with st.chat_message("robot", avatar="assistant"):
message_placeholder_qwen = st.empty()
if prompt:
prompt_placeholder.markdown(prompt)
st.session_state.ai_response.append({"role": "user", "content": prompt, "avatar": 'user'})
stop = st.empty()
stop_button = stop.button('停止', key='break_response')
if choose_load_model == 0:
chat_response_qwen_7B(prompt)
elif choose_load_model == 1:
chat_response_qwen_14B(prompt)
stop.empty()
button_clear = st.button("清空", on_click=clear_all, key='clear')

View File

@@ -0,0 +1,6 @@
transformers==4.32.0
accelerate
tiktoken
einops
transformers_stream_generator==0.0.4
scipy

View File

@@ -0,0 +1,82 @@
"""
This code is supported by the website: https://www.guanjihuan.com
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/38502
"""
import streamlit as st
st.set_page_config(
page_title="Chat",
layout='wide'
)
choose_load_method = 1
if choose_load_method == 0:
# GPU加载需要5G显存
@st.cache_resource
def load_bark_model():
from transformers import AutoProcessor, AutoModel
processor = AutoProcessor.from_pretrained("suno/bark")
model = AutoModel.from_pretrained("suno/bark").to("cuda")
return model, processor
model, processor = load_bark_model()
elif choose_load_method == 1:
# GPU加载bark-small模型需要3G显存
@st.cache_resource
def load_bark_model():
from transformers import AutoProcessor, AutoModel
processor = AutoProcessor.from_pretrained("suno/bark-small")
model = AutoModel.from_pretrained("suno/bark-small").to("cuda")
return model, processor
model, processor = load_bark_model()
elif choose_load_method == 2:
# CPU加载bark模型需要9G内存运行速度慢不推荐
@st.cache_resource
def load_bark_model():
from transformers import AutoProcessor, AutoModel
processor = AutoProcessor.from_pretrained("suno/bark")
model = AutoModel.from_pretrained("suno/bark")
return model, processor
model, processor = load_bark_model()
elif choose_load_method == 3:
# CPU加载bark-small模型需要5G内存运行速度慢不推荐
@st.cache_resource
def load_bark_model():
from transformers import AutoProcessor, AutoModel
processor = AutoProcessor.from_pretrained("suno/bark-small")
model = AutoModel.from_pretrained("suno/bark-small")
return model, processor
model, processor = load_bark_model()
prompt = st.chat_input("在这里输入您的命令")
prompt_placeholder = st.empty()
with prompt_placeholder.container():
with st.chat_message("user", avatar='user'):
pass
if prompt:
with prompt_placeholder.container():
with st.chat_message("user", avatar='user'):
st.write(prompt)
st.write('正在转换中,请稍后。')
inputs = processor(
text=[prompt],
return_tensors="pt",
)
if choose_load_method == 0 or choose_load_method == 1:
inputs = {key: value.to("cuda") for key, value in inputs.items()}
speech_values = model.generate(**inputs, do_sample=True)
import scipy
sampling_rate = 24_000
scipy.io.wavfile.write('./a.wav', rate=sampling_rate, data=speech_values.cpu().numpy().squeeze())
audio_file = open('./a.wav', 'rb')
audio_bytes = audio_file.read()
st.audio(audio_bytes, format='audio/wav')

View File

@@ -0,0 +1,78 @@
"""
This code is supported by the website: https://www.guanjihuan.com
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/38502
"""
import streamlit as st
st.set_page_config(
page_title="Chat",
layout='wide'
)
import openai
API_BASE = "https://api.deepseek.com"
API_KEY = "your key"
with st.sidebar:
with st.expander('参数', expanded=True):
top_p = st.slider('top_p', 0.01, 1.0, step=0.01, value=0.8, key='top_p_session')
temperature = st.slider('temperature', 0.51, 1.0, step=0.01, value=0.85, key='temperature_session')
def reset_parameter():
st.session_state['top_p_session'] = 0.8
st.session_state['temperature_session'] = 0.85
reset_parameter_button = st.button('重置', on_click=reset_parameter)
prompt = st.chat_input("在这里输入您的命令")
def clear_all():
st.session_state.messages = []
st.session_state.ai_response = []
if 'messages' not in st.session_state:
st.session_state.messages = []
if 'ai_response' not in st.session_state:
st.session_state.ai_response = []
for ai_response in st.session_state.ai_response:
with st.chat_message(ai_response["role"], avatar=ai_response.get("avatar")):
st.markdown(ai_response["content"])
prompt_placeholder = st.chat_message("user", avatar='user')
with st.chat_message("robot", avatar="assistant"):
message_placeholder = st.empty()
def response_of_deepseek_chat(prompt):
st.session_state.messages.append({'role': 'user', 'content': prompt})
client = openai.OpenAI(
api_key=API_KEY,
base_url=API_BASE
)
completion = client.chat.completions.create(
model="deepseek-chat",
messages=st.session_state.messages,
stream=True,
temperature=temperature,
top_p=top_p,
)
full_content = ''
for chunk in completion:
response = chunk.choices[0].delta.content or ""
full_content += response
message_placeholder.markdown(full_content)
if stop_button:
break
st.session_state.messages.append({'role': 'assistant',
'content': full_content})
st.session_state.ai_response.append({"role": "robot", "content": full_content, "avatar": "assistant"})
return full_content
if prompt:
prompt_placeholder.markdown(prompt)
st.session_state.ai_response.append({"role": "user", "content": prompt, "avatar": 'user'})
stop = st.empty()
stop_button = stop.button('停止', key='break_response')
response_of_deepseek_chat(prompt)
stop.empty()
button_clear = st.button("清空", on_click=clear_all, key='clear')

View File

@@ -0,0 +1,92 @@
"""
This code is supported by the website: https://www.guanjihuan.com
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/38502
"""
import streamlit as st
st.set_page_config(
page_title="Chat",
layout='wide'
)
import openai
API_BASE = "https://api.deepseek.com"
API_KEY = "your key"
with st.sidebar:
with st.expander('参数', expanded=True):
top_p = st.slider('top_p', 0.01, 1.0, step=0.01, value=0.8, key='top_p_session')
temperature = st.slider('temperature', 0.51, 1.0, step=0.01, value=0.85, key='temperature_session')
def reset_parameter():
st.session_state['top_p_session'] = 0.8
st.session_state['temperature_session'] = 0.85
reset_parameter_button = st.button('重置', on_click=reset_parameter)
prompt = st.chat_input("在这里输入您的命令")
def clear_all():
st.session_state.messages = []
st.session_state.ai_response = []
if 'messages' not in st.session_state:
st.session_state.messages = []
if 'ai_response' not in st.session_state:
st.session_state.ai_response = []
for ai_response in st.session_state.ai_response:
with st.chat_message(ai_response["role"], avatar=ai_response.get("avatar")):
st.markdown(ai_response["content"])
prompt_placeholder = st.chat_message("user", avatar='user')
with st.chat_message("robot", avatar="assistant"):
message_placeholder = st.empty()
def response_of_deepseek_chat(prompt):
st.session_state.messages.append({'role': 'user', 'content': prompt})
client = openai.OpenAI(
api_key=API_KEY,
base_url=API_BASE
)
completion = client.chat.completions.create(
model="deepseek-reasoner",
messages=st.session_state.messages,
stream=True,
temperature=temperature,
top_p=top_p,
)
full_content = ''
all_full_content = ''
think_or_not = 1
answer_or_not = 1
for chunk in completion:
response = chunk.choices[0].delta.content
reasoning_content = chunk.choices[0].delta.reasoning_content
if response == None:
if think_or_not == 1:
all_full_content += '[开始思考]\n\n'
think_or_not = 0
all_full_content += reasoning_content
else:
if answer_or_not == 1:
all_full_content += '\n\n[结束思考]\n\n'
answer_or_not = 0
all_full_content += response
full_content += response
message_placeholder.markdown(all_full_content)
if stop_button:
break
st.session_state.messages.append({'role': 'assistant',
'content': full_content})
st.session_state.ai_response.append({"role": "robot", "content": all_full_content, "avatar": "assistant"})
return all_full_content
if prompt:
prompt_placeholder.markdown(prompt)
st.session_state.ai_response.append({"role": "user", "content": prompt, "avatar": 'user'})
stop = st.empty()
stop_button = stop.button('停止', key='break_response')
response_of_deepseek_chat(prompt)
stop.empty()
button_clear = st.button("清空", on_click=clear_all, key='clear')

View File

@@ -0,0 +1,75 @@
"""
This code is supported by the website: https://www.guanjihuan.com
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/38502
"""
import streamlit as st
st.set_page_config(
page_title="Chat",
layout='wide'
)
import openai
API_KEY = ""
with st.sidebar:
with st.expander('参数', expanded=True):
top_p = st.slider('top_p', 0.01, 1.0, step=0.01, value=0.8, key='top_p_session')
temperature = st.slider('temperature', 0.51, 1.0, step=0.01, value=0.85, key='temperature_session')
def reset_parameter():
st.session_state['top_p_session'] = 0.8
st.session_state['temperature_session'] = 0.85
reset_parameter_button = st.button('重置', on_click=reset_parameter)
prompt = st.chat_input("在这里输入您的命令")
def clear_all():
st.session_state.messages = []
st.session_state.ai_response = []
if 'messages' not in st.session_state:
st.session_state.messages = []
if 'ai_response' not in st.session_state:
st.session_state.ai_response = []
for ai_response in st.session_state.ai_response:
with st.chat_message(ai_response["role"], avatar=ai_response.get("avatar")):
st.markdown(ai_response["content"])
prompt_placeholder = st.chat_message("user", avatar='user')
with st.chat_message("robot", avatar="assistant"):
message_placeholder = st.empty()
def response_of_gpt(prompt):
st.session_state.messages.append({'role': 'user', 'content': prompt})
client = openai.OpenAI(
api_key=API_KEY,
)
completion = client.chat.completions.create(
model="gpt-3.5-turbo-0125",
messages=st.session_state.messages,
stream=True,
temperature=temperature,
top_p=top_p,
)
full_content = ''
for chunk in completion:
response = chunk.choices[0].delta.content or ""
full_content += response
message_placeholder.markdown(full_content)
if stop_button:
break
st.session_state.messages.append({'role': 'assistant',
'content': full_content})
st.session_state.ai_response.append({"role": "robot", "content": full_content, "avatar": "assistant"})
return full_content
if prompt:
prompt_placeholder.markdown(prompt)
st.session_state.ai_response.append({"role": "user", "content": prompt, "avatar": 'user'})
stop = st.empty()
stop_button = stop.button('停止', key='break_response')
response_of_gpt(prompt)
stop.empty()
button_clear = st.button("清空", on_click=clear_all, key='clear')

View File

@@ -0,0 +1,90 @@
"""
This code is supported by the website: https://www.guanjihuan.com
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/38502
"""
import streamlit as st
st.set_page_config(
page_title="Chat",
layout='wide'
)
from zhipuai import ZhipuAI # 在这个版本中测试有效Version-2.1.5.20250106
client = ZhipuAI(api_key="")
with st.sidebar:
with st.expander('参数', expanded=True):
top_p = st.slider('top_p', 0.01, 1.0, value=0.7, step=0.01, key='top_p_session')
temperature = st.slider('temperature', 0.01, 1.0, value=0.95, step=0.01, key='temperature_session')
def reset_parameter():
st.session_state['top_p_session'] = 0.7
st.session_state['temperature_session'] = 0.95
reset_parameter_button = st.button('重置', on_click=reset_parameter)
def chatglm_chat(prompt=[]):
response = client.chat.completions.create(
model="glm-4-air",
messages=prompt,
top_p= top_p,
temperature= temperature,
stream=True
)
return response
def getlength(text):
length = 0
for content in text:
temp = content["content"]
leng = len(temp)
length += leng
return length
def checklen(text):
while (getlength(text) > 8000):
del text[0]
return text
def getText(role,content, text):
jsoncon = {}
jsoncon["role"] = role
jsoncon["content"] = content
text.append(jsoncon)
return text
answer = ""
if "text0" not in st.session_state:
st.session_state.text0 = []
if "messages0" not in st.session_state:
st.session_state.messages0 = []
def clear_all0():
st.session_state.messages0 = []
st.session_state.text0 = []
if st.session_state.messages0 == []:
with st.chat_message("user", avatar="user"):
input_placeholder = st.empty()
with st.chat_message("robot", avatar="assistant"):
message_placeholder = st.empty()
for message in st.session_state.messages0:
with st.chat_message(message["role"], avatar=message.get("avatar")):
st.markdown(message["content"])
prompt_text = st.chat_input("请在这里输入您的命令")
if prompt_text:
if st.session_state.messages0 != []:
with st.chat_message("user", avatar="user"):
input_placeholder = st.empty()
with st.chat_message("robot", avatar="assistant"):
message_placeholder = st.empty()
input_placeholder.markdown(prompt_text)
st.session_state.messages0.append({"role": "user", "content": prompt_text, "avatar": "user"})
st.session_state.text0 = getText("user", prompt_text, st.session_state.text0)
question = checklen(st.session_state.text0)
response = chatglm_chat(question)
for chunk in response:
answer += chunk.choices[0].delta.content or ""
message_placeholder.markdown(answer)
st.session_state.text0 = getText("assistant", answer, st.session_state.text0)
st.session_state.messages0.append({"role": "robot", "content": answer, "avatar": "assistant"})
st.rerun()
button_clear = st.button("清空", on_click=clear_all0, key='clear0')

View File

@@ -0,0 +1,97 @@
"""
This code is supported by the website: https://www.guanjihuan.com
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/38502
"""
import streamlit as st
st.set_page_config(
page_title="Chat",
layout='wide'
)
try:
import zhipuai
except:
import os
os.system('pip install zhipuai==1.0.7')
import zhipuai
# 说明:当前代码只对 pip install zhipuai==1.0.7 有效,对最新版本不兼容。
# 从官网获取 API_KEY
zhipuai.api_key = " "
with st.sidebar:
with st.expander('参数', expanded=True):
top_p = st.slider('top_p', 0.01, 1.0, value=0.7, step=0.01, key='top_p_session')
temperature = st.slider('temperature', 0.01, 1.0, value=0.95, step=0.01, key='temperature_session')
def reset_parameter():
st.session_state['top_p_session'] = 0.7
st.session_state['temperature_session'] = 0.95
reset_parameter_button = st.button('重置', on_click=reset_parameter)
def chatglm_chat(prompt=[]):
response = zhipuai.model_api.sse_invoke(
model="glm-3-turbo",
prompt=prompt,
temperature=temperature,
top_p=top_p,
)
return response
def getlength(text):
length = 0
for content in text:
temp = content["content"]
leng = len(temp)
length += leng
return length
def checklen(text):
while (getlength(text) > 8000):
del text[0]
return text
def getText(role,content, text):
jsoncon = {}
jsoncon["role"] = role
jsoncon["content"] = content
text.append(jsoncon)
return text
answer = ""
if "text0" not in st.session_state:
st.session_state.text0 = []
if "messages0" not in st.session_state:
st.session_state.messages0 = []
def clear_all0():
st.session_state.messages0 = []
st.session_state.text0 = []
if st.session_state.messages0 == []:
with st.chat_message("user", avatar="user"):
input_placeholder = st.empty()
with st.chat_message("robot", avatar="assistant"):
message_placeholder = st.empty()
for message in st.session_state.messages0:
with st.chat_message(message["role"], avatar=message.get("avatar")):
st.markdown(message["content"])
prompt_text = st.chat_input("请在这里输入您的命令")
if prompt_text:
if st.session_state.messages0 != []:
with st.chat_message("user", avatar="user"):
input_placeholder = st.empty()
with st.chat_message("robot", avatar="assistant"):
message_placeholder = st.empty()
input_placeholder.markdown(prompt_text)
st.session_state.messages0.append({"role": "user", "content": prompt_text, "avatar": "user"})
st.session_state.text0 = getText("user", prompt_text, st.session_state.text0)
question = checklen(st.session_state.text0)
response = chatglm_chat(question)
for event in response.events():
answer += event.data
message_placeholder.markdown(answer)
st.session_state.text0 = getText("assistant", answer, st.session_state.text0)
st.session_state.messages0.append({"role": "robot", "content": answer, "avatar": "assistant"})
st.rerun()
button_clear = st.button("清空", on_click=clear_all0, key='clear0')

View File

@@ -0,0 +1,74 @@
"""
This code is supported by the website: https://www.guanjihuan.com
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/38502
"""
import streamlit as st
st.set_page_config(
page_title="Chat",
layout='wide'
)
from volcenginesdkarkruntime import Ark
# 从官网获取 API_KEY
client = Ark(api_key='')
with st.sidebar:
with st.expander('参数', expanded=True):
top_p = st.slider('top_p', 0.01, 1.0, step=0.01, value=0.8, key='top_p_session')
temperature = st.slider('temperature', 0.51, 1.0, step=0.01, value=0.85, key='temperature_session')
def reset_parameter():
st.session_state['top_p_session'] = 0.8
st.session_state['temperature_session'] = 0.85
reset_parameter_button = st.button('重置', on_click=reset_parameter)
prompt = st.chat_input("在这里输入您的命令")
def clear_all():
st.session_state.messages = []
st.session_state.ai_response = []
if 'messages' not in st.session_state:
st.session_state.messages = []
if 'ai_response' not in st.session_state:
st.session_state.ai_response = []
for ai_response in st.session_state.ai_response:
with st.chat_message(ai_response["role"], avatar=ai_response.get("avatar")):
st.markdown(ai_response["content"])
prompt_placeholder = st.chat_message("user", avatar='user')
with st.chat_message("robot", avatar="assistant"):
message_placeholder = st.empty()
def response_of_doubao(prompt):
st.session_state.messages.append({'role': 'user', 'content': prompt})
stream = client.chat.completions.create(
model="",
messages = st.session_state.messages,
stream=True,
top_p=top_p,
temperature=temperature,
)
full_content = ''
for chunk in stream:
if not chunk.choices:
continue
response = chunk.choices[0].delta.content
full_content += response
message_placeholder.markdown(full_content)
if stop_button:
break
st.session_state.messages.append({'role': 'assistant', 'content': full_content})
st.session_state.ai_response.append({"role": "robot", "content": full_content, "avatar": "assistant"})
return full_content
if prompt:
prompt_placeholder.markdown(prompt)
st.session_state.ai_response.append({"role": "user", "content": prompt, "avatar": 'user'})
stop = st.empty()
stop_button = stop.button('停止', key='break_response')
response_of_doubao(prompt)
stop.empty()
button_clear = st.button("清空", on_click=clear_all, key='clear')

View File

@@ -0,0 +1,86 @@
# Streamlit chat client for Baidu ERNIE Speed 128K via the Qianfan REST API,
# with streamed (SSE) output and a stop button.
import streamlit as st
st.set_page_config(
    page_title="Chat",
    layout='wide'
)
import requests
import json
def get_access_token():
    """
    Obtain an access_token using the API Key / Secret Key; replace
    [应用API Key] and [应用Secret Key] in the URL below with your own values.
    """
    url = "https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=[应用API Key]&client_secret=[应用Secret Key]"
    payload = json.dumps("")
    headers = {
        'Content-Type': 'application/json',
        'Accept': 'application/json'
    }
    response = requests.request("POST", url, headers=headers, data=payload)
    return response.json().get("access_token")
with st.sidebar:
    with st.expander('参数', expanded=True):
        top_p = st.slider('top_p', 0.01, 1.0, step=0.01, value=0.8, key='top_p_session')
        temperature = st.slider('temperature', 0.51, 1.0, step=0.01, value=0.85, key='temperature_session')
        def reset_parameter():
            # Restore both sliders to their defaults via their session-state keys.
            st.session_state['top_p_session'] = 0.8
            st.session_state['temperature_session'] = 0.85
        reset_parameter_button = st.button('重置', on_click=reset_parameter)
prompt = st.chat_input("在这里输入您的命令")
def clear_all():
    # Clear both the API message history and the rendered chat history.
    st.session_state.messages = []
    st.session_state.ai_response = []
if 'messages' not in st.session_state:
    st.session_state.messages = []
if 'ai_response' not in st.session_state:
    st.session_state.ai_response = []
for ai_response in st.session_state.ai_response:
    with st.chat_message(ai_response["role"], avatar=ai_response.get("avatar")):
        st.markdown(ai_response["content"])
prompt_placeholder = st.chat_message("user", avatar='user')
with st.chat_message("robot", avatar="assistant"):
    message_placeholder = st.empty()
def response_of_ernie_speed_128k(prompt):
    """Stream an ERNIE Speed reply (SSE lines) into the UI and append it to the history."""
    st.session_state.messages.append({'role': "user", 'content': prompt})
    url = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-speed-128k?access_token=" + get_access_token()
    payload = json.dumps({
        "messages": st.session_state.messages,
        "top_p": top_p,
        "temperature": temperature,
        "stream": True
    })
    headers = {'Content-Type': 'application/json'}
    response = requests.request("POST", url, headers=headers, data=payload, stream=True)
    full_content = ''
    for line in response.iter_lines():
        try:
            # Each SSE line looks like b"data: {...}"; strip the 5-byte prefix and parse.
            dict_data = json.loads(line.decode("UTF-8")[5:])
            full_content += dict_data['result']
            message_placeholder.markdown(full_content)
        except (ValueError, KeyError):
            # Skip keep-alive/empty lines and frames without a 'result' field.
            # ValueError covers JSONDecodeError and UnicodeDecodeError; the
            # original bare `except:` would even swallow KeyboardInterrupt.
            pass
        if stop_button:
            break
    st.session_state.messages.append({'role': "assistant",
                                      'content': full_content})
    st.session_state.ai_response.append({"role": "robot", "content": full_content, "avatar": "assistant"})
    return full_content
if prompt:
    prompt_placeholder.markdown(prompt)
    st.session_state.ai_response.append({"role": "user", "content": prompt, "avatar": 'user'})
    stop = st.empty()
    stop_button = stop.button('停止', key='break_response')
    response_of_ernie_speed_128k(prompt)
    stop.empty()
    button_clear = st.button("清空", on_click=clear_all, key='clear')

View File

@@ -0,0 +1,98 @@
"""
This code is supported by the website: https://www.guanjihuan.com
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/38502
"""
import streamlit as st
st.set_page_config(
page_title="Chat",
layout='wide'
)
import json
import types
# 安装pip install --upgrade tencentcloud-sdk-python
from tencentcloud.common import credential
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.common.profile.http_profile import HttpProfile
from tencentcloud.hunyuan.v20230901 import hunyuan_client, models
with st.sidebar:
with st.expander('参数', expanded=True):
top_p = st.slider('top_p', 0.01, 1.0, step=0.01, value=0.8, key='top_p_session')
temperature = st.slider('temperature', 0.51, 1.0, step=0.01, value=0.85, key='temperature_session')
def reset_parameter():
st.session_state['top_p_session'] = 0.8
st.session_state['temperature_session'] = 0.85
reset_parameter_button = st.button('重置', on_click=reset_parameter)
prompt = st.chat_input("在这里输入您的命令")
def clear_all():
st.session_state.messages = []
st.session_state.ai_response = []
if 'messages' not in st.session_state:
st.session_state.messages = []
if 'ai_response' not in st.session_state:
st.session_state.ai_response = []
for ai_response in st.session_state.ai_response:
with st.chat_message(ai_response["role"], avatar=ai_response.get("avatar")):
st.markdown(ai_response["content"])
prompt_placeholder = st.chat_message("user", avatar='user')
with st.chat_message("robot", avatar="assistant"):
message_placeholder_hunyuan = st.empty()
def response_of_hunyuan(prompt):
st.session_state.messages.append({'Role': 'user', 'Content': prompt})
# 实例化一个认证对象,入参需要传入腾讯云账户 SecretId 和 SecretKey此处还需注意密钥对的保密
# 代码泄露可能会导致 SecretId 和 SecretKey 泄露并威胁账号下所有资源的安全性。以下代码示例仅供参考建议采用更安全的方式来使用密钥请参见https://cloud.tencent.com/document/product/1278/85305
# 密钥可前往官网控制台 https://console.cloud.tencent.com/cam/capi 进行获取
cred = credential.Credential("SecretId", "SecretKey")
# 实例化一个http选项可选的没有特殊需求可以跳过
httpProfile = HttpProfile()
httpProfile.endpoint = "hunyuan.tencentcloudapi.com"
# 实例化一个client选项可选的没有特殊需求可以跳过
clientProfile = ClientProfile()
clientProfile.httpProfile = httpProfile
# 实例化要请求产品的client对象,clientProfile是可选的
client = hunyuan_client.HunyuanClient(cred, "", clientProfile)
# 实例化一个请求对象,每个接口都会对应一个request对象
req = models.ChatCompletionsRequest()
params = {
"Model": "hunyuan-lite",
"Messages": st.session_state.messages,
"TopP": top_p,
"Temperature": temperature,
"Stream": True,
}
req.from_json_string(json.dumps(params))
# 返回的resp是一个ChatCompletionsResponse的实例与请求对象对应
resp = client.ChatCompletions(req)
# 输出json格式的字符串回包
response = ''
if isinstance(resp, types.GeneratorType): # 流式响应
for event in resp:
answer = json.loads(event['data'])
response += answer["Choices"][0]['Delta']['Content']
message_placeholder_hunyuan.markdown(response)
if stop_button:
break
st.session_state.messages.append({'Role': 'assistant', 'Content': response})
st.session_state.ai_response.append({"role": "robot", "content": response, "avatar": "assistant"})
return response
if prompt:
prompt_placeholder.markdown(prompt)
st.session_state.ai_response.append({"role": "user", "content": prompt, "avatar": 'user'})
stop = st.empty()
stop_button = stop.button('停止', key='break_response')
response_of_hunyuan(prompt)
stop.empty()
button_clear = st.button("清空", on_click=clear_all, key='clear')

View File

@@ -0,0 +1,322 @@
"""
This code is supported by the website: https://www.guanjihuan.com
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/38502
"""
import streamlit as st
st.set_page_config(
page_title="Chat",
layout='wide'
)
# 以下密钥信息从控制台获取
appid = " " # 填写控制台中获取的 APPID 信息
api_secret = " " # 填写控制台中获取的 APISecret 信息
api_key =" " # 填写控制台中获取的 APIKey 信息
with st.sidebar:
with st.expander('模型', expanded=True):
API_model = st.radio('选择:', ('讯飞 - 星火大模型 V1.5', '讯飞 - 星火大模型 V2.0', '讯飞 - 星火大模型 V3.0', '讯飞 - 星火大模型 V3.5'), key='choose_API_model')
if API_model == '讯飞 - 星火大模型 V1.5':
API_model_0 = '星火大模型 V1.5'
elif API_model == '讯飞 - 星火大模型 V2.0':
API_model_0 = '星火大模型 V2.0'
elif API_model == '讯飞 - 星火大模型 V3.0':
API_model_0 = '星火大模型 V3.0'
elif API_model == '讯飞 - 星火大模型 V3.5':
API_model_0 = '星火大模型 V3.5'
st.write('当前模型:'+API_model_0)
with st.expander('参数', expanded=True):
top_k = st.slider('top_k', 1, 6, value=4, step=1, key='top_k_session')
temperature = st.slider('temperature', 0.01, 1.0, value=0.5, step=0.01, key='temperature_session')
def reset_parameter():
st.session_state['top_k_session'] = 4
st.session_state['temperature_session'] = 0.5
reset_parameter_button = st.button('重置', on_click=reset_parameter)
# 云端环境的服务地址
if API_model == '讯飞 - 星火大模型 V1.5':
domain = "general" # v1.5版本
Spark_url = "ws://spark-api.xf-yun.com/v1.1/chat" # v1.5环境的地址
elif API_model == '讯飞 - 星火大模型 V2.0':
domain = "generalv2" # v2.0版本
Spark_url = "ws://spark-api.xf-yun.com/v2.1/chat" # v2.0环境的地址
elif API_model == '讯飞 - 星火大模型 V3.0':
domain = "generalv3" # v3.0版本
Spark_url = "ws://spark-api.xf-yun.com/v3.1/chat" # v3.0环境的地址
elif API_model == '讯飞 - 星火大模型 V3.5':
domain = "generalv3.5" # v3.5版本
Spark_url = "ws://spark-api.xf-yun.com/v3.5/chat" # v3.5环境的地址
import _thread as thread
import base64
import datetime
import hashlib
import hmac
import json
from urllib.parse import urlparse
import ssl
from datetime import datetime
from time import mktime
from urllib.parse import urlencode
from wsgiref.handlers import format_date_time
import websocket # 使用websocket_client
answer = ""
class Ws_Param(object):
    """Builds the HMAC-SHA256-authenticated websocket URL for the Spark API."""
    # Initialization
    def __init__(self, APPID, APIKey, APISecret, Spark_url):
        self.APPID = APPID
        self.APIKey = APIKey
        self.APISecret = APISecret
        self.host = urlparse(Spark_url).netloc
        self.path = urlparse(Spark_url).path
        self.Spark_url = Spark_url
    # Generate the url
    def create_url(self):
        # Generate an RFC1123-formatted timestamp
        now = datetime.now()
        date = format_date_time(mktime(now.timetuple()))
        # Concatenate the string to be signed
        signature_origin = "host: " + self.host + "\n"
        signature_origin += "date: " + date + "\n"
        signature_origin += "GET " + self.path + " HTTP/1.1"
        # Sign with hmac-sha256
        signature_sha = hmac.new(self.APISecret.encode('utf-8'), signature_origin.encode('utf-8'),
                                 digestmod=hashlib.sha256).digest()
        signature_sha_base64 = base64.b64encode(signature_sha).decode(encoding='utf-8')
        authorization_origin = f'api_key="{self.APIKey}", algorithm="hmac-sha256", headers="host date request-line", signature="{signature_sha_base64}"'
        authorization = base64.b64encode(authorization_origin.encode('utf-8')).decode(encoding='utf-8')
        # Collect the authentication parameters into a dict
        v = {
            "authorization": authorization,
            "date": date,
            "host": self.host
        }
        # Append the auth parameters to build the final url
        url = self.Spark_url + '?' + urlencode(v)
        # When debugging, print this url and compare it against the official demo.
        return url
# Handle websocket errors
def on_error(ws, error):
    print("### error:", error)
# Handle websocket close
def on_close(ws,one,two):
    print(" ")
# Handle websocket connection established: start sending the request in a new thread
def on_open(ws):
    thread.start_new_thread(run, (ws,))
def run(ws, *args):
    # Serialize the request parameters and send them over the websocket.
    data = json.dumps(gen_params(appid=ws.appid, domain= ws.domain,question=ws.question))
    ws.send(data)
# Handle incoming websocket messages
def on_message(ws, message):
    # print(message)
    data = json.loads(message)
    code = data['header']['code']
    if code != 0:
        # Non-zero code signals a request error; close the connection.
        print(f'请求错误: {code}, {data}')
        ws.close()
    else:
        choices = data["payload"]["choices"]
        status = choices["status"]
        content = choices["text"][0]["content"]
        global answer
        answer += content  # accumulate the streamed text into the module-level buffer
        message_placeholder.markdown(answer)
        if status == 2:
            # status 2 marks the final frame of the stream.
            ws.close()
def gen_params(appid, domain, question):
    """Build the Spark chat request payload from the app id, domain and question.

    `temperature` and `top_k` are module-level values set by the sidebar sliders.
    """
    data = {
        "header": {
            "app_id": appid,
            "uid": "1234"
        },
        "parameter": {
            "chat": {
                "domain": domain,
                "random_threshold": 0.5,
                "temperature": temperature,
                "top_k": top_k,
                "max_tokens": 4096,
                "auditing": "default"
            }
        },
        "payload": {
            "message": {
                "text": question
            }
        }
    }
    return data
def main_chat(appid, api_key, api_secret, Spark_url, domain, question):
    """Open a websocket to the Spark API and stream the reply (blocks until the socket closes)."""
    wsParam = Ws_Param(appid, api_key, api_secret, Spark_url)
    websocket.enableTrace(False)
    wsUrl = wsParam.create_url()
    ws = websocket.WebSocketApp(wsUrl, on_message=on_message, on_error=on_error, on_close=on_close, on_open=on_open)
    # Attach request context so the callbacks can read it from the ws object
    ws.appid = appid
    ws.question = question
    ws.domain = domain
    # Certificate verification disabled, as in the official demo
    ws.run_forever(sslopt={"cert_reqs": ssl.CERT_NONE})
def getlength(text):
    """Total number of characters over all message contents in the history."""
    return sum(len(content["content"]) for content in text)

def checklen(text):
    """Drop oldest messages until the history is at most 8000 characters; return it."""
    while getlength(text) > 8000:
        del text[0]
    return text

def getText(role, content, text):
    """Append a {role, content} message to the history and return the history."""
    jsoncon = {"role": role, "content": content}
    text.append(jsoncon)
    return text
# Chat UI, Spark V1.5 branch.
# NOTE(review): indentation was lost when this snippet was extracted from the diff
# view; the statements below belong to the original Streamlit app's if-branch.
prompt_text = st.chat_input("请在这里输入您的命令")
if API_model == '讯飞 - 星火大模型 V1.5':
# Per-model conversation history ('text') and rendered chat log ('messages')
if "text" not in st.session_state:
st.session_state.text = []
if "messages" not in st.session_state:
st.session_state.messages = []
# Reset both histories (wired to the "清空" button below)
def clear_all():
st.session_state.messages = []
st.session_state.text = []
# First render: create empty user/assistant placeholders
if st.session_state.messages == []:
with st.chat_message("user", avatar="user"):
input_placeholder = st.empty()
with st.chat_message("robot", avatar="assistant"):
message_placeholder = st.empty()
# Replay previous turns
for message in st.session_state.messages:
with st.chat_message(message["role"], avatar=message.get("avatar")):
st.markdown(message["content"])
if prompt_text:
if st.session_state.messages != []:
with st.chat_message("user", avatar="user"):
input_placeholder = st.empty()
with st.chat_message("robot", avatar="assistant"):
message_placeholder = st.empty()
input_placeholder.markdown(prompt_text)
st.session_state.messages.append({"role": "user", "content": prompt_text, "avatar": "user"})
# Trim the history to <= 8000 chars, stream the reply, then store both sides
st.session_state.text = getText("user", prompt_text, st.session_state.text)
question = checklen(st.session_state.text)
main_chat(appid,api_key,api_secret,Spark_url,domain,question)
st.session_state.text = getText("assistant", answer, st.session_state.text)
st.session_state.messages.append({"role": "robot", "content": answer, "avatar": "assistant"})
st.rerun()
button_clear = st.button("清空", on_click=clear_all)
# Chat UI, Spark V2.0 branch — same flow as V1.5 but with its own session keys
# (text2/messages2) so each model keeps a separate history.
elif API_model == '讯飞 - 星火大模型 V2.0':
if "text2" not in st.session_state:
st.session_state.text2 = []
if "messages2" not in st.session_state:
st.session_state.messages2 = []
# Reset this model's histories
def clear_all2():
st.session_state.messages2 = []
st.session_state.text2 = []
if st.session_state.messages2 == []:
with st.chat_message("user", avatar="user"):
input_placeholder = st.empty()
with st.chat_message("robot", avatar="assistant"):
message_placeholder = st.empty()
# Replay previous turns
for message in st.session_state.messages2:
with st.chat_message(message["role"], avatar=message.get("avatar")):
st.markdown(message["content"])
if prompt_text:
if st.session_state.messages2 != []:
with st.chat_message("user", avatar="user"):
input_placeholder = st.empty()
with st.chat_message("robot", avatar="assistant"):
message_placeholder = st.empty()
input_placeholder.markdown(prompt_text)
st.session_state.messages2.append({"role": "user", "content": prompt_text, "avatar": "user"})
st.session_state.text2 = getText("user", prompt_text, st.session_state.text2)
question = checklen(st.session_state.text2)
main_chat(appid,api_key,api_secret,Spark_url,domain,question)
st.session_state.text2 = getText("assistant", answer, st.session_state.text2)
st.session_state.messages2.append({"role": "robot", "content": answer, "avatar": "assistant"})
st.rerun()
button_clear = st.button("清空", on_click=clear_all2, key='clear2')
# Chat UI, Spark V3.0 branch — same flow with session keys text3/messages3.
elif API_model == '讯飞 - 星火大模型 V3.0':
if "text3" not in st.session_state:
st.session_state.text3 = []
if "messages3" not in st.session_state:
st.session_state.messages3 = []
# Reset this model's histories
def clear_all3():
st.session_state.messages3 = []
st.session_state.text3 = []
if st.session_state.messages3 == []:
with st.chat_message("user", avatar="user"):
input_placeholder = st.empty()
with st.chat_message("robot", avatar="assistant"):
message_placeholder = st.empty()
# Replay previous turns
for message in st.session_state.messages3:
with st.chat_message(message["role"], avatar=message.get("avatar")):
st.markdown(message["content"])
if prompt_text:
if st.session_state.messages3 != []:
with st.chat_message("user", avatar="user"):
input_placeholder = st.empty()
with st.chat_message("robot", avatar="assistant"):
message_placeholder = st.empty()
input_placeholder.markdown(prompt_text)
st.session_state.messages3.append({"role": "user", "content": prompt_text, "avatar": "user"})
st.session_state.text3 = getText("user", prompt_text, st.session_state.text3)
question = checklen(st.session_state.text3)
main_chat(appid,api_key,api_secret,Spark_url,domain,question)
st.session_state.text3 = getText("assistant", answer, st.session_state.text3)
st.session_state.messages3.append({"role": "robot", "content": answer, "avatar": "assistant"})
st.rerun()
button_clear = st.button("清空", on_click=clear_all3, key='clear3')
# Chat UI, Spark V3.5 branch — same flow with session keys text4/messages4.
elif API_model == '讯飞 - 星火大模型 V3.5':
if "text4" not in st.session_state:
st.session_state.text4 = []
if "messages4" not in st.session_state:
st.session_state.messages4 = []
# Reset this model's histories
def clear_all4():
st.session_state.messages4 = []
st.session_state.text4 = []
if st.session_state.messages4 == []:
with st.chat_message("user", avatar="user"):
input_placeholder = st.empty()
with st.chat_message("robot", avatar="assistant"):
message_placeholder = st.empty()
# Replay previous turns
for message in st.session_state.messages4:
with st.chat_message(message["role"], avatar=message.get("avatar")):
st.markdown(message["content"])
if prompt_text:
if st.session_state.messages4 != []:
with st.chat_message("user", avatar="user"):
input_placeholder = st.empty()
with st.chat_message("robot", avatar="assistant"):
message_placeholder = st.empty()
input_placeholder.markdown(prompt_text)
st.session_state.messages4.append({"role": "user", "content": prompt_text, "avatar": "user"})
st.session_state.text4 = getText("user", prompt_text, st.session_state.text4)
question = checklen(st.session_state.text4)
main_chat(appid,api_key,api_secret,Spark_url,domain,question)
st.session_state.text4 = getText("assistant", answer, st.session_state.text4)
st.session_state.messages4.append({"role": "robot", "content": answer, "avatar": "assistant"})
st.rerun()
button_clear = st.button("清空", on_click=clear_all4, key='clear4')

View File

@@ -0,0 +1,73 @@
"""
This code is supported by the website: https://www.guanjihuan.com
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/38502
"""
import streamlit as st
st.set_page_config(
    page_title="Chat",
    layout='wide'
)
from dashscope import Generation
from dashscope.api_entities.dashscope_response import Role
import dashscope
dashscope.api_key = ""

with st.sidebar:
    with st.expander('参数', expanded=True):
        top_p = st.slider('top_p', 0.01, 1.0, step=0.01, value=0.8, key='top_p_session')
        temperature = st.slider('temperature', 0.51, 1.0, step=0.01, value=0.85, key='temperature_session')
        def reset_parameter():
            # Restore the sliders to their defaults
            st.session_state['top_p_session'] = 0.8
            st.session_state['temperature_session'] = 0.85
        reset_parameter_button = st.button('重置', on_click=reset_parameter)

prompt = st.chat_input("在这里输入您的命令")

def clear_all():
    # Reset the model conversation and the rendered history
    st.session_state.messages = []
    st.session_state.ai_response = []

if 'messages' not in st.session_state:
    st.session_state.messages = []
if 'ai_response' not in st.session_state:
    st.session_state.ai_response = []

# Replay previous turns
for ai_response in st.session_state.ai_response:
    with st.chat_message(ai_response["role"], avatar=ai_response.get("avatar")):
        st.markdown(ai_response["content"])

prompt_placeholder = st.chat_message("user", avatar='user')
with st.chat_message("robot", avatar="assistant"):
    message_placeholder_qwen = st.empty()

def response_of_qwen(prompt):
    """Send the conversation to qwen-turbo and stream the reply into the UI."""
    st.session_state.messages.append({'role': Role.USER, 'content': prompt})
    responses = Generation.call("qwen-turbo",
                                messages=st.session_state.messages,
                                result_format='message',
                                stream=True,
                                incremental_output=True,
                                top_p=top_p,
                                temperature=temperature,
                                )
    full_content = ''
    for response in responses:
        full_content += response.output.choices[0]['message']['content']
        message_placeholder_qwen.markdown(full_content)
        if stop_button:  # "停止" button pressed: stop streaming
            break
    st.session_state.messages.append({'role': response.output.choices[0]['message']['role'],
                                      'content': full_content})
    st.session_state.ai_response.append({"role": "robot", "content": full_content, "avatar": "assistant"})
    return full_content

if prompt:
    prompt_placeholder.markdown(prompt)
    st.session_state.ai_response.append({"role": "user", "content": prompt, "avatar": 'user'})
    stop = st.empty()
    stop_button = stop.button('停止', key='break_response')
    response_of_qwen(prompt)
    stop.empty()

button_clear = st.button("清空", on_click=clear_all, key='clear')

View File

@@ -0,0 +1,77 @@
"""
This code is supported by the website: https://www.guanjihuan.com
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/38502
"""
import streamlit as st
st.set_page_config(
    page_title="Chat",
    layout='wide'
)
import openai

# 01.AI (Yi) OpenAI-compatible endpoint and key
API_BASE = "https://api.lingyiwanwu.com/v1"
API_KEY = "your key"

with st.sidebar:
    with st.expander('参数', expanded=True):
        top_p = st.slider('top_p', 0.01, 1.0, step=0.01, value=0.8, key='top_p_session')
        temperature = st.slider('temperature', 0.51, 1.0, step=0.01, value=0.85, key='temperature_session')
        def reset_parameter():
            # Restore the sliders to their defaults
            st.session_state['top_p_session'] = 0.8
            st.session_state['temperature_session'] = 0.85
        reset_parameter_button = st.button('重置', on_click=reset_parameter)

prompt = st.chat_input("在这里输入您的命令")

def clear_all():
    # Reset the model conversation and the rendered history
    st.session_state.messages = []
    st.session_state.ai_response = []

if 'messages' not in st.session_state:
    st.session_state.messages = []
if 'ai_response' not in st.session_state:
    st.session_state.ai_response = []

# Replay previous turns
for ai_response in st.session_state.ai_response:
    with st.chat_message(ai_response["role"], avatar=ai_response.get("avatar")):
        st.markdown(ai_response["content"])

prompt_placeholder = st.chat_message("user", avatar='user')
with st.chat_message("robot", avatar="assistant"):
    message_placeholder = st.empty()

def response_of_yi(prompt):
    """Send the conversation to yi-spark and stream the reply into the UI."""
    st.session_state.messages.append({'role': 'user', 'content': prompt})
    client = openai.OpenAI(
        api_key=API_KEY,
        base_url=API_BASE
    )
    completion = client.chat.completions.create(
        model="yi-spark",
        messages=st.session_state.messages,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    )
    full_content = ''
    for chunk in completion:
        response = chunk.choices[0].delta.content or ""
        full_content += response
        message_placeholder.markdown(full_content)
        if stop_button:  # "停止" button pressed: stop streaming
            break
    st.session_state.messages.append({'role': 'assistant',
                                      'content': full_content})
    st.session_state.ai_response.append({"role": "robot", "content": full_content, "avatar": "assistant"})
    return full_content

if prompt:
    prompt_placeholder.markdown(prompt)
    st.session_state.ai_response.append({"role": "user", "content": prompt, "avatar": 'user'})
    stop = st.empty()
    stop_button = stop.button('停止', key='break_response')
    response_of_yi(prompt)
    stop.empty()

button_clear = st.button("清空", on_click=clear_all, key='clear')

View File

@@ -0,0 +1,22 @@
from flask import Flask, request

app = Flask(__name__)

def get_response(user_input):
    """Produce the (placeholder) AI reply for the given user input."""
    response = f"你说了'{user_input}',我想了想。"
    return response

@app.route('/', methods=['POST'])
def API_server():
    """POST endpoint: read 'prompt' from the JSON body and return the reply."""
    try:
        data = request.get_json()  # parse the JSON request body
        user_input = data.get('prompt', '')  # 'prompt' carries the user input
    except Exception as e:
        return '请求错误!请联系 API 管理员。'  # parsing failed
    if not user_input:
        return "请求错误!请联系 API 管理员。"  # empty input
    ai_response = get_response(user_input)
    return ai_response

if __name__ == '__main__':
    # Recommended to disable debug=True after testing: the reloader restarts the
    # server whenever a file in the folder changes.
    app.run(debug=True, threaded=True, port=123)

View File

@@ -0,0 +1,11 @@
import requests

url = "http://localhost:123"  # API address
data = {
    "prompt": "Hello, how are you?"  # request payload; 'prompt' carries the user input
}
response = requests.post(url, json=data)  # send POST with a JSON body
if response.status_code == 200:  # success
    print(response.text)  # print the complete response
else:
    print(f"请求失败,状态码: {response.status_code}")

View File

@@ -0,0 +1,24 @@
from flask import Flask, Response, request

app = Flask(__name__)

def get_response(user_input):
    """Yield the reply one character at a time to simulate a streamed response."""
    import time
    ai_response = f"你说了'{user_input}',我想了想。"
    for char in ai_response:
        yield f"{char}\n\n"  # SSE-style chunk per character
        time.sleep(0.2)

@app.route('/', methods=['POST'])
def API_server():
    """POST endpoint: read 'prompt' from the JSON body and stream back the reply."""
    try:
        data = request.get_json()  # parse the JSON request body
        user_input = data.get('prompt', '')  # 'prompt' carries the user input
    except Exception as e:
        return '请求错误!请联系 API 管理员。'  # parsing failed
    if not user_input:
        return "请求错误!请联系 API 管理员。"  # empty input
    return Response(get_response(user_input), content_type='text/event-stream')  # streamed response

if __name__ == '__main__':
    # Recommended to disable debug=True after testing: the reloader restarts the
    # server whenever a file in the folder changes.
    app.run(debug=True, threaded=True, port=123)

View File

@@ -0,0 +1,13 @@
import requests

url = "http://localhost:123"  # API address
data = {
    "prompt": "Hello, how are you?"  # request payload; 'prompt' carries the user input
}
# stream=True so the body can be consumed incrementally as it arrives
response = requests.post(url, json=data, stream=True)
if response.status_code == 200:  # success
    for line in response.iter_lines():  # read the streamed response line by line
        if line:
            print(line.decode('utf-8'), end='', flush=True)  # decode and print each chunk
else:
    print(f"请求失败,状态码: {response.status_code}")

View File

@@ -9,8 +9,8 @@ response = ollama.chat(model="llama3.2:latest", messages=[{"role": "user", "cont
for part in response:
print(part['message']['content'], end='', flush=True)
# 流式输出,模型后台常驻需要手动 ollama stop 关闭
# 流式输出,同时设置模型后台常驻需要手动 ollama stop 关闭
import ollama
response = ollama.chat(model="llama3.2:latest", messages=[{"role": "user", "content": "你好"}], stream=True, keep_alive=-1)
for part in response:
print(part['message']['content'], end='', flush=True)
print(part['message']['content'], end='', flush=True)

View File

@@ -0,0 +1,30 @@
"""
This code is supported by the website: https://www.guanjihuan.com
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/44839
"""
import os
os.environ["OMP_NUM_THREADS"] = "1" # KMeans is known to have a memory leak on Windows with MKL, when there are less chunks than available threads. You can avoid it by setting this environment variable
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
import matplotlib.pyplot as plt
X, y = make_blobs(n_samples=300, centers=4, random_state=42) # generate sample data (four clusters)
print(X.shape)
print(y.shape)
plt.scatter(X[:, 0], X[:, 1]) # show the raw data
plt.show()
plt.scatter(X[:, 0], X[:, 1], c=y, cmap='viridis') # color the points by their true labels
plt.show()
kmeans = KMeans(n_clusters=3, random_state=42) # run KMeans clustering (deliberately three clusters here)
kmeans.fit(X)
labels = kmeans.labels_ # cluster labels assigned by KMeans
print(labels.shape)
plt.scatter(X[:, 0], X[:, 1], c=labels, cmap='viridis') # plot the clustering result
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s=300, c='red', marker='X') # plot the cluster centers
plt.title('KMeans Result')
plt.show()

View File

@@ -0,0 +1,11 @@
import guan
# Find all Git repositories under a directory
git_repository_array = guan.find_git_repositories('D:/data')
guan.print_array(git_repository_array)
print('\n---\n')
# Git repositories that still have uncommitted changes
git_repository_array_to_commit = guan.get_git_repositories_to_commit(git_repository_array)
guan.print_array(git_repository_array_to_commit)
print('\n---\n')

View File

@@ -0,0 +1,20 @@
import guan

# Find all Git repositories under a directory
git_repository_array = guan.find_git_repositories('D:/data')
guan.print_array(git_repository_array)
print('\n---\n')

# Git repositories that still have uncommitted changes
git_repository_array_to_commit = guan.get_git_repositories_to_commit(git_repository_array)
guan.print_array(git_repository_array_to_commit)
print('\n---\n')

import os
# Commit each repository that has uncommitted changes
for directory in git_repository_array_to_commit:
    print(directory)
    os.chdir(directory)  # enter the repository
    os.system('git add .')  # stage all changes
    os.system('git commit -m update')  # commit the staged changes

View File

@@ -0,0 +1,24 @@
"""
This code is supported by the website: https://www.guanjihuan.com
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/45135
"""

class Atom:
    """A simple atom record with a name, an index and 3D coordinates."""
    def __init__(self, name='atom', index=0, x=0, y=0, z=0):
        self.name = name
        self.index = index
        self.x = x
        self.y = y
        self.z = z

# Build a 3x3 grid of Atom objects, indexed in row-major order
atom_object_list = []
index = 0
for i0 in range(3):
    for j0 in range(3):
        atom = Atom(index=index, x=i0, y=j0)
        atom_object_list.append(atom)
        index += 1
print(atom_object_list)
for atom_object in atom_object_list:
    print([atom_object.name, atom_object.index, atom_object.x, atom_object.y, atom_object.z])

View File

@@ -0,0 +1,33 @@
"""
This code is supported by the website: https://www.guanjihuan.com
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/45135
"""
class Atom:
def __init__(self, name='atom', index=0, x=0, y=0, z=0):
self.name = name
self.index = index
self.x = x
self.y = y
self.z = z
atom_object_list = []
index = 0
for i0 in range(3):
for j0 in range(3):
atom = Atom(index=index, x=i0, y=j0)
atom_object_list.append(atom)
index += 1
print(atom_object_list)
atom_dict_list = []
for atom_object in atom_object_list:
atom_dict= {
'name': atom_object.name,
'index': atom_object.index,
'x': atom_object.x,
'y': atom_object.y,
'z': atom_object.z,
}
atom_dict_list.append(atom_dict)
print(atom_dict_list)

View File

@@ -0,0 +1,22 @@
"""
This code is supported by the website: https://www.guanjihuan.com
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/45135
"""
atom_dict_list = []
index = 0
for i0 in range(3):
for j0 in range(3):
atom_dict= {
'name': 'atom',
'index': index,
'x': i0,
'y': j0,
'z': 0,
}
atom_dict_list.append(atom_dict)
index += 1
print(atom_dict_list)
for atom_dict in atom_dict_list:
print([atom_dict['name'], atom_dict['index'], atom_dict['x'], atom_dict['y'], atom_dict['z']])

View File

@@ -0,0 +1,59 @@
"""
This code is supported by the website: https://www.guanjihuan.com
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/45201
"""
# False 布尔值
print(bool(False))
print(bool(0))
print(bool(0.0))
print(bool(0.0j))
print(bool(None))
print(bool(''))
print(bool([]))
print(bool({}))
print(bool(()))
print(bool(set()))
print()
# 虽然布尔值相同,但只有 False, 0, 0.0, 0.0j 和 False 等价
print(False==False) # True
print(False==0) # True
print(False==0.0) # True
print(False==0.0j) # True
print(False==None) # False
print(False=='') # False
print(False==[]) # False
print(False=={}) # False
print(False==()) # False
print(False==set()) # False
print()
def true_or_false(a):
if a:
print('True')
else:
print('False')
# 'if' 环境中的 False 测试
true_or_false(False)
true_or_false(0)
true_or_false(0.0)
true_or_false(0.0j)
true_or_false(None)
true_or_false('')
true_or_false([])
true_or_false({})
true_or_false(())
true_or_false(set())
print()
# 'if' 环境中的 True 测试
true_or_false(True)
true_or_false('True')
true_or_false('False')
true_or_false('a')
true_or_false(1)
true_or_false(-1)
true_or_false(2)
print()

View File

@@ -0,0 +1,106 @@
"""
This code is supported by the website: https://www.guanjihuan.com
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/45275
"""
import numpy as np
import time
import sys
from numba import jit

# Matrix sizes to benchmark
n_array = np.concatenate((np.arange(1000, 10000, 1000), np.arange(10000, 40000, 10000)))
print(f'n_array={n_array}\n')

@jit
def numba_test(C, n):
    # JIT-compiled element-by-element random fill, for comparison with plain Python
    for i0 in range(n):
        for j0 in range(n):
            C[i0, j0] = np.random.rand()
    return C

for n in n_array:
    print(f'n={n}')
    A = np.random.rand(n, n)
    B = np.random.rand(n, n)
    C = np.random.rand(n, n)
    # Memory footprint of one matrix (shallow object size)
    size = sys.getsizeof(C)
    print(f'矩阵占用内存: {size/(1024*1024):.2f} MB')
    # Trace
    start_time = time.time()
    trace_A = np.trace(A)
    trace_time = time.time() - start_time
    print(f"矩阵的迹时间: {trace_time:.3f}")
    # Transpose (a lazy view in NumPy, so essentially free)
    start_time = time.time()
    A_T = A.T
    transpose_time = time.time() - start_time
    print(f"矩阵转置时间: {transpose_time:.3f}")
    # Addition
    start_time = time.time()
    C = A + B
    add_time = time.time() - start_time
    print(f"矩阵加法时间: {add_time:.3f}")
    # numba loop fill
    start_time = time.time()
    numba_test(C, n)
    create_time = time.time() - start_time
    print(f"numba for 循环赋值时间: {create_time:.3f}")
    # Matrix creation
    start_time = time.time()
    C = np.random.rand(n, n)
    create_time = time.time() - start_time
    print(f"矩阵创建时间: {create_time:.3f}")
    # Pure-Python loop fill
    start_time = time.time()
    for i0 in range(n):
        for j0 in range(n):
            C[i0, j0] = np.random.rand()
    create_time = time.time() - start_time
    print(f"for 循环赋值时间: {create_time:.3f}")
    # Determinant
    start_time = time.time()
    det_A = np.linalg.det(A)
    det_time = time.time() - start_time
    print(f"矩阵行列式时间: {det_time:.3f}")
    # Multiplication
    start_time = time.time()
    C = np.dot(A, B)
    multiply_time = time.time() - start_time
    print(f"矩阵乘法时间: {multiply_time:.3f}")
    # Inverse
    start_time = time.time()
    inv_A = np.linalg.inv(A)
    inv_time = time.time() - start_time
    print(f"矩阵求逆时间: {inv_time:.3f}")
    # Rank
    start_time = time.time()
    rank_A = np.linalg.matrix_rank(A)
    rank_time = time.time() - start_time
    print(f"矩阵的秩时间: {rank_time:.3f}")
    # Eigenvalues only
    start_time = time.time()
    eigenvalues_A = np.linalg.eigvals(A)
    eigen_time = time.time() - start_time
    print(f"矩阵特征值时间: {eigen_time:.3f}")
    # Eigenvalues and eigenvectors
    start_time = time.time()
    eigenvalues_A, eigenvector_A = np.linalg.eig(A)
    eigen_time = time.time() - start_time
    print(f"矩阵特征值和特征向量时间: {eigen_time:.3f}")
    print()

View File

@@ -0,0 +1,11 @@
import guan  # https://py.guanjihuan.com | install: pip install --upgrade guan
import numpy as np

# Generate one qsub .sh file per CPU-core count (1..8)
cpu_num_array = np.arange(1, 9)
sh_filename = 'task'
task_name = 'test'
py_filename = 'matrix_running_time_for_different_num_of_cpu_cores'
for cpu_num in cpu_num_array:
    guan.make_sh_file_for_qsub(sh_filename=sh_filename+'_'+str(cpu_num), command_line=f'python {py_filename}.py', cpu_num=cpu_num, task_name=task_name+'_'+str(cpu_num), cd_dir=0)

View File

@@ -0,0 +1,35 @@
"""
This code is supported by the website: https://www.guanjihuan.com
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/45324
"""
import numpy as np
import time

n = 1000        # matrix size
test_times = 20  # repetitions to average over

# Matrix multiplication
start_time = time.time()
for _ in range(test_times):
    A = np.random.rand(n, n)
    B = np.random.rand(n, n)
    C = np.dot(A, B)
multiply_time = (time.time() - start_time)/test_times
print(f"矩阵乘法时间: {multiply_time:.3f}")

# Matrix inverse
start_time = time.time()
for _ in range(test_times):
    A = np.random.rand(n, n)
    inv_A = np.linalg.inv(A)
inv_time = (time.time() - start_time)/test_times
print(f"矩阵求逆时间: {inv_time:.3f}")

# Eigenvalues and eigenvectors
start_time = time.time()
for _ in range(test_times):
    A = np.random.rand(n, n)
    eigenvalues_A, eigenvector_A = np.linalg.eig(A)
eigen_time = (time.time() - start_time)/test_times
print(f"矩阵特征值和特征向量时间: {eigen_time:.3f}")

View File

@@ -0,0 +1,7 @@
import numpy as np
import os

# Submit the generated job scripts task_1.sh .. task_8.sh
cpu_num_array = np.arange(1, 9)
for cpu_num in cpu_num_array:
    os.system(f'qsub task_{cpu_num}.sh')

View File

@@ -0,0 +1,14 @@
import guan  # https://py.guanjihuan.com | install: pip install --upgrade guan
import numpy as np
import os

# One working directory per CPU-core count (1..32), each with its own copy of the
# benchmark script and a matching qsub .sh file
cpu_num_array = np.arange(1, 33)
py_filename = 'matrix_running_time_for_different_num_of_cpu_cores_writing_into_files'
current_directory = os.getcwd()
for cpu_num in cpu_num_array:
    guan.make_directory(f'./task_{cpu_num}')
    os.system(f'cp ./{py_filename}.py ./task_{cpu_num}/{py_filename}.py')
    # NOTE(review): this 'cd' runs in a child shell, so it does not change this
    # process's working directory — effectively a no-op; confirm intent.
    os.system(f'cd {current_directory}/task_{cpu_num}')
    guan.make_sh_file_for_qsub(sh_filename=f'./task_{cpu_num}/task_{cpu_num}', command_line=f'python {py_filename}.py', cpu_num=cpu_num, task_name=f'test_{cpu_num}', cd_dir=0)

View File

@@ -0,0 +1,39 @@
"""
This code is supported by the website: https://www.guanjihuan.com
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/45324
"""
import numpy as np
import time
import pickle

n = 1000        # matrix size
test_times = 20  # repetitions to average over

# Matrix multiplication
start_time = time.time()
for _ in range(test_times):
    A = np.random.rand(n, n)
    B = np.random.rand(n, n)
    C = np.dot(A, B)
multiply_time = (time.time() - start_time)/test_times
with open(f'multiply_time_n={n}.pkl', 'wb') as f:
    pickle.dump(multiply_time, f)

# Matrix inverse
start_time = time.time()
for _ in range(test_times):
    A = np.random.rand(n, n)
    inv_A = np.linalg.inv(A)
inv_time = (time.time() - start_time)/test_times
with open(f'inv_time_n={n}.pkl', 'wb') as f:
    pickle.dump(inv_time, f)

# Eigenvalues and eigenvectors
start_time = time.time()
for _ in range(test_times):
    A = np.random.rand(n, n)
    eigenvalues_A, eigenvector_A = np.linalg.eig(A)
eigen_time = (time.time() - start_time)/test_times
with open(f'eigen_time_n={n}.pkl', 'wb') as f:
    pickle.dump(eigen_time, f)

View File

@@ -0,0 +1,91 @@
import matplotlib.pyplot as plt
# from matplotlib.ticker import MultipleLocator
import numpy as np
import pickle

cpu_num_array = np.arange(1, 33)
n = 1000

def _plot_times(tag, title):
    """Load per-core timing pickles './task_<c>/<tag>_n=<n>.pkl', save a plot of the
    absolute times, then a plot of the speed-up ratio relative to one core."""
    time_array = []
    for cpu_num in cpu_num_array:
        with open(f'./task_{cpu_num}/{tag}_n={n}.pkl', 'rb') as f:
            time_array.append(pickle.load(f))
    # Absolute running time versus number of cores
    fig, ax = plt.subplots()
    ax.set_title(title)
    ax.set_xlabel('Number of CPU cores')
    ax.set_ylabel('Time (s)')
    # ax.xaxis.set_major_locator(MultipleLocator(1))
    plt.plot(cpu_num_array, time_array, '-o', )
    plt.savefig(f'{tag}_n={n}.jpg')
    # plt.show()
    # Speed-up ratio: single-core time divided by each time
    time_0 = time_array[0]
    for i0 in range(len(time_array)):
        time_array[i0] = time_0/time_array[i0]
    fig, ax = plt.subplots()
    ax.set_title(title)
    ax.set_xlabel('Number of CPU cores')
    ax.set_ylabel('Ratio')
    # ax.xaxis.set_major_locator(MultipleLocator(1))
    plt.plot(cpu_num_array, time_array, '-o', )
    plt.plot(cpu_num_array, cpu_num_array, '--r')  # ideal linear speed-up reference
    plt.savefig(f'{tag}_ratio_n={n}.jpg')
    # plt.show()

# Same plots for the three benchmarked operations
_plot_times('multiply_time', 'np.dot()')
_plot_times('inv_time', 'np.linalg.inv()')
_plot_times('eigen_time', 'np.linalg.eig()')

View File

@@ -0,0 +1,4 @@
#!/bin/sh
#PBS -N plot
#PBS -l nodes=1:ppn=1
# Single-core PBS job: render the timing plots from the pickled results
python plot_result_of_running_time_by_reading_files.py

View File

@@ -0,0 +1,9 @@
import numpy as np
import os

# Submit each per-core job script from inside its own task directory
cpu_num_array = np.arange(1, 33)
current_directory = os.getcwd()
for cpu_num in cpu_num_array:
    os.system(f'cd {current_directory}/task_{cpu_num} && qsub {current_directory}/task_{cpu_num}/task_{cpu_num}.sh')

View File

@@ -0,0 +1,56 @@
"""
This code is supported by the website: https://www.guanjihuan.com
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/45681
"""
import numpy as np

def hamiltonian(width=2, length=2):
    """Tight-binding Hamiltonian of a width x length square lattice (hopping amplitude 1)."""
    h = np.zeros((width*length, width*length))
    # hopping along y
    for x in range(length):
        for y in range(width-1):
            h[x*width+y, x*width+y+1] = 1
            h[x*width+y+1, x*width+y] = 1
    # hopping along x
    for x in range(length-1):
        for y in range(width):
            h[x*width+y, (x+1)*width+y] = 1
            h[(x+1)*width+y, x*width+y] = 1
    return h

def total_DOS_for_Fermi_energy_array(Fermi_energy_array, h, broadening):
    """Total density of states at each Fermi energy, from the retarded Green's function.

    DOS(E) = -Im Tr G(E + i*broadening) / pi.
    """
    dim_energy = Fermi_energy_array.shape[0]
    dim = h.shape[0]
    total_DOS_array = np.zeros((dim_energy))
    i0 = 0
    for Fermi_energy in Fermi_energy_array:
        green = np.linalg.inv((Fermi_energy+broadening*1j)*np.eye(dim)-h)
        total_DOS = -np.trace(np.imag(green))/np.pi  # total DOS from the Green's function
        total_DOS_array[i0] = total_DOS
        i0 += 1
    return total_DOS_array

def main():
    # Check how the integral of the DOS depends on the broadening eta
    plot_precision = 0.01  # plotting / integration step
    Fermi_energy_array = np.arange(-5, 5, plot_precision)
    h = hamiltonian()
    for broadening in [0.5, 0.1, 0.01, 0.001, 0.0001]:
        total_DOS_array = total_DOS_for_Fermi_energy_array(Fermi_energy_array, h, broadening)
        sum_up = np.sum(total_DOS_array)*plot_precision
        print(f'Broadening为{broadening}时的积分结果:{sum_up}')

if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,41 @@
"""
This code is supported by the website: https://www.guanjihuan.com
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/45873
"""
import numpy as np
import sys

# Matrix sizes from 1 up to 900000 (log-like spacing)
n_array = np.concatenate((np.arange(1, 10, 1),
                          np.arange(10, 100, 10),
                          np.arange(100, 1000, 100),
                          np.arange(1000, 10000, 1000),
                          np.arange(10000, 100000, 10000),
                          np.arange(100000, 1000000, 100000)))
for n in n_array:
    matrix = np.zeros((n, n))  # float64: 8 B per element
    # matrix = np.zeros((n, n), dtype=complex)  # complex128: 16 B per element
    # matrix = np.zeros((n, n), dtype=np.float32)  # float32: 4 B per element
    # matrix = np.zeros((n, n), dtype=int)  # int: 4 B per element (platform dependent — TODO confirm)
    if n == 1:
        print(type(matrix[0, 0]), '\n')
    size0 = matrix.nbytes  # memory of the data buffer itself
    size = sys.getsizeof(matrix)  # total (shallow) object memory
    print(f'矩阵 N={n}')
    # Pick a human-readable unit from the total size
    if size < 1024:
        print(f'数据内存占用: {size0:.2f} B')
        print(f'总的内存占用: {size:.2f} B')
    elif 1024 <= size < 1024*1024:
        print(f'数据内存占用: {size0/1024:.2f} KB')
        print(f'总的内存占用: {size/1024:.2f} KB')
    elif 1024*1024 <= size < 1024*1024*1024:
        print(f'数据内存占用: {size0/(1024*1024):.2f} MB')
        print(f'总的内存占用: {size/(1024*1024):.2f} MB')
    elif 1024*1024*1024 <= size < 1024*1024*1024*1024:
        print(f'数据内存占用: {size0/(1024*1024*1024):.2f} GB')
        print(f'总的内存占用: {size/(1024*1024*1024):.2f} GB')
    else:
        print(f'数据内存占用: {size0/(1024*1024*1024*1024):.2f} TB')
        print(f'总的内存占用: {size/(1024*1024*1024*1024):.2f} TB')
    print()

View File

@@ -0,0 +1,95 @@
! This code is supported by the website: https://www.guanjihuan.com
! The newest version of this code is on the web page: https://www.guanjihuan.com/archives/45966
module random_matrix_mod
implicit none
contains
! Allocate an n x n matrix and fill it with uniform random numbers
subroutine generate_random_matrix(n, A)
integer, intent(in) :: n
double precision, allocatable, intent(out) :: A(:,:)
integer :: ierr
allocate(A(n, n), stat=ierr)
if (ierr /= 0) stop "内存分配失败"
call init_random_seed()
call random_number(A)
end subroutine
! Seed the RNG from the system clock
subroutine init_random_seed()
integer :: i, n, clock, ierr
integer, allocatable :: seed(:)
call random_seed(size = n)
allocate(seed(n), stat=ierr)
if (ierr /= 0) stop "种子分配失败"
call system_clock(count=clock)
seed = clock + 37 * [(i - 1, i = 1, n)]
call random_seed(put=seed)
deallocate(seed)
end subroutine
end module
program main
use random_matrix_mod
use f95_precision
use blas95
use lapack95
implicit none
integer, allocatable :: index1(:)
integer n, i, j, info, ierr, stage, start, end_val, step, count_start, count_end, count_rate, test_0, test_times
double precision, allocatable :: A(:,:)
double precision time_used
test_times = 20
! Loop parameters for the three matrix-size stages
do stage = 1, 3
select case(stage)
case(1) ! stage 1: n = 100 to 1000, step 100
start = 100
end_val = 1000
step = 100
case(2) ! stage 2: n = 2000 to 10000, step 1000
start = 2000
end_val = 10000
step = 1000
case(3) ! stage 3: n = 20000 to 30000, step 10000
start = 20000
end_val = 30000
step = 10000
end select
n = start
do while (n <= end_val)
allocate(index1(n), stat=ierr)
call system_clock(count_start, count_rate)
test_0 = 1
do while (test_0 <= test_times)
call generate_random_matrix(n, A)
call getrf(A, index1, info); call getri(A, index1, info) ! Invert via LU factorization (getrf) plus getri; A is overwritten with its inverse.
deallocate(A, stat=ierr)
test_0 = test_0 + 1
end do
call system_clock(count_end)
! Print the averaged wall time for this n
if (count_rate > 0) then
time_used = real(count_end - count_start) / real(count_rate) / test_times
write(*, '(a, I6, a, f12.6, a)') 'n = ', n, ' 的计算时间: ', time_used, ' 秒'
else
write(*,*) "无法获取计算时间"
endif
deallocate(index1, stat=ierr)
n = n + step
end do
end do
end program

View File

@@ -0,0 +1,20 @@
"""
This code is supported by the website: https://www.guanjihuan.com
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/45966
"""
import numpy as np
import time

# Matrix sizes to benchmark (matches the Fortran version's stages)
n_array = np.concatenate((np.arange(100, 1000, 100),
                          np.arange(1000, 10000, 1000),
                          np.arange(10000, 40000, 10000)))
for n in n_array:
    test_times = 20  # repetitions to average over
    start_time = time.time()
    for _ in range(test_times):
        A = np.random.rand(n, n)
        inv_A = np.linalg.inv(A)
    inv_time = (time.time() - start_time)/test_times
    print(f"n = {n} 的计算时间: {inv_time:.6f}")

View File

@@ -0,0 +1,4 @@
#!/bin/sh
#PBS -N fortran
#PBS -l nodes=1:ppn=24
# Run the compiled Fortran benchmark (a.exe) on 24 cores of one node
./a.exe

Some files were not shown because too many files have changed in this diff Show More