Compare commits
70 Commits
2d98dd5b07
...
master
Author | SHA1 | Date | |
---|---|---|---|
5b00715f84 | |||
b411789fd3 | |||
e58a4612ef | |||
62b8b4a11b | |||
cb35e77c38 | |||
5e77dc87e6 | |||
b3bfab1569 | |||
8131c17258 | |||
6d7da71eda | |||
00f46a79c0 | |||
e5f835f5de | |||
15764c8052 | |||
dcf1f324ce | |||
8423da7ddc | |||
ef1f0522f4 | |||
0db8b55a3f | |||
35c87df603 | |||
984fdf71df | |||
f3fa228754 | |||
a8bb557a65 | |||
71f3e4b758 | |||
848f34ef89 | |||
5327c384ee | |||
02a3f2b279 | |||
8d726fe9e6 | |||
fe7bc6db82 | |||
07daddb196 | |||
388be36ab1 | |||
1a5d874ac0 | |||
b85ae814c3 | |||
7b07bd90bc | |||
2bb0a1ab9b | |||
ede8d08443 | |||
652b32fdde | |||
90d2b600e4 | |||
7635346f96 | |||
3776c2c12a | |||
7fabcff909 | |||
cc7c4200f5 | |||
4ea5c623cb | |||
43a9c1a2eb | |||
eb11e84063 | |||
65d9777440 | |||
249d292bd8 | |||
cd9d66c857 | |||
ef45071a38 | |||
a68778dd2e | |||
e7e929ff08 | |||
33e1dc5118 | |||
0bcd8ce52e | |||
cbedb87697 | |||
60b9410584 | |||
9cad8f4a9d | |||
5f3d81fc6c | |||
ef6bb4e9d2 | |||
7265410df6 | |||
e3c4a49292 | |||
d6d7b6d6a1 | |||
3cde4d784f | |||
8a83969b07 | |||
73adde082b | |||
0027293f2b | |||
ebf86b30eb | |||
5be00bb720 | |||
2b5bfab114 | |||
81565244f1 | |||
fea2d76548 | |||
047c2353b6 | |||
d2b76e2cd7 | |||
67d4c9e5a4 |
@@ -9,14 +9,14 @@
|
||||
2. 列表内容
|
||||
3. 列表内容
|
||||
|
||||
无序列表:用 + - * 任何一种都可以。为了不和其他记号重复,个人倾向于用 + 。
|
||||
无序列表:用 + - * 任何一种都可以。这里推荐使用 - 。
|
||||
|
||||
+ 列表内容
|
||||
+ 嵌套前面加几个空格。为了保险起见,个人倾向于用四个空格或一个Tab。
|
||||
+ 列表内容
|
||||
+ 列表嵌套
|
||||
+ 列表嵌套
|
||||
+ 列表嵌套
|
||||
- 列表内容
|
||||
- 嵌套前面加几个空格。这里推荐使用四个空格缩进,兼容性最好。
|
||||
- 列表内容
|
||||
- 列表嵌套
|
||||
- 列表嵌套
|
||||
- 列表嵌套
|
||||
|
||||
*倾斜:前后一个星号*
|
||||
|
||||
@@ -30,15 +30,18 @@
|
||||
print('hello world')
|
||||
```
|
||||
|
||||
分割线:三个或者三个以上的 - 或 * 。为了不和其他记号重复,个人倾向于用 --- 。
|
||||
分割线:三个或者三个以上的 - 或 * 。这里推荐使用 --- 。
|
||||
|
||||
---
|
||||
|
||||
在Markdown中空一行可采用以下符号。该符号为HTML中的符号,在Markdown中也是支持的。
|
||||
换行有:
|
||||
|
||||
<br />
|
||||
- 通用场景:优先用两个换行符,即按两次回车
|
||||
- 行内换行:用两个空格,
|
||||
加上换行符
|
||||
- 复杂排版:必要时用 <br> 或其他 HTML 标签
|
||||
|
||||
以下是表格的书写形式。其中,第二行用一个横杆也是可以。为了保险起见,个人倾向于用三个横杆。
|
||||
以下是表格的书写形式。其中,第二行用一个横杆也是可以。这里推荐使用三个横杆。
|
||||
|
||||
| 右对齐 | 居中对齐 | 左对齐 |
|
||||
| ---: | :---: | :--- |
|
||||
|
@@ -9,7 +9,7 @@
|
||||
\author{作者名字\inst{1},作者名字\inst{2}} %作者
|
||||
\institute{\inst{1}第一个单位\and\inst{2}第二个单位} %这里的\and有换行的效果
|
||||
\date{\today} %时间(默认也会显示)
|
||||
\logo{\includegraphics[height=1.0cm]{1.jpg}} %右下角的小log
|
||||
% \logo{\includegraphics[height=1.0cm]{1.jpg}} %右下角的小log
|
||||
|
||||
\begin{document} %正文开始
|
||||
\begin{frame} %相当于ppt里的一页
|
||||
|
@@ -3,11 +3,11 @@
|
||||
|
||||
% 陈数高效法
|
||||
clear;clc;
|
||||
n=1000 % 积分密度
|
||||
n=100 % 积分密度
|
||||
delta=2*pi/n;
|
||||
C=0;
|
||||
for kx=-pi:(2*pi/n):pi
|
||||
for ky=-pi:(2*pi/n):pi
|
||||
for kx=-pi:(2*pi/n):pi-(2*pi/n)
|
||||
for ky=-pi:(2*pi/n):pi-(2*pi/n)
|
||||
VV=get_vector(HH(kx,ky));
|
||||
Vkx=get_vector(HH(kx+delta,ky)); % 略偏离kx的波函数
|
||||
Vky=get_vector(HH(kx,ky+delta)); % 略偏离ky的波函数
|
||||
|
@@ -0,0 +1,31 @@
|
||||
"""
|
||||
This code is supported by the website: https://www.guanjihuan.com
|
||||
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/4536
|
||||
"""
|
||||
|
||||
import multiprocessing
|
||||
import os
|
||||
import time
|
||||
|
||||
def run_proc(name): # 要执行的代码
|
||||
start_time = time.perf_counter()
|
||||
time.sleep(2)
|
||||
end_time = time.perf_counter()
|
||||
print ('Process id running on %s = %s' % (name, os.getpid()), '; running time = %s' % (end_time-start_time))
|
||||
|
||||
if __name__ == '__main__':
|
||||
start_time = time.perf_counter()
|
||||
|
||||
# 循环创建进程
|
||||
processes = []
|
||||
for i in range(4):
|
||||
p = multiprocessing.Process(target=run_proc, args=(f'job{i}',))
|
||||
processes.append(p)
|
||||
p.start()
|
||||
|
||||
# 等待所有进程完成
|
||||
for p in processes:
|
||||
p.join()
|
||||
|
||||
end_time = time.perf_counter()
|
||||
print('运行时间(s)=', (end_time-start_time))
|
@@ -0,0 +1,23 @@
|
||||
"""
|
||||
This code is supported by the website: https://www.guanjihuan.com
|
||||
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/4536
|
||||
"""
|
||||
|
||||
import multiprocessing
|
||||
import os
|
||||
import time
|
||||
|
||||
def run_proc(name):
|
||||
start_time = time.perf_counter()
|
||||
time.sleep(2)
|
||||
end_time = time.perf_counter()
|
||||
print ('Process id running on %s = %s' % (name, os.getpid()), '; running time = %s' % (end_time-start_time))
|
||||
return name
|
||||
|
||||
if __name__ == '__main__':
|
||||
start_time = time.perf_counter()
|
||||
with multiprocessing.Pool() as pool:
|
||||
results = pool.map(run_proc, [f"task {i}" for i in range(64)])
|
||||
end_time = time.perf_counter()
|
||||
print(results)
|
||||
print(end_time - start_time)
|
@@ -0,0 +1,63 @@
|
||||
"""
|
||||
This code is supported by the website: https://www.guanjihuan.com
|
||||
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/10890
|
||||
"""
|
||||
|
||||
import numpy as np
|
||||
|
||||
a = [[ 0 , 0 , 1.5 , 0.32635182-0.98480775j],
|
||||
[0 , 0 , -0.32635182-0.98480775j, 1.5 ],
|
||||
[ 1.5 , -0.32635182+0.98480775j ,0, 0 ],
|
||||
[ 0.32635182+0.98480775j , 1.5 , 0, 0 ]]
|
||||
|
||||
def Schmidt_orthogonalization(eigenvector):
|
||||
num = eigenvector.shape[1]
|
||||
for i in range(num):
|
||||
for i0 in range(i):
|
||||
eigenvector[:, i] = eigenvector[:, i] - eigenvector[:, i0]*np.dot(eigenvector[:, i].transpose().conj(), eigenvector[:, i0])/(np.dot(eigenvector[:, i0].transpose().conj(),eigenvector[:, i0]))
|
||||
eigenvector[:, i] = eigenvector[:, i]/np.linalg.norm(eigenvector[:, i])
|
||||
return eigenvector
|
||||
|
||||
def verify_orthogonality(vectors):
|
||||
identity = np.eye(vectors.shape[1])
|
||||
product = np.dot(vectors.T.conj(), vectors)
|
||||
return np.allclose(product, identity)
|
||||
|
||||
# 对 np.linalg.eigh() 的特征向量正交化
|
||||
|
||||
E, v = np.linalg.eigh(a)
|
||||
print(verify_orthogonality(v))
|
||||
|
||||
v1 = Schmidt_orthogonalization(v)
|
||||
print(verify_orthogonality(v1))
|
||||
|
||||
from scipy.linalg import orth
|
||||
v2 = orth(v)
|
||||
print(verify_orthogonality(v2))
|
||||
|
||||
v3, S, Vt = np.linalg.svd(v)
|
||||
print(verify_orthogonality(v3))
|
||||
|
||||
v4, R = np.linalg.qr(v)
|
||||
print(verify_orthogonality(v4))
|
||||
|
||||
print()
|
||||
|
||||
|
||||
# 对 np.linalg.eig() 的特征向量正交化
|
||||
|
||||
E, v = np.linalg.eig(a)
|
||||
print(verify_orthogonality(v))
|
||||
|
||||
v1 = Schmidt_orthogonalization(v)
|
||||
print(verify_orthogonality(v1))
|
||||
|
||||
from scipy.linalg import orth
|
||||
v2 = orth(v)
|
||||
print(verify_orthogonality(v2))
|
||||
|
||||
v3, S, Vt = np.linalg.svd(v)
|
||||
print(verify_orthogonality(v3))
|
||||
|
||||
v4, R = np.linalg.qr(v)
|
||||
print(verify_orthogonality(v4))
|
7
2021.05.07_Quantum_Espresso/silicon/command.txt
Normal file
7
2021.05.07_Quantum_Espresso/silicon/command.txt
Normal file
@@ -0,0 +1,7 @@
|
||||
pw.x < pw.scf.silicon_bands.in > pw.scf.silicon_bands.out
|
||||
|
||||
pw.x < pw.bands.silicon.in > pw.bands.silicon.out
|
||||
|
||||
bands.x < pp.bands.silicon.in > pp.bands.silicon.out
|
||||
|
||||
python plot_bands.py
|
30
2021.05.07_Quantum_Espresso/silicon/plot_bands.py
Normal file
30
2021.05.07_Quantum_Espresso/silicon/plot_bands.py
Normal file
@@ -0,0 +1,30 @@
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
|
||||
plt.rcParams["figure.dpi"]=150
|
||||
plt.rcParams["figure.facecolor"]="white"
|
||||
plt.rcParams["figure.figsize"]=(8, 6)
|
||||
|
||||
# load data
|
||||
data = np.loadtxt('./si_bands.dat.gnu')
|
||||
|
||||
k = np.unique(data[:, 0])
|
||||
bands = np.reshape(data[:, 1], (-1, len(k)))
|
||||
|
||||
for band in range(len(bands)):
|
||||
plt.plot(k, bands[band, :], linewidth=1, alpha=0.5, color='k')
|
||||
plt.xlim(min(k), max(k))
|
||||
|
||||
# Fermi energy
|
||||
plt.axhline(6.6416, linestyle=(0, (5, 5)), linewidth=0.75, color='k', alpha=0.5)
|
||||
# High symmetry k-points (check bands_pp.out)
|
||||
plt.axvline(0.8660, linewidth=0.75, color='k', alpha=0.5)
|
||||
plt.axvline(1.8660, linewidth=0.75, color='k', alpha=0.5)
|
||||
plt.axvline(2.2196, linewidth=0.75, color='k', alpha=0.5)
|
||||
# text labels
|
||||
plt.xticks(ticks= [0, 0.8660, 1.8660, 2.2196, 3.2802], \
|
||||
labels=['L', '$\Gamma$', 'X', 'U', '$\Gamma$'])
|
||||
plt.ylabel("Energy (eV)")
|
||||
plt.text(2.3, 5.6, 'Fermi energy')
|
||||
plt.savefig('si_bands.jpg')
|
||||
plt.show()
|
13
2021.05.07_Quantum_Espresso/silicon/pp.bands.silicon.in
Normal file
13
2021.05.07_Quantum_Espresso/silicon/pp.bands.silicon.in
Normal file
@@ -0,0 +1,13 @@
|
||||
&BANDS
|
||||
prefix = 'silicon'
|
||||
outdir = './tmp/'
|
||||
filband = 'si_bands.dat'
|
||||
/
|
||||
|
||||
K_POINTS {crystal_b}
|
||||
5
|
||||
0.0000 0.5000 0.0000 20 !L
|
||||
0.0000 0.0000 0.0000 30 !G
|
||||
-0.500 0.0000 -0.500 10 !X
|
||||
-0.375 0.2500 -0.375 30 !U
|
||||
0.0000 0.0000 0.0000 20 !G
|
38
2021.05.07_Quantum_Espresso/silicon/pw.bands.silicon.in
Normal file
38
2021.05.07_Quantum_Espresso/silicon/pw.bands.silicon.in
Normal file
@@ -0,0 +1,38 @@
|
||||
&control
|
||||
calculation = 'bands',
|
||||
restart_mode = 'from_scratch',
|
||||
prefix = 'silicon',
|
||||
outdir = './tmp/'
|
||||
pseudo_dir = '/opt/qe-7.3.1/pseudo'
|
||||
verbosity = 'high'
|
||||
/
|
||||
|
||||
&system
|
||||
ibrav = 2,
|
||||
celldm(1) = 10.2076,
|
||||
nat = 2,
|
||||
ntyp = 1,
|
||||
ecutwfc = 50,
|
||||
ecutrho = 400,
|
||||
nbnd = 8
|
||||
/
|
||||
|
||||
&electrons
|
||||
conv_thr = 1e-8,
|
||||
mixing_beta = 0.6
|
||||
/
|
||||
|
||||
ATOMIC_SPECIES
|
||||
Si 28.086 Si.pz-vbc.UPF
|
||||
|
||||
ATOMIC_POSITIONS (alat)
|
||||
Si 0.00 0.00 0.00
|
||||
Si 0.25 0.25 0.25
|
||||
|
||||
K_POINTS {crystal_b}
|
||||
5
|
||||
0.0000 0.5000 0.0000 20 !L
|
||||
0.0000 0.0000 0.0000 30 !G
|
||||
-0.500 0.0000 -0.500 10 !X
|
||||
-0.375 0.2500 -0.375 30 !U
|
||||
0.0000 0.0000 0.0000 20 !G
|
36
2021.05.07_Quantum_Espresso/silicon/pw.scf.silicon_bands.in
Normal file
36
2021.05.07_Quantum_Espresso/silicon/pw.scf.silicon_bands.in
Normal file
@@ -0,0 +1,36 @@
|
||||
&CONTROL
|
||||
calculation = 'scf',
|
||||
restart_mode = 'from_scratch',
|
||||
prefix = 'silicon',
|
||||
outdir = './tmp/'
|
||||
pseudo_dir = '/opt/qe-7.3.1/pseudo'
|
||||
verbosity = 'high'
|
||||
/
|
||||
|
||||
&SYSTEM
|
||||
ibrav = 2,
|
||||
celldm(1) = 10.2076,
|
||||
nat = 2,
|
||||
ntyp = 1,
|
||||
ecutwfc = 50,
|
||||
ecutrho = 400,
|
||||
nbnd = 8,
|
||||
! occupations = 'smearing',
|
||||
! smearing = 'gaussian',
|
||||
! degauss = 0.005
|
||||
/
|
||||
|
||||
&ELECTRONS
|
||||
conv_thr = 1e-8,
|
||||
mixing_beta = 0.6
|
||||
/
|
||||
|
||||
ATOMIC_SPECIES
|
||||
Si 28.086 Si.pz-vbc.UPF
|
||||
|
||||
ATOMIC_POSITIONS (alat)
|
||||
Si 0.0 0.0 0.0
|
||||
Si 0.25 0.25 0.25
|
||||
|
||||
K_POINTS (automatic)
|
||||
8 8 8 0 0 0
|
@@ -1,88 +0,0 @@
|
||||
"""
|
||||
This code is supported by the website: https://www.guanjihuan.com
|
||||
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/13623
|
||||
"""
|
||||
|
||||
from bs4 import BeautifulSoup
|
||||
from urllib.request import urlopen
|
||||
import re
|
||||
from collections import Counter
|
||||
import datetime
|
||||
import random
|
||||
import time
|
||||
|
||||
|
||||
# time.sleep(random.uniform(0,1800)) # 爬虫简单伪装,在固定时间后0到30分钟后开始运行。调试的时候把该语句注释。
|
||||
year = datetime.datetime.now().year
|
||||
month = datetime.datetime.now().month
|
||||
day = datetime.datetime.now().day
|
||||
|
||||
|
||||
# 获取链接
|
||||
try:
|
||||
with open('prb_link_list.txt', 'r', encoding='UTF-8') as f: # 如果文件存在
|
||||
link_list = f.read().split('\n') # 历史已经访问过的链接(数组类型)
|
||||
except:
|
||||
with open('prb_link_list.txt', 'w', encoding='UTF-8') as f: # 如果文件不存在
|
||||
link_list = []
|
||||
f = open('prb_link_list.txt', 'a', encoding='UTF-8') # 打开文件(补充)
|
||||
f.write('\nLink list obtained on '+str(year)+'.'+str(month).rjust(2,'0')+'.'+str(day).rjust(2,'0')+':\n')
|
||||
match_href = [] # 在本次运行中满足条件的链接
|
||||
for loop in range(3):
|
||||
if loop == 0:
|
||||
start_link = "https://journals.aps.org/prb/recent?page=1" # 看第一页
|
||||
elif loop == 1:
|
||||
start_link = "https://journals.aps.org/prb/recent?page=2" # 看第二页
|
||||
elif loop == 2:
|
||||
start_link = "https://journals.aps.org/prb/recent?page=3" # 看第三页(三页基本上覆盖了当天的所有更新)
|
||||
html = urlopen(start_link).read().decode('utf-8') # 打开网页
|
||||
soup = BeautifulSoup(html, features='lxml') # 放入soup中
|
||||
all_a_tag = soup.find_all('a', href=True) # 获取超链接标签
|
||||
for a_tag in all_a_tag:
|
||||
href = a_tag['href'] # 超链接字符串
|
||||
if re.search('/abstract/', href): # 文章的链接
|
||||
if re.search('https://journals.aps.org', href)==None: # 如果链接不是完整的,那么补充完整
|
||||
href = 'https://journals.aps.org'+ href
|
||||
if href not in match_href and href not in link_list and re.search('\?', href)==None: # 链接不重复
|
||||
match_href.append(href)
|
||||
f.write(href+'\n')
|
||||
f.close()
|
||||
|
||||
|
||||
|
||||
# 获取摘要
|
||||
try:
|
||||
f = open('prb_all.txt', 'a', encoding='UTF-8') # 全部记录
|
||||
except:
|
||||
f = open('prb_all.txt', 'w', encoding='UTF-8') # 如果文件不存在
|
||||
try:
|
||||
f_month = open('prb_'+str(year)+'.'+str(month).rjust(2,'0')+'.txt', 'a', encoding='UTF-8') # 一个月的记录
|
||||
except:
|
||||
f_month = open('prb_'+str(year)+'.'+str(month).rjust(2,'0')+'.txt', 'w', encoding='UTF-8') # 如果文件不存在
|
||||
f.write('\n\n['+str(year)+'.'+str(month).rjust(2,'0')+'.'+str(day).rjust(2,'0')+'][total number='+str(len(match_href))+']\n\n\n')
|
||||
f_month.write('\n\n['+str(year)+'.'+str(month).rjust(2,'0')+'.'+str(day).rjust(2,'0')+'][total number='+str(len(match_href))+']\n\n\n')
|
||||
print('total number=', len(match_href)) # 调试的时候显示这个
|
||||
i00 = 0
|
||||
for href in match_href:
|
||||
i00 += 1
|
||||
print('reading number', i00, '...') # 调试的时候显示这个
|
||||
# time.sleep(random.uniform(10,110)) # 爬虫简单伪装,休息一分钟左右。如果链接个数有60个,那么程序运行时间延长60分钟。调试的时候把该语句注释。
|
||||
try:
|
||||
html = urlopen(href).read().decode('utf-8') # 打开文章链接
|
||||
soup = BeautifulSoup(html, features='lxml') # 放入soup中
|
||||
title = soup.title # 文章标题
|
||||
f.write(str(title.get_text())+'\n\n')
|
||||
f_month.write(str(title.get_text())+'\n\n')
|
||||
f.write(str(href)+'\n\n') # 文章链接
|
||||
f_month.write(str(href)+'\n\n')
|
||||
abstract = re.findall('"yes"><p>.*</p><div', html, re.S)[0][9:-8] # 文章摘要
|
||||
word_list = abstract.split(' ') # 划分单词
|
||||
for word in word_list:
|
||||
if re.search('<', word)==None and re.search('>', word)==None: # 有些内容满足过滤条件,因此信息可能会丢失。
|
||||
f.write(word+' ')
|
||||
f_month.write(word+' ')
|
||||
f.write('\n\n\n')
|
||||
f_month.write('\n\n\n')
|
||||
except:
|
||||
pass
|
||||
f.close()
|
@@ -1,88 +0,0 @@
|
||||
"""
|
||||
This code is supported by the website: https://www.guanjihuan.com
|
||||
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/13623
|
||||
"""
|
||||
|
||||
from bs4 import BeautifulSoup
|
||||
from urllib.request import urlopen
|
||||
import re
|
||||
from collections import Counter
|
||||
import datetime
|
||||
import random
|
||||
import time
|
||||
|
||||
|
||||
# time.sleep(random.uniform(0,1800)) # 爬虫简单伪装,在固定时间后0到30分钟后开始运行。调试的时候把该语句注释。
|
||||
year = datetime.datetime.now().year
|
||||
month = datetime.datetime.now().month
|
||||
day = datetime.datetime.now().day
|
||||
|
||||
|
||||
# 获取链接
|
||||
try:
|
||||
with open('prl_link_list.txt', 'r', encoding='UTF-8') as f: # 如果文件存在
|
||||
link_list = f.read().split('\n') # 历史已经访问过的链接(数组类型)
|
||||
except:
|
||||
with open('prl_link_list.txt', 'w', encoding='UTF-8') as f: # 如果文件不存在
|
||||
link_list = []
|
||||
f = open('prl_link_list.txt', 'a', encoding='UTF-8') # 打开文件(补充)
|
||||
f.write('\nLink list obtained on '+str(year)+'.'+str(month).rjust(2,'0')+'.'+str(day).rjust(2,'0')+':\n')
|
||||
match_href = [] # 在本次运行中满足条件的链接
|
||||
for loop in range(3):
|
||||
if loop == 0:
|
||||
start_link = "https://journals.aps.org/prl/recent?page=1" # 看第一页
|
||||
elif loop == 1:
|
||||
start_link = "https://journals.aps.org/prl/recent?page=2" # 看第二页
|
||||
elif loop == 2:
|
||||
start_link = "https://journals.aps.org/prl/recent?page=3" # 看第三页(三页基本上覆盖了当天的所有更新)
|
||||
html = urlopen(start_link).read().decode('utf-8') # 打开网页
|
||||
soup = BeautifulSoup(html, features='lxml') # 放入soup中
|
||||
all_a_tag = soup.find_all('a', href=True) # 获取超链接标签
|
||||
for a_tag in all_a_tag:
|
||||
href = a_tag['href'] # 超链接字符串
|
||||
if re.search('/abstract/', href): # 文章的链接
|
||||
if re.search('https://journals.aps.org', href)==None: # 如果链接不是完整的,那么补充完整
|
||||
href = 'https://journals.aps.org'+ href
|
||||
if href not in match_href and href not in link_list and re.search('\?', href)==None: # 链接不重复
|
||||
match_href.append(href)
|
||||
f.write(href+'\n')
|
||||
f.close()
|
||||
|
||||
|
||||
|
||||
# 获取摘要
|
||||
try:
|
||||
f = open('prl_all.txt', 'a', encoding='UTF-8') # 全部记录
|
||||
except:
|
||||
f = open('prl_all.txt', 'w', encoding='UTF-8') # 如果文件不存在
|
||||
try:
|
||||
f_month = open('prl_'+str(year)+'.'+str(month).rjust(2,'0')+'.txt', 'a', encoding='UTF-8') # 一个月的记录
|
||||
except:
|
||||
f_month = open('prl_'+str(year)+'.'+str(month).rjust(2,'0')+'.txt', 'w', encoding='UTF-8') # 如果文件不存在
|
||||
f.write('\n\n['+str(year)+'.'+str(month).rjust(2,'0')+'.'+str(day).rjust(2,'0')+'][total number='+str(len(match_href))+']\n\n\n')
|
||||
f_month.write('\n\n['+str(year)+'.'+str(month).rjust(2,'0')+'.'+str(day).rjust(2,'0')+'][total number='+str(len(match_href))+']\n\n\n')
|
||||
print('total number=', len(match_href)) # 调试的时候显示这个
|
||||
i00 = 0
|
||||
for href in match_href:
|
||||
i00 += 1
|
||||
print('reading number', i00, '...') # 调试的时候显示这个
|
||||
# time.sleep(random.uniform(10,110)) # 爬虫简单伪装,休息一分钟左右。如果链接个数有60个,那么程序运行时间延长60分钟。调试的时候把该语句注释。
|
||||
try:
|
||||
html = urlopen(href).read().decode('utf-8') # 打开文章链接
|
||||
soup = BeautifulSoup(html, features='lxml') # 放入soup中
|
||||
title = soup.title # 文章标题
|
||||
f.write(str(title.get_text())+'\n\n')
|
||||
f_month.write(str(title.get_text())+'\n\n')
|
||||
f.write(str(href)+'\n\n') # 文章链接
|
||||
f_month.write(str(href)+'\n\n')
|
||||
abstract = re.findall('"yes"><p>.*</p><div', html, re.S)[0][9:-8] # 文章摘要
|
||||
word_list = abstract.split(' ') # 划分单词
|
||||
for word in word_list:
|
||||
if re.search('<', word)==None and re.search('>', word)==None: # 有些内容满足过滤条件,因此信息可能会丢失。
|
||||
f.write(word+' ')
|
||||
f_month.write(word+' ')
|
||||
f.write('\n\n\n')
|
||||
f_month.write('\n\n\n')
|
||||
except:
|
||||
pass
|
||||
f.close()
|
@@ -1,41 +0,0 @@
|
||||
"""
|
||||
This code is supported by the website: https://www.guanjihuan.com
|
||||
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/13623
|
||||
"""
|
||||
|
||||
import re
|
||||
from collections import Counter
|
||||
|
||||
|
||||
def main():
|
||||
file_name = 'prb_all.txt'
|
||||
with open(file_name, 'r', encoding='UTF-8') as f: # 打开文件
|
||||
paper_list = f.read().split('\n\n\n') # 通过三个回车划分不同文章
|
||||
word_list = []
|
||||
ignore = ignore_words() # 过滤常见单词
|
||||
for paper in paper_list:
|
||||
word_list_in_one_paper = []
|
||||
if len(paper)>20: # 通过字符串长度过滤日期
|
||||
content_list = paper.split('\n\n') # 通过两个回车划分内容
|
||||
for content in content_list:
|
||||
if re.search('https://', content)==None: # 过滤文章链接
|
||||
words = content.split(' ') # 通过空格划分单词
|
||||
for word in words:
|
||||
if word not in word_list_in_one_paper: # 一篇文章的某个单词只统计一次
|
||||
if word not in ignore and len(word)>1: # 过滤词汇
|
||||
word_list.append(word)
|
||||
word_list_in_one_paper.append(word)
|
||||
num = 300
|
||||
most_common_words = Counter(word_list).most_common(num) # 统计出现最多的num个词汇
|
||||
print('\n出现频率最高的前', num, '个词汇:')
|
||||
for word in most_common_words:
|
||||
print(word)
|
||||
|
||||
|
||||
def ignore_words(): # 可自行增删
|
||||
ignore = ['Phys.', 'the', 'to', 'of', 'in', 'under', 'and', 'by', 'The', 'at', 'with', 'up', 'be', 'above', 'below', 'are', 'is', 'for', 'that', 'as', 'we', '<a', 'abstract', 'abstract"','<span', 'which', 'We', 'such', 'has', 'two', 'these', 'it', 'all', 'results', 'result', 'each', 'have', 'between', 'on', 'an', 'can', 'also', 'from', 'Our', 'our', 'using', 'where', 'These', 'out', 'both', 'due', 'less', 'along', 'but', 'In', 'show', 'into', 'study', 'find', 'provide', 'change','not', 'open', 'this', 'show', 'into', 'study', 'find', 'provide', 'change', 'present', 'Using', 'large', 'This', 'However', 'appear', 'studied', 'obtain', 'been', 'Both', 'they', 'effects', 'effect', 'compute', 'more', 'does', 'shown', 'Based', 'reveal', 'highly', 'number', 'However,', 'was', 'near', 'full', 'based', 'several', 'suggest', 'agreement', 'predicted', 'values', 'work', 'emphasize', 'without', 'or', 'work,', 'studies', 'future', 'identify', 'present.', 'predict', 'presence', 'their', 'were', 'From', 'its', 'By', 'how', 'ground', 'observed', 'recent', 'For', 'other', 'Here', 'test', 'further', 'Its', 'similar', 'however,', 'range', 'within', 'value', 'possible', 'may', 'than', 'low', 'us', 'obtained', 'around', 'consider', 'about', 'very', 'will', 'when', 'played', 'consist', 'consists', 'Here,', 'observe', 'gives', 'It', 'over', 'cannot', 'As', 'whose', 'new', 'some', 'only', 'from', 'yields', 'shows', 'data', 'direct', 'related', 'different', 'evidence', 'role', 'function', 'origin', 'specific', 'set', 'confirm', 'give', 'Moreover', 'develop', 'including', 'could', 'used', 'means', 'allows', 'make', 'e.g.,', 'provides', 'system', 'systems', 'field', 'fields', 'model', 'model,', 'state', 'states', 'states.', 'state.', 'band', 'bands', 'method', 'methods', 'nature', 'rate', 'zero', 'single', 'theory', 'first', 'one', 'complex', 'approach', 'schemes', 'terms', 'even', 'case', 'analysis', 'weight', 'volume', 'evolution', 'well', 'external', 'measured', 'introducing', 'dependence', 
'properties', 'demonstrate', 'remains', 'through', 'measurements', 'samples', 'findings', 'respect', 'investigate', 'behavior', 'importance', 'considered', 'experimental', 'increase', 'propose', 'follows', 'increase', 'emerged', 'interesting', 'behaviors', 'influenced', 'paramount', 'indicate', 'Rev.', 'concepts', 'induced', 'zone', 'regions', 'exact', 'contribution', 'behavior', 'formation', 'measurements.', 'utilizing', 'constant', 'regime', 'features', 'strength', 'compare', 'determined', 'combination', 'compare', 'determined', 'At', 'inside', 'ambient', 'then', 'important', 'report', 'Moreover,', 'Despite', 'found', 'because', 'process', 'and,', 'significantly', 'realized', 'much', 'natural', 'since', 'grows', 'any', 'compared', 'while', 'forms.', 'appears', 'indicating', 'coefficient', 'suggested', 'time', 'exhibits', 'calculations.', 'developed', 'array', 'discuss', 'field', 'becomes', 'allowing', 'indicates', 'via', 'introduce', 'considering', 'times.', 'constructed', 'explain', 'form', 'owing', 'parameters.', 'parameter', 'operation', 'probe', 'experiments', 'interest', 'strategies', 'seen', 'emerge', 'generic', 'geometry', 'numbers', 'observation', 'avenue', 'theretically', 'three', 'excellent', 'amount', 'notable', 'example', 'being', 'promising', 'latter', 'little', 'imposed', 'put', 'resource', 'together', 'produce', 'successfully','there', 'enhanced', 'this', 'great', 'dirven', 'increasing','should', 'otherwise', 'Further', 'field,', 'known', 'changes', 'still', 'beyond', 'various', 'center', 'previously', 'way', 'peculiar', 'detailed', 'understanding', 'good', 'years', 'where', 'Me', 'origins', 'years.', 'attributed', 'known,', 'them', 'reported', 'no', 'systems', 'agree', 'examined', 'rise', 'calculate', 'those', 'particular', 'relation', 'defined', 'either', 'again', 'current', 'exhibit', 'calculated', 'here', 'made', 'Further', 'consisting', 'constitutes', 'originated', 'if', 'exceed', 'access']
|
||||
return ignore
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@@ -1,37 +0,0 @@
|
||||
from bs4 import BeautifulSoup
|
||||
from urllib.request import urlopen
|
||||
import re
|
||||
import datetime
|
||||
|
||||
|
||||
year = datetime.datetime.now().year
|
||||
month = datetime.datetime.now().month
|
||||
day = datetime.datetime.now().day
|
||||
|
||||
|
||||
f = open('nature_physics.html', 'w', encoding='UTF-8')
|
||||
f.write('<meta charset="utf-8"><style type="text/css">a{text-decoration: none;color: #0a5794;}a:hover {text-decoration: underline;color: red; }</style>')
|
||||
f.write('<p>'+str(year)+'.'+str(month).rjust(2,'0')+'.'+str(day).rjust(2,'0')+' 已更新</p>')
|
||||
|
||||
match_href = []
|
||||
start_link = "https://www.nature.com/nphys/research-articles"
|
||||
html = urlopen(start_link).read().decode('utf-8') # 打开网页
|
||||
soup = BeautifulSoup(html, features='lxml') # 放入soup中
|
||||
all_article = soup.find_all('article', {"class":"u-full-height c-card c-card--flush"})
|
||||
for article in all_article:
|
||||
all_a_tag = article.find_all('a', href=True) # 获取超链接标签
|
||||
for a_tag in all_a_tag:
|
||||
href = a_tag['href'] # 超链接字符串
|
||||
if re.search('/articles/', href): # 文章的链接
|
||||
if re.search('https://www.nature.com', href)==None: # 如果链接不是完整的,那么补充完整
|
||||
href = 'https://www.nature.com'+ href
|
||||
if href not in match_href and re.search('\?', href)==None: # 链接不重复
|
||||
match_href.append(href)
|
||||
f.write('<li><a target=\"_blank\" href=\"')
|
||||
f.write(href) # 文章链接
|
||||
f.write('\">')
|
||||
f.write(a_tag.get_text())
|
||||
f.write('</a> ')
|
||||
time = article.find('time', {"class": "c-meta__item c-meta__item--block-at-lg"}).get_text()
|
||||
f.write(time+'</li>')
|
||||
f.close()
|
@@ -1,36 +0,0 @@
|
||||
from bs4 import BeautifulSoup
|
||||
from urllib.request import urlopen
|
||||
import re
|
||||
import datetime
|
||||
|
||||
|
||||
year = datetime.datetime.now().year
|
||||
month = datetime.datetime.now().month
|
||||
day = datetime.datetime.now().day
|
||||
|
||||
f = open('physics_magazine.html', 'w', encoding='UTF-8')
|
||||
f.write('<meta charset="utf-8"><style type="text/css">a{text-decoration: none;color: #0a5794;}a:hover {text-decoration: underline;color: red; }</style>')
|
||||
f.write('<p>'+str(year)+'.'+str(month).rjust(2,'0')+'.'+str(day).rjust(2,'0')+' 已更新</p>')
|
||||
|
||||
match_href = []
|
||||
start_link = "https://physics.aps.org/"
|
||||
html = urlopen(start_link).read().decode('utf-8') # 打开网页
|
||||
soup = BeautifulSoup(html, features='lxml') # 放入soup中
|
||||
all_articles = soup.find_all('div', {"class":"feed-item-details"})
|
||||
for article in all_articles:
|
||||
all_a_tag = article.find_all('a', href=True) # 获取超链接标签
|
||||
for a_tag in all_a_tag:
|
||||
href = a_tag['href'] # 超链接字符串
|
||||
if re.search('/articles/', href): # 文章的链接
|
||||
if re.search('https://physics.aps.org', href)==None: # 如果链接不是完整的,那么补充完整
|
||||
href = 'https://physics.aps.org'+ href
|
||||
if href not in match_href:
|
||||
match_href.append(href)
|
||||
f.write('<li><a target=\"_blank\" href=\"')
|
||||
f.write(href) # 文章链接
|
||||
f.write('\">')
|
||||
f.write(a_tag.get_text())
|
||||
f.write('</a> ')
|
||||
time = article.find('time', {"class": "feed-item-date"}).get_text()
|
||||
f.write(time+'</li>')
|
||||
f.close()
|
@@ -1,42 +0,0 @@
|
||||
from bs4 import BeautifulSoup
|
||||
from urllib.request import urlopen
|
||||
import re
|
||||
import datetime
|
||||
|
||||
|
||||
year = datetime.datetime.now().year
|
||||
month = datetime.datetime.now().month
|
||||
day = datetime.datetime.now().day
|
||||
|
||||
|
||||
f = open('prb.html', 'w', encoding='UTF-8')
|
||||
f.write('<meta charset="utf-8"><style type="text/css">a{text-decoration: none;color: #0a5794;}a:hover {text-decoration: underline;color: red; }</style>')
|
||||
f.write('<p>'+str(year)+'.'+str(month).rjust(2,'0')+'.'+str(day).rjust(2,'0')+' 已更新</p>')
|
||||
|
||||
match_href = []
|
||||
for loop in range(1):
|
||||
if loop == 0:
|
||||
start_link = "https://journals.aps.org/prb/recent" # 看第一页
|
||||
# elif loop == 1:
|
||||
# start_link = "https://journals.aps.org/prb/recent?page=2" # 看第二页
|
||||
html = urlopen(start_link).read().decode('utf-8') # 打开网页
|
||||
soup = BeautifulSoup(html, features='lxml') # 放入soup中
|
||||
all_article = soup.find_all('div', {"class":"article panel article-result"})
|
||||
for article in all_article:
|
||||
all_a_tag = article.find_all('a', href=True) # 获取超链接标签
|
||||
for a_tag in all_a_tag:
|
||||
href = a_tag['href'] # 超链接字符串
|
||||
if re.search('/abstract/', href): # 文章的链接
|
||||
if re.search('https://journals.aps.org', href)==None: # 如果链接不是完整的,那么补充完整
|
||||
href = 'https://journals.aps.org'+ href
|
||||
if href not in match_href and re.search('\?', href)==None: # 链接不重复
|
||||
match_href.append(href)
|
||||
f.write('<li><a target=\"_blank\" href=\"')
|
||||
f.write(href) # 文章链接
|
||||
f.write('\">')
|
||||
f.write(a_tag.get_text())
|
||||
f.write('</a> ')
|
||||
info = article.find('h6', {"class": "pub-info"}).get_text()
|
||||
f.write(re.findall('– Published .*', info, re.S)[0][12:]+'</li>')
|
||||
f.close()
|
||||
|
@@ -1,42 +0,0 @@
|
||||
from bs4 import BeautifulSoup
|
||||
from urllib.request import urlopen
|
||||
import re
|
||||
import datetime
|
||||
|
||||
|
||||
year = datetime.datetime.now().year
|
||||
month = datetime.datetime.now().month
|
||||
day = datetime.datetime.now().day
|
||||
|
||||
|
||||
f = open('prl.html', 'w', encoding='UTF-8')
|
||||
f.write('<meta charset="utf-8"><style type="text/css">a{text-decoration: none;color: #0a5794;}a:hover {text-decoration: underline;color: red; }</style>')
|
||||
f.write('<p>'+str(year)+'.'+str(month).rjust(2,'0')+'.'+str(day).rjust(2,'0')+' 已更新</p>')
|
||||
|
||||
match_href = []
|
||||
for loop in range(1):
|
||||
if loop == 0:
|
||||
start_link = "https://journals.aps.org/prl/recent" # 看第一页
|
||||
# elif loop == 1:
|
||||
# start_link = "https://journals.aps.org/prl/recent?page=2" # 看第二页
|
||||
html = urlopen(start_link).read().decode('utf-8') # 打开网页
|
||||
soup = BeautifulSoup(html, features='lxml') # 放入soup中
|
||||
all_article = soup.find_all('div', {"class":"article panel article-result"})
|
||||
for article in all_article:
|
||||
all_a_tag = article.find_all('a', href=True) # 获取超链接标签
|
||||
for a_tag in all_a_tag:
|
||||
href = a_tag['href'] # 超链接字符串
|
||||
if re.search('/abstract/', href): # 文章的链接
|
||||
if re.search('https://journals.aps.org', href)==None: # 如果链接不是完整的,那么补充完整
|
||||
href = 'https://journals.aps.org'+ href
|
||||
if href not in match_href and re.search('\?', href)==None: # 链接不重复
|
||||
match_href.append(href)
|
||||
f.write('<li><a target=\"_blank\" href=\"')
|
||||
f.write(href) # 文章链接
|
||||
f.write('\">')
|
||||
f.write(a_tag.get_text())
|
||||
f.write('</a> ')
|
||||
info = article.find('h6', {"class": "pub-info"}).get_text()
|
||||
f.write(re.findall('– Published.*', info, re.S)[0][12:]+'</li>')
|
||||
f.close()
|
||||
|
@@ -1,66 +0,0 @@
|
||||
"""
|
||||
This code is supported by the website: https://www.guanjihuan.com
|
||||
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/17937
|
||||
"""
|
||||
|
||||
from bs4 import BeautifulSoup
|
||||
from urllib.request import urlopen
|
||||
import re
|
||||
import datetime
|
||||
|
||||
year = datetime.datetime.now().year
|
||||
month = datetime.datetime.now().month
|
||||
day = datetime.datetime.now().day
|
||||
|
||||
# 获取链接
|
||||
|
||||
# 由于没有模拟登录知乎,因此只能爬取到最新的两篇文章
|
||||
authors = ["https://www.zhihu.com/people/guanjihuan/posts"] # Guan
|
||||
|
||||
match_href = []
|
||||
for i0 in range(len(authors)):
|
||||
start_link = authors[i0]
|
||||
html = urlopen(start_link).read().decode('utf-8') # 打开网页
|
||||
soup = BeautifulSoup(html, features='lxml') # 放入soup中
|
||||
all_a_tag = soup.find_all('a', href=True) # 获取超链接标签
|
||||
for a_tag in all_a_tag:
|
||||
href = a_tag['href'] # 超链接字符串
|
||||
if re.search('//zhuanlan.zhihu.com/p/', href) and not re.search('edit', href): # 文章的链接
|
||||
if re.search('https:', href)==None: # 如果链接不是完整的,那么补充完整
|
||||
href = 'https:'+ href
|
||||
if href not in match_href:
|
||||
match_href.append(href)
|
||||
|
||||
|
||||
# 对链接进行排序
|
||||
numbers = []
|
||||
match_href_new = []
|
||||
for href in match_href:
|
||||
numbers.append(int(href[29:]))
|
||||
numbers.sort(reverse = True)
|
||||
for n in numbers:
|
||||
match_href_new.append('https://zhuanlan.zhihu.com/p/'+str(n))
|
||||
|
||||
|
||||
# 获取内容并写入文件
|
||||
f = open('zhihu.html', 'w', encoding='UTF-8')
|
||||
f.write('<meta charset="utf-8"><style type="text/css">a{text-decoration: none;color: #004e4e;}a:hover {text-decoration: underline;color: red; }</style>')
|
||||
|
||||
f.write('<p>'+str(year)+'.'+str(month).rjust(2,'0')+'.'+str(day).rjust(2,'0')+' 已更新</p>')
|
||||
for href in match_href_new:
|
||||
try:
|
||||
html = urlopen(href).read().decode('utf-8') # 打开文章链接
|
||||
soup = BeautifulSoup(html, features='lxml') # 放入soup中
|
||||
title = soup.title # 文章标题
|
||||
f.write('<li><a target=\"_blank\" href=\"')
|
||||
f.write(str(href)) # 文章链接
|
||||
f.write('\">')
|
||||
f.write(str(title.get_text()[:-5]))
|
||||
f.write('</a> ')
|
||||
author = soup.find("span", {"class": "UserLink AuthorInfo-name"})
|
||||
f.write(str(author.get_text()+' '))
|
||||
post_time = soup.find("div", {"class" : "ContentItem-time"})
|
||||
f.write(str(post_time.get_text()[4:-6])+'</li>')
|
||||
except:
|
||||
pass
|
||||
f.close()
|
@@ -1,42 +0,0 @@
|
||||
import numpy as np
from numba import jit
import time


def for_sum(numpy_array):
    """Sum the array with a plain Python loop."""
    total = 0
    for number in numpy_array:
        total += number
    return total


@jit
def numba_for_sum(numpy_array):
    """Same loop as for_sum, compiled by numba."""
    total = 0
    for number in numpy_array:
        total += number
    return total


numpy_array = np.arange(0, 1e8, 1)


def _timed(fn):
    # Run fn once and return (result, elapsed seconds).
    t0 = time.time()
    value = fn()
    return value, time.time() - t0


result, elapsed = _timed(lambda: sum(numpy_array))
print('\nresult:', result)
print('python中sum()函数求和时间:\n', elapsed)

result, elapsed = _timed(lambda: np.sum(numpy_array))
print('\nresult:', result)
print('numpy.sum()函数求和时间:\n', elapsed)

result, elapsed = _timed(lambda: for_sum(numpy_array))
print('\nresult:', result)
print('for循环求和numpy数组的时间:\n', elapsed)

result, elapsed = _timed(lambda: numba_for_sum(numpy_array))
print('\nresult:', result)
print('numba加速for循环求和numpy数组的时间:\n', elapsed, '\n')
|
78
2022.03.06_numba_time/numba_time_1.py
Normal file
78
2022.03.06_numba_time/numba_time_1.py
Normal file
@@ -0,0 +1,78 @@
|
||||
from numba import jit
import numpy as np
import time

numpy_array = np.arange(0, 1e5, 1)
times = 1000


def for_sum(numpy_array):
    """Sum the array with a plain Python loop."""
    total = 0
    for number in numpy_array:
        total += number
    return total


def _bench(label, fn):
    # Call fn `times` times and print the label followed by elapsed seconds.
    t0 = time.time()
    for _ in range(times):
        fn()
    print(label, time.time() - t0)


_bench('for循环求和时间:', lambda: for_sum(numpy_array))
_bench('sum()函数求和时间:', lambda: sum(numpy_array))
_bench('numpy.sum()函数求和时间:', lambda: np.sum(numpy_array))

print()


@jit
def numba_for_sum(numpy_array):
    """Loop sum, JIT-compiled (object mode allowed)."""
    total = 0
    for number in numpy_array:
        total += number
    return total


@jit
def numba_np_sum(numpy_array):
    """np.sum wrapped in a JIT function."""
    return np.sum(numpy_array)


@jit(nopython=True)
def numba_nopython_np_sum(numpy_array):
    """np.sum wrapped in a nopython JIT function."""
    return np.sum(numpy_array)


@jit(nopython=True, parallel=True)
def numba_nopython_parallel_np_sum(numpy_array):
    """np.sum wrapped in a parallel nopython JIT function."""
    return np.sum(numpy_array)


_bench('numba + for循环求和时间:', lambda: numba_for_sum(numpy_array))
_bench('numba + numpy.sum()函数求和时间:', lambda: numba_np_sum(numpy_array))
_bench('numba(nopython) + numpy.sum()函数求和时间:', lambda: numba_nopython_np_sum(numpy_array))
_bench('numba(nopython,parallel) + numpy.sum()函数求和时间:', lambda: numba_nopython_parallel_np_sum(numpy_array))
|
58
2022.03.06_numba_time/numba_time_2.py
Normal file
58
2022.03.06_numba_time/numba_time_2.py
Normal file
@@ -0,0 +1,58 @@
|
||||
from numba import jit
from numba import prange
import time
import numpy as np

numpy_array = np.arange(0, 1e5, 1)
times = 1000


def for_sum(numpy_array):
    """Sum the array with a plain Python loop."""
    total = 0
    for number in numpy_array:
        total += number
    return total


@jit
def numba_for_sum_1(numpy_array):
    """Loop sum under plain @jit."""
    total = 0
    for number in numpy_array:
        total += number
    return total


@jit(nopython=True)
def numba_for_sum_2(numpy_array):
    """Loop sum under @jit(nopython=True)."""
    total = 0
    for number in numpy_array:
        total += number
    return total


@jit(nopython=True, parallel=True)
def numba_for_sum_3(numpy_array):
    """Index loop over prange so numba can parallelize the reduction."""
    total = 0
    for i in prange(len(numpy_array)):
        total += numpy_array[i]
    return total


def _bench(label, fn):
    # Call fn `times` times and print the label followed by elapsed seconds.
    t0 = time.time()
    for _ in range(times):
        fn()
    print(label, time.time() - t0)


_bench('for循环时间:', lambda: for_sum(numpy_array))
_bench('@jit时间:', lambda: numba_for_sum_1(numpy_array))
_bench('@jit(nopython=True)时间:', lambda: numba_for_sum_2(numpy_array))
_bench('@jit(nopython=True, parallel=True)时间:', lambda: numba_for_sum_3(numpy_array))
|
20
2022.03.16_frequently_used_python_package/numba_example.py
Normal file
20
2022.03.16_frequently_used_python_package/numba_example.py
Normal file
@@ -0,0 +1,20 @@
|
||||
import numpy as np
import time

numpy_array = np.arange(0, 1e5, 1)
times = 1000

from numba import jit
from numba import prange


@jit(nopython=True, parallel=True)
def numba_example(numpy_array):
    """Parallel JIT-compiled loop sum over the array."""
    total = 0
    for index in prange(len(numpy_array)):
        total += numpy_array[index]
    return total


start = time.time()
for _ in range(times):
    result = numba_example(numpy_array)
elapsed = time.time() - start
print(f'运行时间:{elapsed}')
|
@@ -1,7 +1,20 @@
|
||||
import pickle

data = [1, 2, 3]

# Round-trip through a file on disk.
with open('a.pkl', 'wb') as f:
    pickle.dump(data, f)
with open('a.pkl', 'rb') as f:
    data_load_from_file = pickle.load(f)
print(data_load_from_file)
print()

# Round-trip through an in-memory bytes object.
serialized_data = pickle.dumps(data)  # object -> bytes
print(type(serialized_data))
print(serialized_data)
print()
loaded_data = pickle.loads(serialized_data)  # bytes -> original type
print(type(loaded_data))
print(loaded_data)
|
@@ -1,9 +1,60 @@
|
||||
a = [1, 2]  # list
print(a)
print(type(a))  # object type
print(id(a))  # unique object identifier
print(len(a))  # list length
a.append(3)  # append an element in place
print(a)
print(sum(a))  # sum
print(max(a))  # maximum
print(min(a))  # minimum
print(abs(-3.14))  # absolute value
b1 = [2, -1, 3]
b2 = sorted(b1)  # sorted copy; b1 is not modified
print(b1)
print(b2)
b3 = list(reversed(b1))  # reversed copy as a list
print(b1)
print(b3)
c = range(5)  # integer sequence starting at 0
print(c)
for i0 in c:
    print(i0)
d1 = [1, 2, 3, 3, 2, 1, 1]
d2 = set(d1)  # convert to a set to drop duplicates
print(d1)
print(d2)
print(list(d2))

print()
dict_data = {"name": "张三", "age": 30, "city": "北京"}  # dict
print(dict_data)
print(type(dict_data))
print(dict_data.items())
for key, value in dict_data.items():
    print(f'打印字典内容 {key} {value}')

print()  # blank line
print(all([True, True, False]))  # True only if every element is truthy
print(all([1, 2, True]))
print(any([True, True, False]))  # True if at least one element is truthy
print(any([0, None, ""]))

print()
e = 'abc'
print(e)
print(hash(e))  # hash value (stable for the same object within one run)

print()
for i0 in range(3):
    exec(f'''
a{i0} = {i0}
print(a{i0})
''')  # execute dynamically built code
f = eval('1+2')  # evaluate an expression string and return its value
print(f)

f = open('a.txt', 'w')  # open a file
f.write('test')  # write to it
f.close()  # close it
|
@@ -1,2 +1,2 @@
|
||||
parameter=0
|
||||
parameter = 0
|
||||
print(f'hello world {parameter}')
|
@@ -2,6 +2,10 @@ import guan
|
||||
|
||||
parameter_array = [1, 2, 3, 4]
|
||||
|
||||
guan.make_sh_file_for_bsub(sh_filename='a', command_line='python a.py', cpu_num=1, task_name='task', queue_name='score', cd_dir=0)
|
||||
sh_filename = 'a'
|
||||
task_name = 'task'
|
||||
py_filename = 'a'
|
||||
|
||||
guan.copy_py_sh_file_and_bsub_task(parameter_array, py_filename='a', old_str_in_py='parameter=0', new_str_in_py='parameter=', sh_filename='a', bsub_task_name='task')
|
||||
guan.make_sh_file_for_bsub(sh_filename=sh_filename, command_line=f'python {py_filename}.py', cpu_num=1, task_name=task_name, queue_name='score', cd_dir=0)
|
||||
|
||||
guan.copy_py_sh_file_and_bsub_task(parameter_array, py_filename=py_filename, old_str_in_py='parameter = 0', new_str_in_py='parameter = ', sh_filename=sh_filename, task_name=task_name)
|
@@ -1,2 +1,2 @@
|
||||
parameter=0
|
||||
parameter = 0
|
||||
print(f'hello world {parameter}')
|
@@ -2,6 +2,10 @@ import guan
|
||||
|
||||
parameter_array = [1, 2, 3, 4]
|
||||
|
||||
guan.make_sh_file_for_qsub(sh_filename='a', command_line='python a.py', cpu_num=1, task_name='task', cd_dir=0)
|
||||
sh_filename = 'a'
|
||||
task_name = 'task'
|
||||
py_filename = 'a'
|
||||
|
||||
guan.copy_py_sh_file_and_qsub_task(parameter_array=parameter_array, py_filename='a', old_str_in_py='parameter=0', new_str_in_py='parameter=', sh_filename='a', qsub_task_name='task')
|
||||
guan.make_sh_file_for_qsub(sh_filename=sh_filename, command_line=f'python {py_filename}.py', cpu_num=1, task_name=task_name, cd_dir=0)
|
||||
|
||||
guan.copy_py_sh_file_and_qsub_task(parameter_array=parameter_array, py_filename=py_filename, old_str_in_py='parameter = 0 ', new_str_in_py='parameter = ', sh_filename=sh_filename, task_name=task_name)
|
@@ -1,5 +1,4 @@
|
||||
#!/bin/sh
|
||||
#PBS -N task
|
||||
#PBS -l nodes=1:ppn=1
|
||||
#PBS -q bigmem
|
||||
python a.py
|
||||
|
@@ -29,8 +29,6 @@ for parameter_str in parameter_str_array:
|
||||
with open('a'+str(index)+'.py', 'w') as f: # 写入
|
||||
f.write(content)
|
||||
|
||||
|
||||
|
||||
# 以下处理任务上传文件
|
||||
old_file = 'a.sh'
|
||||
new_file = 'a'+str(index)+'.sh'
|
||||
@@ -50,7 +48,5 @@ for parameter_str in parameter_str_array:
|
||||
with open('a'+str(index)+'.sh', 'w') as f: # 写入
|
||||
f.write(content)
|
||||
|
||||
|
||||
|
||||
# 提交任务
|
||||
os.system('qsub '+new_file)
|
@@ -10,9 +10,9 @@ print(title)
|
||||
print(stock_data[0])
|
||||
num = 30
|
||||
date_array = stock_data[0:num, 0]
|
||||
opening_array = stock_data[0:num, 1]
|
||||
closing_array = stock_data[0:num, 2]
|
||||
high_array = stock_data[0:num, 3]
|
||||
low_array = stock_data[0:num, 4]
|
||||
opening_array = stock_data[0:num, 2]
|
||||
closing_array = stock_data[0:num, 3]
|
||||
high_array = stock_data[0:num, 4]
|
||||
low_array = stock_data[0:num, 5]
|
||||
guan.plot(date_array, closing_array, style='o-', xlabel='date', ylabel='price')
|
||||
guan.plot_stock_line(date_array, opening_array, closing_array, high_array, low_array)
|
@@ -44,7 +44,8 @@ stock_symbols_30 = []
|
||||
for stock_symbol in stock_symbols:
|
||||
find_300 = re.findall(r'^300', stock_symbol)
|
||||
find_301 = re.findall(r'^301', stock_symbol)
|
||||
if find_300 != [] or find_301 != []:
|
||||
find_302 = re.findall(r'^302', stock_symbol)
|
||||
if find_300 != [] or find_301 != [] or find_302 != []:
|
||||
stock_symbols_30.append(stock_symbol)
|
||||
num_stocks_30 = len(stock_symbols_30)
|
||||
print('创业板股票数量:', num_stocks_30)
|
||||
@@ -61,21 +62,19 @@ num_stocks_68= len(stock_symbols_68)
|
||||
print('科创板股票数量:', num_stocks_68)
|
||||
# print(stock_symbols_68)
|
||||
|
||||
# 新三板以及北交所
|
||||
# 北交所和新三板
|
||||
stock_symbols_8_4_9 = []
|
||||
for stock_symbol in stock_symbols:
|
||||
find_82 = re.findall(r'^82', stock_symbol)
|
||||
find_83 = re.findall(r'^83', stock_symbol)
|
||||
find_87 = re.findall(r'^87', stock_symbol)
|
||||
find_88 = re.findall(r'^88', stock_symbol)
|
||||
find_430 = re.findall(r'^430', stock_symbol)
|
||||
find_420 = re.findall(r'^420', stock_symbol)
|
||||
find_400 = re.findall(r'^400', stock_symbol)
|
||||
find_920 = re.findall(r'^920', stock_symbol)
|
||||
if find_82 != [] or find_83 != [] or find_87 != [] or find_88 != [] or find_430 != [] or find_420 != [] or find_400 != [] or find_920 != []:
|
||||
if find_83 != [] or find_87 != [] or find_430 != [] or find_420 != [] or find_400 != [] or find_920 != []:
|
||||
stock_symbols_8_4_9.append(stock_symbol)
|
||||
num_stocks_8_4_9= len(stock_symbols_8_4_9)
|
||||
print('新三板以及北交所股票数量:', num_stocks_8_4_9)
|
||||
print('北交所和新三板股票数量:', num_stocks_8_4_9)
|
||||
# print(stock_symbols_8_4)
|
||||
|
||||
print('所有股票数量:', num_stocks_60+num_stocks_00+num_stocks_30+num_stocks_68+num_stocks_8_4_9)
|
||||
|
@@ -1,4 +1,4 @@
|
||||
# 数组分割示例
|
||||
# 数组分割
|
||||
import numpy as np
|
||||
import guan
|
||||
task_num = 4
|
@@ -0,0 +1,7 @@
|
||||
# 变量写入文件: dump a variable to a file and load it back via guan.
import guan
import numpy as np

data = np.array([1, 2, 3])
guan.dump_data(data, filename='a')
loaded_data = guan.load_data(filename='a')
print(loaded_data)
|
@@ -0,0 +1,16 @@
|
||||
# 检查是否为厄米矩阵(相对误差为1e-5)
import guan

# Within tolerance of its conjugate transpose.
matrix1 = [[2, 1.00001 - 1j],
           [1 + 1j, 1]]
print(guan.is_hermitian(matrix1))

# Just outside the tolerance.
matrix2 = [[2, 1.00002 - 1j],
           [1 + 1j, 1]]
print(guan.is_hermitian(matrix2))
|
@@ -0,0 +1,33 @@
|
||||
# 循环参数计算: sweep functions over parameter arrays with guan's loop helpers.

import guan
import numpy as np


def test_1(x):
    return 2*x


def test_2(x, y):
    return x+y


def test_3(x, y, z):
    return x+y+z


# One-parameter sweep.
x_array = np.arange(0, 5, 1)
result_array = guan.loop_calculation_with_one_parameter(test_1, x_array)
print(result_array)
guan.plot(x_array, result_array)
print()

# Two-parameter sweep.
y_array = np.arange(0, 3, 1)
result_array = guan.loop_calculation_with_two_parameters(test_2, x_array, y_array)
print(result_array)
guan.plot_contour(x_array, y_array, result_array)
print()

# Three-parameter sweep; plot one slice (x fixed at its fifth value).
z_array = np.arange(0, 2, 1)
result_array = guan.loop_calculation_with_three_parameters(test_3, x_array, y_array, z_array)
print(result_array)
guan.plot_contour(y_array, z_array, result_array[:, :, 4])
|
@@ -0,0 +1,15 @@
|
||||
# 并行计算: fan a slow function out over a multiprocessing pool via guan.
import guan
import time
import os


def run_proc(name):
    """Sleep 5 s, report the worker pid and elapsed time, return a tag."""
    start_time = time.time()
    time.sleep(5)
    end_time = time.time()
    print('Process id running on name %s = %s' % (name, os.getpid()), '; running time = %s' % (end_time-start_time))
    return f'name_{name}'


if __name__ == '__main__':
    result_array = guan.parallel_calculation_with_multiprocessing_Pool(func=run_proc, args_list=range(32), show_time=1)
    print(result_array)
|
@@ -0,0 +1,4 @@
|
||||
# 打印数组: print a mixed list element by element via guan.
import guan

mixed_items = [1, 2, 3, 'a', 'b', 'c']
guan.print_array(mixed_items)
|
@@ -0,0 +1,7 @@
|
||||
# 运行时间日志: write time-stamped log entries around a slow loop.
import guan
import time

guan.logging_with_day_and_time(content='start')
for step in range(3):
    time.sleep(5)
    guan.logging_with_day_and_time(f'end_of_{step}')
|
25
2024.01.16_GUAN_package_learning/common/example_of_timer.py
Normal file
25
2024.01.16_GUAN_package_learning/common/example_of_timer.py
Normal file
@@ -0,0 +1,25 @@
|
||||
# 函数计时器: time a function with a decorator or with an explicit wrapper call.
import guan


@guan.timer_decorator
def test1(a, b):
    """Timed automatically on every call via the decorator."""
    import time
    print(a)
    time.sleep(1)
    print(b)
    print('Run finished.')


def test2(a, b):
    """Undecorated; timed explicitly through guan.timer below."""
    import time
    print(a)
    time.sleep(1)
    print(b)
    print('Run finished.')


for _ in range(2):
    test1(10, b=20)

print()

for _ in range(2):
    guan.timer(test2, 100, b=200)
|
@@ -0,0 +1,22 @@
|
||||
# Swallow exceptions either with guan's decorator or with an explicit wrapper.
import guan


@guan.try_decorator
def test1(a, b):
    print(a)
    bug_code  # deliberately undefined name: exercises the except path
    print(b)
    return 'return_message1'


def test2(a, b):
    print(a)
    bug_code  # deliberately undefined name: exercises the except path
    print(b)
    return 'return_message2'


result1 = test1(10, b=20)
print(result1)

print()

result2 = guan.try_except(test2, 100, b=200)
print(result2)
|
@@ -0,0 +1,6 @@
|
||||
# 矩阵写入文件后查看: dump a random matrix as Markdown and LaTeX for inspection.
import guan
import numpy as np

matrix = np.random.rand(5, 5)
guan.write_matrix_in_markdown_format(matrix=matrix, filename='markdown_matrix')
guan.write_matrix_in_latex_format(matrix=matrix, filename='latex_matrix')
|
@@ -1,4 +1,4 @@
|
||||
# 能带图计算示例
|
||||
# 能带图计算
|
||||
import guan
|
||||
import numpy as np
|
||||
k_array = np.linspace(-np.pi, np.pi, 100)
|
@@ -1,4 +1,4 @@
|
||||
# 陈数和Wilson loop计算示例
|
||||
# 陈数和Wilson loop计算
|
||||
import guan
|
||||
import numpy as np
|
||||
chern_number = guan.calculate_chern_number_for_square_lattice_with_efficient_method(guan.hamiltonian_of_one_QAH_model, precision=100)
|
@@ -1,4 +1,4 @@
|
||||
# 电导和散射矩阵的计算示例
|
||||
# 电导和散射矩阵的计算
|
||||
import guan
|
||||
import numpy as np
|
||||
|
@@ -1,4 +1,4 @@
|
||||
# 使用格林函数计算态密度示例
|
||||
# 使用格林函数计算态密度
|
||||
import guan
|
||||
import numpy as np
|
||||
|
@@ -1,4 +1,4 @@
|
||||
# 波函数规范的选取示例
|
||||
# 波函数规范的选取
|
||||
import numpy as np
|
||||
import cmath
|
||||
import guan
|
@@ -1,4 +1,4 @@
|
||||
# 实空间哈密顿量的示例
|
||||
# 实空间哈密顿量
|
||||
import guan
|
||||
print('\n', guan.hamiltonian_of_finite_size_system_along_one_direction(3), '\n')
|
||||
print(guan.hamiltonian_of_finite_size_system_along_two_directions_for_square_lattice(2, 2), '\n')
|
@@ -1,11 +0,0 @@
|
||||
# 函数的计时器: each call of the decorated function is timed by guan.
import guan


@guan.timer_decorator
def slow_demo():
    import time
    time.sleep(2)
    print('Run finished!')


for _ in range(3):
    slow_demo()
|
9
2024.01.27_chat.guanjihuan.com/LICENSE
Normal file
9
2024.01.27_chat.guanjihuan.com/LICENSE
Normal file
@@ -0,0 +1,9 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2025 Ji-Huan Guan
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
3
2024.01.27_chat.guanjihuan.com/README.md
Normal file
3
2024.01.27_chat.guanjihuan.com/README.md
Normal file
@@ -0,0 +1,3 @@
|
||||
## chat.guanjihuan.com
|
||||
|
||||
本仓库记录这篇博文中的代码:https://www.guanjihuan.com/archives/38502
|
15
2024.01.27_chat.guanjihuan.com/download_model.py
Normal file
15
2024.01.27_chat.guanjihuan.com/download_model.py
Normal file
@@ -0,0 +1,15 @@
|
||||
# pip install --upgrade huggingface_hub
|
||||
|
||||
from huggingface_hub import snapshot_download
|
||||
|
||||
snapshot_download(repo_id="THUDM/chatglm3-6b-32k", local_dir = './THUDM/chatglm3-6b-32k', local_dir_use_symlinks=False)
|
||||
|
||||
# # 选择性下载
|
||||
# snapshot_download(repo_id="THUDM/chatglm3-6b-32k", local_dir = './THUDM/chatglm3-6b-32k', ignore_patterns='*.bin', local_dir_use_symlinks=False)
|
||||
# snapshot_download(repo_id="THUDM/chatglm3-6b-32k", local_dir = './THUDM/chatglm3-6b-32k', allow_patterns='*00001-of-00007.bin', local_dir_use_symlinks=False)
|
||||
# snapshot_download(repo_id="THUDM/chatglm3-6b-32k", local_dir = './THUDM/chatglm3-6b-32k', allow_patterns='*00002-of-00007.bin', local_dir_use_symlinks=False)
|
||||
# snapshot_download(repo_id="THUDM/chatglm3-6b-32k", local_dir = './THUDM/chatglm3-6b-32k', allow_patterns='*00003-of-00007.bin', local_dir_use_symlinks=False)
|
||||
# snapshot_download(repo_id="THUDM/chatglm3-6b-32k", local_dir = './THUDM/chatglm3-6b-32k', allow_patterns='*00004-of-00007.bin', local_dir_use_symlinks=False)
|
||||
# snapshot_download(repo_id="THUDM/chatglm3-6b-32k", local_dir = './THUDM/chatglm3-6b-32k', allow_patterns='*00005-of-00007.bin', local_dir_use_symlinks=False)
|
||||
# snapshot_download(repo_id="THUDM/chatglm3-6b-32k", local_dir = './THUDM/chatglm3-6b-32k', allow_patterns='*00006-of-00007.bin', local_dir_use_symlinks=False)
|
||||
# snapshot_download(repo_id="THUDM/chatglm3-6b-32k", local_dir = './THUDM/chatglm3-6b-32k', allow_patterns='*00007-of-00007.bin', local_dir_use_symlinks=False)
|
@@ -0,0 +1,58 @@
|
||||
"""
|
||||
This code is supported by the website: https://www.guanjihuan.com
|
||||
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/38502
|
||||
"""
|
||||
|
||||
import streamlit as st
|
||||
import ollama
|
||||
|
||||
import streamlit as st
|
||||
st.set_page_config(
|
||||
page_title="Chat",
|
||||
layout='wide'
|
||||
)
|
||||
|
||||
model_name = 'llama3.2'
|
||||
|
||||
prompt = st.chat_input("在这里输入您的内容")
|
||||
|
||||
def clear_all():
|
||||
st.session_state.messages = []
|
||||
st.session_state.ai_response = []
|
||||
|
||||
if 'messages' not in st.session_state:
|
||||
st.session_state.messages = []
|
||||
if 'ai_response' not in st.session_state:
|
||||
st.session_state.ai_response = []
|
||||
|
||||
for ai_response in st.session_state.ai_response:
|
||||
with st.chat_message(ai_response["role"], avatar=ai_response.get("avatar")):
|
||||
st.markdown(ai_response["content"])
|
||||
|
||||
prompt_placeholder = st.chat_message("user", avatar='user')
|
||||
with st.chat_message("robot", avatar="assistant"):
|
||||
message_placeholder = st.empty()
|
||||
|
||||
def response_of_chat(prompt):
|
||||
st.session_state.messages.append({'role': 'user', 'content': prompt})
|
||||
response = ollama.chat(model=model_name, messages=st.session_state.messages, stream=True)
|
||||
full_content = ''
|
||||
for part in response:
|
||||
full_content += part['message']['content']
|
||||
message_placeholder.markdown(full_content)
|
||||
if stop_button:
|
||||
break
|
||||
st.session_state.messages.append({'role': 'assistant',
|
||||
'content': full_content})
|
||||
st.session_state.ai_response.append({"role": "robot", "content": full_content, "avatar": "assistant"})
|
||||
return full_content
|
||||
|
||||
if prompt:
|
||||
prompt_placeholder.markdown(prompt)
|
||||
st.session_state.ai_response.append({"role": "user", "content": prompt, "avatar": 'user'})
|
||||
stop = st.empty()
|
||||
stop_button = stop.button('停止', key='break_response')
|
||||
response = response_of_chat(prompt)
|
||||
stop.empty()
|
||||
button_clear = st.button("清空", on_click=clear_all, key='clear')
|
||||
|
@@ -0,0 +1,105 @@
|
||||
"""
|
||||
This code is supported by the website: https://www.guanjihuan.com
|
||||
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/38502
|
||||
"""
|
||||
|
||||
import streamlit as st
|
||||
st.set_page_config(
|
||||
page_title="Chat",
|
||||
layout='wide'
|
||||
)
|
||||
|
||||
choose_load_method = 1 # 选择加载模型的方式
|
||||
|
||||
if choose_load_method == 0:
|
||||
# 默认加载(需要13G显存)
|
||||
@st.cache_resource
|
||||
def load_model_chatglm3():
|
||||
from transformers import AutoModel, AutoTokenizer
|
||||
tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm3-6b-32k", trust_remote_code=True)
|
||||
model = AutoModel.from_pretrained("THUDM/chatglm3-6b-32k",trust_remote_code=True).half().cuda()
|
||||
model = model.eval()
|
||||
return model, tokenizer
|
||||
model_chatglm3, tokenizer_chatglm3 = load_model_chatglm3()
|
||||
|
||||
elif choose_load_method == 1:
|
||||
# 量化加载(需要6G显存)
|
||||
@st.cache_resource
|
||||
def load_model_chatglm3():
|
||||
from transformers import AutoTokenizer, BitsAndBytesConfig, AutoModelForCausalLM
|
||||
tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm3-6b-32k", trust_remote_code=True)
|
||||
nf4_config = BitsAndBytesConfig(
|
||||
load_in_4bit=True,
|
||||
bnb_4bit_quant_type="nf4",
|
||||
)
|
||||
model = AutoModelForCausalLM.from_pretrained("THUDM/chatglm3-6b-32k", trust_remote_code=True, quantization_config=nf4_config)
|
||||
model = model.eval()
|
||||
return model, tokenizer
|
||||
model_chatglm3, tokenizer_chatglm3 = load_model_chatglm3()
|
||||
|
||||
elif choose_load_method == 2:
|
||||
# 在CPU上加载(需要25G内存,对话速度会比较慢,不推荐)
|
||||
@st.cache_resource
|
||||
def load_model_chatglm3():
|
||||
from transformers import AutoModel, AutoTokenizer
|
||||
tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm3-6b-32k", trust_remote_code=True)
|
||||
model = AutoModel.from_pretrained("THUDM/chatglm3-6b-32k",trust_remote_code=True).float()
|
||||
model = model.eval()
|
||||
return model, tokenizer
|
||||
model_chatglm3, tokenizer_chatglm3 = load_model_chatglm3()
|
||||
|
||||
with st.sidebar:
|
||||
with st.expander('参数', expanded=True):
|
||||
max_length = 409600
|
||||
top_p = st.slider('top_p', 0.01, 1.0, step=0.01, value=0.8, key='top_p_session')
|
||||
temperature = st.slider('temperature', 0.51, 1.0, step=0.01, value=0.8, key='temperature_session')
|
||||
def reset_parameter():
|
||||
st.session_state['top_p_session'] = 0.8
|
||||
st.session_state['temperature_session'] = 0.8
|
||||
reset_parameter_button = st.button('重置', on_click=reset_parameter)
|
||||
|
||||
prompt = st.chat_input("在这里输入您的命令")
|
||||
|
||||
def chat_response_chatglm3(prompt):
|
||||
history, past_key_values = st.session_state.history_ChatGLM3, st.session_state.past_key_values_ChatGLM3
|
||||
for response, history, past_key_values in model_chatglm3.stream_chat(tokenizer_chatglm3, prompt, history,
|
||||
past_key_values=past_key_values,
|
||||
max_length=max_length, top_p=top_p,
|
||||
temperature=temperature,
|
||||
return_past_key_values=True):
|
||||
message_placeholder_chatglm3.markdown(response)
|
||||
if stop_button:
|
||||
break
|
||||
st.session_state.ai_response.append({"role": "robot", "content": response, "avatar": "assistant"})
|
||||
st.session_state.history_ChatGLM3 = history
|
||||
st.session_state.past_key_values_ChatGLM3 = past_key_values
|
||||
return response
|
||||
|
||||
def clear_all():
|
||||
st.session_state.history_ChatGLM3 = []
|
||||
st.session_state.past_key_values_ChatGLM3 = None
|
||||
st.session_state.ai_response = []
|
||||
|
||||
if 'history_ChatGLM3' not in st.session_state:
|
||||
st.session_state.history_ChatGLM3 = []
|
||||
if 'past_key_values_ChatGLM3' not in st.session_state:
|
||||
st.session_state.past_key_values_ChatGLM3 = None
|
||||
if 'ai_response' not in st.session_state:
|
||||
st.session_state.ai_response = []
|
||||
|
||||
for ai_response in st.session_state.ai_response:
|
||||
with st.chat_message(ai_response["role"], avatar=ai_response.get("avatar")):
|
||||
st.markdown(ai_response["content"])
|
||||
|
||||
prompt_placeholder = st.chat_message("user", avatar='user')
|
||||
with st.chat_message("robot", avatar="assistant"):
|
||||
message_placeholder_chatglm3 = st.empty()
|
||||
|
||||
if prompt:
|
||||
prompt_placeholder.markdown(prompt)
|
||||
st.session_state.ai_response.append({"role": "user", "content": prompt, "avatar": 'user'})
|
||||
stop = st.empty()
|
||||
stop_button = stop.button('停止', key='break_response')
|
||||
chat_response_chatglm3(prompt)
|
||||
stop.empty()
|
||||
button_clear = st.button("清空", on_click=clear_all, key='clear')
|
@@ -0,0 +1,33 @@
|
||||
# basic requirements
|
||||
|
||||
protobuf>=4.25.2
|
||||
transformers>=4.36.2
|
||||
tokenizers>=0.15.0
|
||||
cpm_kernels>=1.0.11
|
||||
torch>=2.1.0
|
||||
gradio>=4.14.0
|
||||
sentencepiece>=0.1.99
|
||||
sentence_transformers>=2.2.2
|
||||
accelerate>=0.26.1
|
||||
streamlit>=1.30.0
|
||||
fastapi>=0.109.0
|
||||
loguru~=0.7.2
|
||||
mdtex2html>=1.2.0
|
||||
latex2mathml>=3.77.0
|
||||
|
||||
# for openai demo
|
||||
|
||||
openai>=1.7.2
|
||||
zhipuai>=2.0.0
|
||||
|
||||
pydantic>=2.5.3
|
||||
sse-starlette>=1.8.2
|
||||
uvicorn>=0.25.0
|
||||
timm>=0.9.12
|
||||
tiktoken>=0.5.2
|
||||
|
||||
# for langchain demo
|
||||
|
||||
langchain>=0.1.0
|
||||
langchainhub>=0.1.14
|
||||
arxiv>=2.1.0
|
@@ -0,0 +1,112 @@
|
||||
"""
|
||||
This code is supported by the website: https://www.guanjihuan.com
|
||||
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/38502
|
||||
"""
|
||||
|
||||
import streamlit as st
|
||||
st.set_page_config(
|
||||
page_title="Chat",
|
||||
layout='wide'
|
||||
)
|
||||
|
||||
@st.cache_resource
def load_model_internlm_7B():
    """Load InternLM-Chat-7B with 4-bit NF4 quantization.

    Cached by Streamlit so the model is loaded only once per process.

    Returns:
        tuple: (model, tokenizer) ready for inference.
    """
    # internlm (needs roughly 7 GB of GPU memory with 4-bit quantization)
    import torch
    from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
    # NF4 4-bit quantization config to reduce the memory footprint.
    nf4_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
    )
    model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True, quantization_config=nf4_config)
    tokenizer = AutoTokenizer.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True, torch_dtype=torch.bfloat16)
    model = model.eval()  # inference mode; disables dropout etc.
    return model, tokenizer

# Loaded once at import time; reused across Streamlit reruns via the cache.
model_internlm_7B, tokenizer_internlm_7B = load_model_internlm_7B()
|
||||
|
||||
# Sidebar: sampling parameters ("参数") with a reset ("重置") button.
with st.sidebar:
    with st.expander('参数', expanded=True):
        # Very large cap — effectively unlimited generation length for this demo.
        max_length = 409600
        top_p = st.slider('top_p', 0.01, 1.0, step=0.01, value=0.8, key='top_p_session')
        temperature = st.slider('temperature', 0.51, 1.0, step=0.01, value=0.8, key='temperature_session')
        def reset_parameter():
            # Restore both sliders to their defaults; keys match the sliders above.
            st.session_state['top_p_session'] = 0.8
            st.session_state['temperature_session'] = 0.8
        reset_parameter_button = st.button('重置', on_click=reset_parameter)
|
||||
|
||||
# Returns None until the user submits text; consumed further down the script.
prompt = st.chat_input("在这里输入您的命令")

# Project-local streaming-generation helpers from the InternLM tools package.
from tools.transformers.interface import GenerationConfig, generate_interactive
|
||||
|
||||
def prepare_generation_config():
    """Bundle the sidebar sampling settings into a GenerationConfig."""
    return GenerationConfig(max_length=max_length, top_p=top_p, temperature=temperature)
|
||||
|
||||
def combine_history(prompt, messages):
    """Flatten the chat history plus the new user prompt into one model prompt.

    Each history entry is rendered through the module-level templates
    (user_prompt / robot_prompt); the fresh prompt is appended last via
    cur_query_prompt so the model continues as the bot.
    """
    segments = []
    for message in messages:
        content = message["content"]
        role = message["role"]
        if role == "user":
            segments.append(user_prompt.replace("{user}", content))
        elif role == "robot":
            segments.append(robot_prompt.replace("{robot}", content))
        else:
            # Unknown speaker — history is corrupt.
            raise RuntimeError
    segments.append(cur_query_prompt.replace("{user}", prompt))
    return "".join(segments)
|
||||
|
||||
# Prompt-template fragments for the InternLM chat format.
user_prompt = "<|User|>:{user}<eoh>\n"
robot_prompt = "<|Bot|>:{robot}<eoa>\n"
cur_query_prompt = "<|User|>:{user}<eoh>\n<|Bot|>:"
# Snapshot the sidebar settings for this rerun.
generation_config = prepare_generation_config()

# Per-model history used to rebuild the full prompt each turn.
if "messages_internlm_7B" not in st.session_state:
    st.session_state.messages_internlm_7B = []

# asdict turns the GenerationConfig dataclass into kwargs for generation.
from dataclasses import asdict
|
||||
|
||||
def chat_response_internlm_7B(prompt):
    """Stream an InternLM-7B reply for *prompt* and record the turn.

    Renders incremental output into message_placeholder_internlm_7B and
    appends the finished exchange to both the per-model and the shared
    (ai_response) histories.
    """
    real_prompt = combine_history(prompt, messages = st.session_state.messages_internlm_7B)
    st.session_state.messages_internlm_7B.append({"role": "user", "content": prompt, "avatar": 'user'})
    for cur_response in generate_interactive(
        model=model_internlm_7B,
        tokenizer=tokenizer_internlm_7B,
        prompt=real_prompt,
        additional_eos_token_id=103028,  # extra end-of-sequence token id for this model
        **asdict(generation_config),
    ):
        # "▌" acts as a typing cursor while tokens stream in.
        message_placeholder_internlm_7B.markdown(cur_response + "▌")
        # Module-level "停止" button state; breaks out mid-generation.
        if stop_button:
            break
    message_placeholder_internlm_7B.markdown(cur_response)
    st.session_state.messages_internlm_7B.append({"role": "robot", "content": cur_response, "avatar": "assistant"})
    st.session_state.ai_response.append({"role": "robot", "content": cur_response, "avatar": "assistant"})
    return cur_response
|
||||
|
||||
|
||||
def clear_all():
    """Reset both chat histories kept in the Streamlit session state."""
    for key in ("messages_internlm_7B", "ai_response"):
        st.session_state[key] = []
|
||||
|
||||
# Defensive re-initialization of both histories (also done above; harmless).
if 'messages_internlm_7B' not in st.session_state:
    st.session_state.messages_internlm_7B = []
if 'ai_response' not in st.session_state:
    st.session_state.ai_response = []

# Replay the stored conversation so it survives Streamlit reruns.
for ai_response in st.session_state.ai_response:
    with st.chat_message(ai_response["role"], avatar=ai_response.get("avatar")):
        st.markdown(ai_response["content"])

# Placeholders for the in-flight user prompt and the streaming model reply.
prompt_placeholder = st.chat_message("user", avatar='user')
with st.chat_message("robot", avatar="assistant"):
    message_placeholder_internlm_7B = st.empty()

# `prompt` is the chat_input value from above (None until submitted).
if prompt:
    prompt_placeholder.markdown(prompt)
    st.session_state.ai_response.append({"role": "user", "content": prompt, "avatar": 'user'})
    # Temporary "停止" (stop) button shown only while a response streams.
    stop = st.empty()
    stop_button = stop.button('停止', key='break_response')
    chat_response_internlm_7B(prompt)
    stop.empty()
# "清空" (clear) button resets both histories via clear_all.
button_clear = st.button("清空", on_click=clear_all, key='clear')
|
@@ -0,0 +1,111 @@
|
||||
本目录提供辅助模型训练的一些工具,文件结构如下所示:
|
||||
|
||||
```bash
|
||||
├── transformers # 适配hugging face的transformers的一些工具
|
||||
│ ├── configuration_internlm.py # config适配工具
|
||||
│ ├── modeling_internlm.py # model适配工具
|
||||
│ ├── tokenization_internlm.py # tokenizer适配工具
|
||||
│ └── convert2hf.py # 模型适配hugging face工具
|
||||
└── tokenizer.py # 将原始数据转换成bin和meta文件的工具
|
||||
```
|
||||
|
||||
# tokenizer.py
|
||||
|
||||
生成原始数据的`bin`和`meta`文件需要使用`tokenizer`,我们通过在`tools/tokenizer.py`中指定模型参数路径的方式来导入tokenizer模型。目前我们提供了`V7_sft.model`来生成tokens。若想使用不同的模型,可直接修改`tokernizer.py`中的模型参数路径。
|
||||
|
||||
可以运行以下命令生成原始数据对应的`bin`和`meta`文件,其中参数`text_input_path`表示原始文本数据路径,目前支持`txt`、`json`和`jsonl`三种输入格式,`bin_output_path`表示生成的`bin`文件的保存路径。
|
||||
|
||||
```bash
|
||||
$ python tools/tokenizer.py --text_input_path your_input_text_path --bin_output_path your_output_bin_path
|
||||
```
|
||||
|
||||
下面是一个数据处理的例子:
|
||||
|
||||
给定一个包含原始数据集的文件`raw_data.txt`,原始数据集如下所示:
|
||||
|
||||
```bash
|
||||
感恩生活中的每一个细节,才能真正体会到幸福的滋味。
|
||||
梦想是人生的动力源泉,努力追逐,才能实现自己的目标。
|
||||
学会宽容和理解,才能建立真正和谐的人际关系。
|
||||
```
|
||||
|
||||
可以通过运行以下命令来生成`bin`和`meta`文件:
|
||||
```bash
|
||||
$ python tools/tokenizer.py --text_input_path raw_data.txt --bin_output_path cn/output.bin
|
||||
```
|
||||
|
||||
需要注意的是,生成的`bin`文件需要保存在`cn`或者`en`或者`code`或者`ja`或者`ar`或者`kaoshi`这五个目录下,以区分数据集的类型。
|
||||
|
||||
其中,`cn`表示中文数据集;`en`表示英文数据集;`code`表示代码数据集;`ja`表示日语数据集;`ar`表示阿拉伯语数据集;`kaoshi`表示考试数据集。
|
||||
|
||||
生成的bin文件的格式如下:
|
||||
|
||||
```python
|
||||
{"tokens": [73075, 75302, 69522, 69022, 98899, 67713, 68015, 81269, 74637, 75445, 99157]}
|
||||
{"tokens": [69469, 60355, 73026, 68524, 60846, 61844, 98899, 67775, 79241, 98899, 67713, 67800, 67453, 67838, 99157]}
|
||||
{"tokens": [68057, 79017, 60378, 68014, 98899, 67713, 67990, 68015, 70381, 67428, 61003, 67622, 99157]}
|
||||
```
|
||||
|
||||
`bin`文件中的每一行均对应原始数据集中的每一个句子,表示每个句子的`token`(下文将用sequence指定)。
|
||||
|
||||
生成的`meta`文件的格式如下:
|
||||
|
||||
```bash
|
||||
(0, 11), (90, 15), (208, 13)
|
||||
```
|
||||
|
||||
在`meta`文件中,每个元组对应着`bin`文件中每一个`sequence`的元信息。其中,元组的第一个元素表示每个`sequence`在所有`sequence`中的`starting index`,第二个元素表示每个`sequence`中有多少个`tokens`。
|
||||
|
||||
例如,对于第一个`sequence`,`starting index`为 0,有 11 个`tokens`;对于第二个`sequence`,由于第一个`sequence`转换为`string`后的长度为`89`,因此它的`starting index`为 90,有 15 个`tokens`。
|
||||
|
||||
`json`和`jsonl`类型的文件的`bin`和`meta`文件格式和`txt`一致,此处不再赘述。
|
||||
|
||||
# pal_inference.py
|
||||
|
||||
在 [GSM8K](https://huggingface.co/datasets/gsm8k) 数据集上使用 [PAL](https://github.com/reasoning-machines/pal) 范式推理,使模型编写代码并通过 Python 解释器执行来解决数学问题。其用法如下:
|
||||
|
||||
```python
|
||||
# 用法:
|
||||
python pal_inference.py <model> <out_dir> [--dataset <dataset>] [--max_length <length>] [--top_p <threshold>] [--eoh <end token>] [--eoa <end token>] [--eos <end token>] [--temperature <temp>] [--time_out <time>] [--verbose, -v] [--append, -a]
|
||||
|
||||
# 参数:
|
||||
# <model> 用于推理的模型的路径。
|
||||
# <out_dir> 生成代码将保存在指定的输出文件夹中。
|
||||
|
||||
# 可选参数:
|
||||
# --dataset <dataset> 用于代码生成的数据集名称(默认:gsm8k)。
|
||||
# --max_length <length> 模型最大输入 token 长度(默认:2048)。
|
||||
# --top_p <threshold> 候选 token 相加的概率阈值(默认:0.8)。
|
||||
# --eoh <end token> 用户输入结束标识符 (默认: "") 。
|
||||
# --eoa <end token> 模型输入结束标识符 (默认: "") 。
|
||||
# --eos <end token> 系统输入结束标识符. (默认: "") 。
|
||||
# --temperature, -t <temp> 生成过程中的采样温度(默认:1.0)。
|
||||
# --time_out <time> 执行生成的代码的最大时间(秒)(默认:100)。
|
||||
# --verbose, -v 打印代码错误信息(可选)。
|
||||
# --append, -a 将输出追加到历史结果中(可选)。
|
||||
```
|
||||
|
||||
以下是使用示例:
|
||||
|
||||
```bash
|
||||
python tools/pal_inference.py internlm/internlm-chat-7k ./output -v
|
||||
```
|
||||
|
||||
其输出文件每一行包括输入的问题,正确答案,执行答案,得分,以及模型生成的 Python 代码块:
|
||||
|
||||
````json
|
||||
{
|
||||
"question": "Janet\u2019s ducks lay 16 eggs per day. She eats three for breakfast every morning and bakes muffins for her friends every day with four. She sells the remainder at the farmers' market daily for $2 per fresh duck egg. How much in dollars does she make every day at the farmers' market?",
|
||||
"target": 18.0,
|
||||
"answer": 18.0,
|
||||
"score": 1,
|
||||
"generation": ["```python\ndef solution():\n eggs_per_day = 16\n eggs_per_breakfast = 3\n eggs_per_muffin = 4\n eggs_used = eggs_per_day - eggs_per_breakfast - eggs_per_muffin\n eggs_sold = eggs_used\n price_per_egg = 2\n eggs_made = eggs_sold * price_per_egg\n result = eggs_made\n return result\n```"]
|
||||
}
|
||||
````
|
||||
|
||||
InternLM 在 GSM8K 数据集中带工具和不带工具的性能表现:
|
||||
|
||||
| Method | **InternLM-Chat-7B** |
|
||||
| -------- | -------------------- |
|
||||
| w/o tool | 34.5 |
|
||||
| w tool | 39.2 |
|
@@ -0,0 +1,109 @@
|
||||
This directory provide some tools for model training with the following file structure.
|
||||
|
||||
```bash
|
||||
├── transformers # tools for adapting Hugging Face's transformers
|
||||
│ ├── configuration_internlm.py # tools for adapting config
|
||||
│ ├── modeling_internlm.py # tools for adapting model
|
||||
│   ├── tokenization_internlm.py # tools for adapting tokenizer
|
||||
│ └── convert2hf.py # tools for adapting models to Hugging Face's format
|
||||
└── tokenizer.py # tools for generating `bin` and `meta` file for raw data
|
||||
```
|
||||
|
||||
# tokenizer.py
|
||||
|
||||
We need to use a `tokenizer` to generate `bin` and `meta` files for raw data. We import the tokenizer model by specifying the model weight path in `tools/tokenizer.py`. Currently, we provide `V7.model` to generate tokens. If you want to use a different model, you can modify the model weight path in `tokenizer.py` directly.
|
||||
|
||||
We can run the following command to generate `bin` and `meta` files corresponding to the original data. The parameter `text_input_path` represents the path of the original text data, currently supporting `txt`, `json`, and `jsonl` formats, while `bin_output_path` represents the save path of the generated `bin` files.
|
||||
```bash
|
||||
$ python tools/tokenizer.py --text_input_path your_input_text_path --bin_output_path your_output_bin_path
|
||||
```
|
||||
|
||||
An example of data processing in `txt` format is given here:
|
||||
|
||||
Given a file `raw_data.txt` containing raw data with the following content.
|
||||
|
||||
```bash
|
||||
Appreciate every detail in life to truly taste the flavor of happiness.
|
||||
Dreams are the source of life’s motivation. Pursue them diligently to achieve your goals.
|
||||
Learn to be tolerant and understanding to establish truly harmonious interpersonal relationships.
|
||||
```
|
||||
|
||||
Next, we can run the following command to generate `bin` and `meta` files for raw data.
|
||||
|
||||
```bash
|
||||
$ python tools/tokenizer.py --text_input_path your_input_text_path --bin_output_path your_output_bin_path
|
||||
```
|
||||
|
||||
It should be noted that the generated `bin` files should be placed in one of the following directories to clarify the data type: `cn`(Chinese), `en`(English), `code`(code data), `ja`(Japanese), `ar`(Arabic) and `kaoshi`(kaoshi data).
|
||||
|
||||
The format of generated `bin` file is as follows.
|
||||
|
||||
```python
|
||||
{"tokens": [98655, 2317, 2922, 6649, 1595, 7856, 435, 2424, 442, 9556, 12807, 410, 17313, 446, 23331, 95746]}
|
||||
{"tokens": [98655, 302, 1383, 269, 657, 410, 2687, 446, 2424, 98667, 269, 25220, 281, 523, 1874, 492, 1248, 38127, 4563, 442, 11227, 829, 8980, 95746]}
|
||||
{"tokens": [98655, 24190, 442, 517, 15013, 649, 454, 8793, 442, 5849, 9556, 17917, 1369, 1084, 29890, 12021, 95746]}
|
||||
```
|
||||
|
||||
In the generated `bin` file, each line (`sequence`) corresponds to the `tokens` for each sentence in the raw data.
|
||||
|
||||
The format of the generated `meta` file is as follows.
|
||||
|
||||
```bash
|
||||
(0, 16), (110, 24), (262, 17)
|
||||
```
|
||||
|
||||
Each tuple in the `meta` file represents the meta information of each `sequence` where the first element in the tuple indicates the `starting index` of each `sequence` among all `sequences` and the second element indicates the amount of `tokens` for each `sequence`.
|
||||
|
||||
For example, the `starting index` is 0 for the first `sequence` with 16 `tokens`. Since the length of the first `sequence` in `string` format is 109, the `starting index` of the second `sequence` is 110, and its number of `tokens` is 24.
|
||||
|
||||
The `bin` and `meta` file formats for `json` and `jsonl` type files are the same as for `txt`, so we won't go over them here.
|
||||
|
||||
# pal_inference.py
|
||||
|
||||
Perform reasoning using [PAL](https://github.com/reasoning-machines/pal) on the [GSM8K](https://huggingface.co/datasets/gsm8k) dataset, allowing the model to generate code and solve mathematical problems through Python interpretation. Here's how you can use it:
|
||||
|
||||
```bash
|
||||
# Usage:
|
||||
python pal_inference.py <model> <out_dir> [--dataset <dataset>] [--max_length <length>] [--top_p <threshold>] [--eoh <end token>] [--eoa <end token>] [--eos <end token>] [--temperature <temp>] [--time_out <time>] [--verbose, -v] [--append, -a]
|
||||
|
||||
# Parameters:
|
||||
# <model> Path to the model used for inference.
|
||||
# <out_dir> Generated code will be saved in the specified output folder.
|
||||
|
||||
# Optional arguments:
|
||||
# --dataset <dataset> Dataset name used for code generation (default: gsm8k).
|
||||
# --max_length <length> Model's maximum input token length (default: 2048).
|
||||
# --top_p <threshold> Probability threshold for candidate tokens (default: 0.8).
|
||||
# --eoh <end token> End of human (user) token. (default: "").
|
||||
# --eoa <end token> End of assistant (bot) token. (default: "").
|
||||
# --eos <end token> End of system token. (default: "").
|
||||
# --temperature, -t <temp> Sampling temperature during generation (default: 1.0).
|
||||
# --time_out <time> Maximum time (in seconds) for executing the generated code (default: 100).
|
||||
# --verbose, -v Print code error messages (optional).
|
||||
# --append, -a            Append the output to historical results (optional).
|
||||
```
|
||||
|
||||
Below is an example of usage:
|
||||
|
||||
```bash
|
||||
python tools/pal_inference.py internlm/internlm-chat-7k ./output -v
|
||||
```
|
||||
|
||||
The output file contains each line with the input question, the correct answer, the executed answer, the score, and the Python code block generated by the model:
|
||||
|
||||
````json
|
||||
{
|
||||
"question": "Janet\u2019s ducks lay 16 eggs per day. She eats three for breakfast every morning and bakes muffins for her friends every day with four. She sells the remainder at the farmers' market daily for $2 per fresh duck egg. How much in dollars does she make every day at the farmers' market?",
|
||||
"target": 18.0,
|
||||
"answer": 18.0,
|
||||
"score": 1,
|
||||
"generation": ["```python\ndef solution():\n eggs_per_day = 16\n eggs_per_breakfast = 3\n eggs_per_muffin = 4\n eggs_used = eggs_per_day - eggs_per_breakfast - eggs_per_muffin\n eggs_sold = eggs_used\n price_per_egg = 2\n eggs_made = eggs_sold * price_per_egg\n result = eggs_made\n return result\n```"]
|
||||
}
|
||||
````
|
||||
|
||||
InternLM performance in the GSM8K dataset with and without tools:
|
||||
|
||||
| Method | **InternLM-Chat-7B** |
|
||||
| -------- | -------------------- |
|
||||
| w/o tool | 34.5 |
|
||||
| w tool | 39.2 |
|
Binary file not shown.
@@ -0,0 +1,164 @@
|
||||
import argparse
|
||||
import json
|
||||
import os.path as osp
|
||||
from pathlib import Path
|
||||
|
||||
import numpy as np
|
||||
import sentencepiece as spm
|
||||
from tqdm import tqdm
|
||||
|
||||
|
||||
def process(dataset_path, sp_model):
    """Process data sample from input dataset

    Args:
        dataset_path (str): Path of dataset json file.
        sp_model: Loaded sentencepiece tokenizer.

    Yields:
        tuple: dumped processed data sample and length of tokens.
    """
    # Use a context manager so the file handle is closed promptly;
    # the original json.load(open(...)) leaked the descriptor.
    with open(dataset_path) as f:
        dataset = json.load(f)

    for data in dataset:
        yield tokenize(get_chat_format_data(data), sp_model)
|
||||
|
||||
|
||||
def get_chat_format_data(ori_data):
    """Format original data

    Args:
        ori_data (dict): input data sample with "instruction", "input"
            and "output" fields.

    Returns:
        dict: data sample with chat format ("user" / "bot" keys).
    """
    instruction = ori_data["instruction"]
    extra_input = ori_data["input"]
    # The optional free-form input is appended on a new line after the
    # instruction; an empty string means there is none.
    user_text = f"<|User|>:{instruction}"
    if extra_input != "":
        user_text += f"\n{extra_input}"
    return {"user": user_text, "bot": f"<|Bot|>:{ori_data['output']}"}
|
||||
|
||||
|
||||
def tokenize(sample, sp_model):
    """Tokenize input dataset

    Args:
        sample (dict): Input data sample with "user" and "bot" fields.
        sp_model: Loaded sentencepiece tokenizer.

    Returns:
        tuple: dumped processed data sample (JSON line as bytes) and
            length of tokens.
    """
    special_tokens_map = {"<eoh>": 103167, "<eoa>": 103166, "nl_id": 13}
    token_ids = [sp_model.bos_id()]
    human_s = sample["user"]
    ass_s = sample["bot"]

    # Human-side token ids are negated so the training loss can mask them out.
    human_ids = sp_model.encode(human_s) + [special_tokens_map["<eoh>"], special_tokens_map["nl_id"]]
    human_ids_ignore = [-token_id for token_id in human_ids]

    ass_template_ids = sp_model.encode("<|Bot|>:")
    # FIX: comprehension variable used to be `token_ids`, shadowing the
    # accumulator above; renamed for clarity and consistency (same behavior).
    ass_template_ids_ignore = [-token_id for token_id in ass_template_ids]
    # ass_s[8:] strips the literal "<|Bot|>:" prefix added by get_chat_format_data.
    ass_ids = (
        ass_template_ids_ignore
        + sp_model.encode(ass_s[8:])
        + [special_tokens_map["<eoa>"], special_tokens_map["nl_id"]]
    )

    token_ids += human_ids_ignore + ass_ids
    # Truncate to 2047 so the sequence plus <eos> fits in 2048 tokens.
    if len(token_ids) > 2047:
        token_ids = token_ids[:2047]
    token_ids += [sp_model.eos_id()]
    line = str.encode(json.dumps({"tokens": token_ids}) + "\n")
    return line, len(token_ids)
|
||||
|
||||
|
||||
def dump_bin_meta_bin(samples, path, split_ratio=0.1):
    """Dump processed dataset

    Args:
        samples (list): Iterable of (line_bytes, token_count) pairs.
        path (str): Path for output dataset.
        split_ratio (float): Ratio for validation dataset splitting.
            Default to: 0.1.

    Returns:
        tuple: number of train/valid tokens of processed dataset,
            number of train/valid samples of processed dataset.
    """

    # Output layout is fixed to English: <path>/train/en and <path>/valid/en.
    train_path = osp.join(path, "train/en/")
    valid_path = osp.join(path, "valid/en/")
    train_dir = Path(train_path)
    valid_dir = Path(valid_path)
    train_dir.mkdir(exist_ok=True, parents=True)
    valid_dir.mkdir(exist_ok=True, parents=True)
    train_f = open(train_dir.joinpath("dataset.bin"), "wb")
    valid_f = open(valid_dir.joinpath("dataset.bin"), "wb")

    train_tokens = 0
    valid_tokens = 0
    # Byte offsets of the next record in each .bin file (for the meta files).
    last_train_position = 0
    last_valid_position = 0
    train_samples = 0
    valid_samples = 0
    train_meta = []
    valid_meta = []

    sample_length = len(samples)
    np.random.seed(0)  # deterministic split across runs
    # NOTE(review): np.random.choice is called without replace=False, so
    # valid_indices may contain duplicates and the actual validation share
    # can fall below split_ratio — confirm whether that is intended.
    valid_indices = np.random.choice(range(sample_length), int(sample_length * split_ratio)).tolist()

    count = -1
    for line, token_num in samples:
        count += 1
        if count in valid_indices:
            valid_tokens += token_num
            valid_f.write(line)
            # (byte offset, token count) for this sequence.
            valid_meta.append((last_valid_position, token_num))
            last_valid_position += len(line)
            valid_samples += 1
        else:
            train_tokens += token_num
            train_f.write(line)
            train_meta.append((last_train_position, token_num))
            last_train_position += len(line)
            train_samples += 1

    train_f.close()
    valid_f.close()
    np.save(open(train_dir.joinpath("dataset.bin.meta"), "wb"), train_meta)
    np.save(open(valid_dir.joinpath("dataset.bin.meta"), "wb"), valid_meta)

    return train_tokens, valid_tokens, train_samples, valid_samples
|
||||
|
||||
|
||||
if __name__ == "__main__":

    parser = argparse.ArgumentParser()
    parser.add_argument("dataset_path", type=str, help="path of dataset json file")
    parser.add_argument("output_path", type=str, help="path of processed dataset")
    parser.add_argument("tokenizer_path", type=str, help="path of tokenizer")
    parser.add_argument("--split_ratio", type=float, default=0.1, help="ratio for validation dataset splitting")

    args = parser.parse_args()
    sp_model = spm.SentencePieceProcessor(model_file=args.tokenizer_path)

    # Materialize the tokenized samples with a progress bar.
    # (FIX: removed the unused `split_ratio` local and the manual
    # append loop that duplicated `list(...)`.)
    samples = list(tqdm(process(args.dataset_path, sp_model)))

    train_tokens, valid_tokens, train_samples, valid_samples = dump_bin_meta_bin(
        samples, args.output_path, args.split_ratio
    )
    print(f"number of train dataset: {train_samples}, number of train dataset token: {train_tokens}")
    print(f"number of validation dataset: {valid_samples}, number of validation dataset token: {valid_tokens}")
|
@@ -0,0 +1,320 @@
|
||||
# This file is modified from:
|
||||
# https://github.com/reasoning-machines/pal/blob/main/pal/core/interface.py
|
||||
#
|
||||
# Copyright 2022 PAL Authors. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import argparse
|
||||
import copy
|
||||
import json
|
||||
import os
|
||||
from dataclasses import asdict
|
||||
from typing import Any, Dict, List
|
||||
|
||||
import torch
|
||||
import tqdm
|
||||
from datasets import load_dataset
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer
|
||||
|
||||
from tools.transformers.interface import GenerationConfig, generate_interactive
|
||||
from internlm.utils.timeout import Timeout
|
||||
|
||||
|
||||
def parse_args():
    """Parse command-line options for PAL-style GSM8K inference.

    Returns:
        argparse.Namespace: parsed arguments (model path, output dir,
        dataset name, decoding parameters, end tokens, timeout, flags).
    """
    parser = argparse.ArgumentParser(description="PAL Inference")
    parser.add_argument("model", type=str, help="Path to the pre-trained LLM used for inference.")
    parser.add_argument(
        "out_dir", type=str, help="Name of the output folder where generated code snippets will be saved."
    )
    parser.add_argument("--dataset", default="gsm8k", type=str, help="Name of the dataset used for code generation.")
    parser.add_argument(
        "--max_length",
        default=2048,
        type=int,
        help="Maximum input token length for the natural language description.",
    )
    parser.add_argument(
        "--top_p",
        default=0.8,
        type=float,
        help="Probability threshold to choose sample tokens during generation.",
    )
    # The three end-token options default to empty strings; they are
    # substituted into the PROMPT template by main().
    parser.add_argument(
        "--eoh",
        default="",
        type=str,
        help="End of human (user) token.",
    )
    parser.add_argument(
        "--eoa",
        default="",
        type=str,
        help="End of assistant (bot) token.",
    )
    parser.add_argument(
        "--eos",
        default="",
        type=str,
        help="End of system token.",
    )
    parser.add_argument(
        "--temperature", "-t", default=1.0, type=float, help="Temperature of token sampling during generation."
    )
    parser.add_argument(
        "--time_out", default=100, type=float, help="Maximum time allowed for executing generated code."
    )
    parser.add_argument(
        "--verbose",
        "-v",
        action="store_true",
        help="Print code error information when executing generated code (optional).",
    )
    parser.add_argument("--append", "-a", action="store_true", help="Append output to the history results (optional).")
    args = parser.parse_args()
    return args
|
||||
|
||||
|
||||
class GenericRuntime:
    """Minimal Python execution environment, adapted from
    https://github.com/reasoning-machines/pal

    NOTE(security): snippets are run with ``exec``/``eval`` — only feed it
    model output you are prepared to trust.
    """

    GLOBAL_DICT: dict = {}
    LOCAL_DICT = None
    HEADERS: List = []

    def __init__(self):
        # Per-instance copies so runs do not pollute the class-level defaults.
        self._global_vars = copy.copy(self.GLOBAL_DICT)
        self._local_vars = copy.copy(self.LOCAL_DICT) if self.LOCAL_DICT else None
        for header in self.HEADERS:
            self.exec_code(header)

    def exec_code(self, code_piece: str) -> None:
        # Run a statement block inside the runtime's namespace.
        exec(code_piece, self._global_vars)

    def eval_code(self, expr: str) -> Any:
        # Evaluate an expression against the runtime's namespace.
        return eval(expr, self._global_vars)

    def inject(self, var_dict: Dict[str, Any]) -> None:
        # Pre-seed variables into the runtime before executing code.
        self._global_vars.update(var_dict)

    @property
    def answer(self):
        # Convention: generated code stores its result in `answer`.
        return self._global_vars["answer"]
|
||||
|
||||
|
||||
class PALInterface:
    """PAL interface wrapping :func:`generate_interactive` to extract and
    execute model-generated code.

    Adapted from https://github.com/reasoning-machines/pal

    Args:
        model (AutoModelForCausalLM)
        tokenizer (AutoTokenizer)
        generation_config (GenerationConfig): Decode strategies
        additional_eos_token_id (int): End of sentence token id, default: 103028
        get_answer_expr (str): The function name of generated code, default: "solution()"
        verbose (bool): Print error information
    """

    def __init__(
        self,
        model: AutoModelForCausalLM,
        tokenizer: AutoTokenizer,
        generation_config: GenerationConfig,
        additional_eos_token_id: int = 103028,
        get_answer_expr: str = "solution()",
        verbose: bool = False,
    ):
        self.runtime = GenericRuntime()
        self.history: List = []
        self.model = model
        self.tokenizer = tokenizer
        self.generation_config = generation_config
        self.additional_eos_token_id = additional_eos_token_id
        self.answer_expr = get_answer_expr
        self.verbose = verbose

    def generate(self, prompt):
        """Generate a full response for *prompt* and return its code block."""
        # The api generates the response word by word;
        # we only need the last yield as the final result.
        for cur_gen in generate_interactive(
            model=self.model,
            tokenizer=self.tokenizer,
            prompt=prompt,
            additional_eos_token_id=self.additional_eos_token_id,
            **asdict(self.generation_config),
        ):
            continue
        # NOTE(review): if the generator yields nothing, `cur_gen` is unbound
        # and the next line raises NameError — confirm generate_interactive
        # always yields at least once.
        self.history.append(cur_gen)
        # Extract code block
        code = self.process_generation_to_code(cur_gen)
        return code

    def process_generation_to_code(self, gens: str):
        """Strip markdown fences and return the code as a list of lines."""
        if "```python" in gens:
            gens = gens.split("```python")[1].split("```")[0]
        elif "```" in gens:
            gens = gens.split("```")[1].split("```")[0]
        code = gens.split("\n")
        return code

    def run(self, prompt, time_out: float = 100):
        """Generate code for *prompt*, execute it, and return the result.

        FIX: ``exec_result`` is now initialized before the try block. The
        original raised UnboundLocalError on the return when execution
        failed, masking the real error; an empty string is returned instead
        (the caller already treats a non-numeric answer as a miss).
        """
        code = self.generate(prompt)
        exec_result = ""
        with Timeout(time_out):
            try:
                exec_result = self.execute(code)
            except Exception as e:
                if self.verbose:
                    print(e)
        return exec_result

    def execute(self, code: List[str]):
        """Run the generated function body, then evaluate ``answer_expr``."""
        self.runtime.exec_code("\n".join(code))
        return self.runtime.eval_code(self.answer_expr)

    def clear_history(self):
        """Drop all recorded generations."""
        self.history = []
|
||||
|
||||
|
||||
def load_model(args):
    """Load the causal-LM checkpoint and tokenizer named by ``args.model``.

    The model is cast to bfloat16 and moved to the GPU.
    """
    model = (
        AutoModelForCausalLM.from_pretrained(args.model, trust_remote_code=True)
        .to(torch.bfloat16)
        .cuda()
    )
    tokenizer = AutoTokenizer.from_pretrained(args.model, trust_remote_code=True)
    return model, tokenizer
|
||||
|
||||
|
||||
def load_data(args):
    """Load the evaluation split of the configured dataset.

    Returns:
        list[dict]: items with "question" (str) and "target" (float) keys.

    Raises:
        NotImplementedError: for any dataset other than gsm8k.
    """
    # Load data from the Hugging Face hub; only gsm8k is supported.
    if args.dataset != "gsm8k":
        raise NotImplementedError
    gsm8k = load_dataset(path=args.dataset, name="main")
    # GSM8K answers end with "#### <number>"; take the text after the
    # final "#" and strip thousands separators.
    input_data = [
        {
            "question": data["question"],
            "target": float(data["answer"].split("#")[-1].replace(",", "")),
        }
        for data in gsm8k["test"]
    ]
    return input_data
|
||||
|
||||
|
||||
PROMPT = """<|System|>:You are a helpful assistant which use tools to solve mathematical reasoning questions. The tools you can use are:
|
||||
PythonExecutor: It can execute Python code. The code must be a function, and the function name must be 'solution'. The example format is as follows:
|
||||
```python
|
||||
def solution():
|
||||
variable_names_with_real_meaning = func(variable)
|
||||
return variable_names_with_real_meaning
|
||||
```{eos}
|
||||
<|User|>:Olivia has $23. She bought five bagels for $3 each. How much money does she have left?{eoh}
|
||||
<|Bot|>:
|
||||
```python
|
||||
def solution():
|
||||
money_initial = 23
|
||||
bagels = 5
|
||||
bagel_cost = 3
|
||||
money_spent = bagels * bagel_cost
|
||||
money_left = money_initial - money_spent
|
||||
result = money_left
|
||||
return result
|
||||
```{eoa}
|
||||
<|User|>:Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday, he lost 2 more. How many golf balls did he have at the end of wednesday?{eoh}
|
||||
<|Bot|>:
|
||||
```python
|
||||
def solution():
|
||||
golf_balls_initial = 58
|
||||
golf_balls_lost_tuesday = 23
|
||||
golf_balls_lost_wednesday = 2
|
||||
golf_balls_left = golf_balls_initial - golf_balls_lost_tuesday - golf_balls_lost_wednesday
|
||||
result = golf_balls_left
|
||||
return result
|
||||
```{eoa}
|
||||
<|User|>:There were nine computers in the server room. Five more computers were installed each day, from monday to thursday. How many computers are now in the server room?{eoh}
|
||||
<|Bot|>:
|
||||
```python
|
||||
def solution():
|
||||
computers_initial = 9
|
||||
computers_per_day = 5
|
||||
num_days = 4 # 4 days between monday and thursday
|
||||
computers_added = computers_per_day * num_days
|
||||
computers_total = computers_initial + computers_added
|
||||
result = computers_total
|
||||
return result
|
||||
```{eoa}
|
||||
<|System|>:How about this question?{eos}
|
||||
<|User|>:{question}{eoh}
|
||||
<|Bot|>:""".strip()
|
||||
|
||||
|
||||
def main():
    """End-to-end PAL inference: load model and data, generate + execute
    code per question, score against the target, and write JSONL results."""

    args = parse_args()

    print("load model begin.")
    model, tokenizer = load_model(args)
    print("load model end.")

    generation_config = GenerationConfig(max_length=args.max_length, top_p=args.top_p, temperature=args.temperature)

    verbose = args.verbose
    interface = PALInterface(model=model, tokenizer=tokenizer, generation_config=generation_config, verbose=verbose)

    if not os.path.exists(args.out_dir):
        os.makedirs(args.out_dir)
    savepath = os.path.join(args.out_dir, args.dataset + ".json")

    # Load from history results
    if args.append and os.path.exists(savepath):
        lines = open(savepath).readlines()
        num_skip_exps = len(lines)
        scores = [x["score"] for x in map(json.loads, lines)]
    else:
        num_skip_exps = 0
        scores = []

    examples = load_data(args)
    with open(savepath, "a" if args.append else "w") as f:
        # Resume after already-processed examples when --append is set.
        pbar = tqdm.tqdm(examples[num_skip_exps:], initial=num_skip_exps, total=len(examples))
        for x in pbar:
            question = x["question"]
            result = copy.copy(x)

            try:
                answer = interface.run(
                    prompt=PROMPT.format(question=question, eoh=args.eoh, eoa=args.eoa, eos=args.eos),
                    time_out=args.time_out,
                )
                answer = float(answer)
                # Score 1 when the executed answer matches the target
                # within a small absolute tolerance.
                score = 1 if abs(answer - x["target"]) < 1e-3 else 0
            except Exception as e:
                # Any generation/execution/parsing failure counts as a miss.
                if verbose:
                    print(e)
                answer = ""
                score = 0
            scores.append(score)
            result["answer"] = answer
            result["score"] = score
            result["generation"] = interface.history
            f.write(json.dumps(result) + "\n")

            interface.clear_history()
            f.flush()  # keep the JSONL durable across interruptions

    # NOTE(review): divides by len(scores) — raises ZeroDivisionError if the
    # dataset is empty; confirm that case cannot occur.
    print(f"{args.model}: Accuracy - {sum(scores) / len(scores)}")
    torch.cuda.empty_cache()


if __name__ == "__main__":
    main()
|
@@ -0,0 +1,142 @@
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
|
||||
import numpy as np
|
||||
|
||||
current_dir = os.path.dirname(os.path.abspath(__file__))
|
||||
model_path = os.path.join(current_dir, "V7_sft.model")
|
||||
sys.path.append(os.path.join(current_dir, "transformers"))
|
||||
from tokenization_internlm import InternLMTokenizer
|
||||
|
||||
tokenizer = InternLMTokenizer(vocab_file=model_path)
|
||||
|
||||
|
||||
def write_bin(context: str, bin_file) -> None:
    """
    Write one tokenized sample to the opened bin file as a JSON line.

    Args:
        context (str): the raw text of one sample.
        bin_file (file handler): the opened bin file (binary write/append mode).

    Example:
        >>> write_bin("今天天气晴朗适合出门散步", "out.bin") # the output file format is 'txt'
        >>> out.bin
        >>> {"tokens": [67577, 69095, 63010, 61770, 67783, 69301, 74732]}
    """
    # encode the context into tokens, which is a list, eg. [67577, 69095, 63010, 61770, 67783, 69301, 74732]
    # (uses the module-level InternLM tokenizer)
    tokens = tokenizer.encode(context)
    # wrap the token list into a dict so each line is self-describing JSON,
    # eg. {"tokens": [67577, 69095, 63010, 61770, 67783, 69301, 74732]}
    data = dict(tokens=tokens)
    # serialize to bytes: one JSON object per line, newline-terminated
    saved_bin = str.encode(json.dumps(data) + "\n")

    # write bytes into bin_file
    bin_file.write(saved_bin)
|
||||
|
||||
|
||||
def prepare_meta(bin_output_path: str):
    """
    Build and save the metadata (``.meta``) companion file for a bin file.

    Each metadata entry is a ``(start_offset, token_count)`` pair describing
    one JSON line of the bin file; the full table is saved next to the bin
    file as an int32 numpy array via ``np.save``.

    Args:
        bin_output_path (str): Output bin file path.
    """
    meta = []
    offset = 0
    with open(bin_output_path, "rb") as bin_f:
        # Each line is a JSON object like {"tokens": [...]}; iterating the
        # binary file yields lines until EOF.
        for raw_line in bin_f:
            token_count = len(json.loads(raw_line)["tokens"])
            # Record where this line starts and how many tokens it holds.
            meta.append((offset, token_count))
            # Advance the byte offset to the start of the next line.
            offset += len(raw_line)

    # Persist the metadata as an int32 array beside the bin file.
    meta_fp = bin_output_path + ".meta"
    with open(meta_fp, "wb") as meta_f:
        np.save(meta_f, np.array(meta, dtype=np.int32))
|
||||
|
||||
|
||||
def text2bin(text_input_path: str, bin_output_path: str):
    """
    Read content from the input file and append tokenized samples to a bin file.

    Currently support 3 input formats: 'txt', 'json' and 'jsonl':
      - txt:   each non-empty line is one sample
      - json:  the file holds a list of records; each record is one sample
      - jsonl: each raw line is one sample

    Args:
        text_input_path (str): txt file path.
        bin_output_path (str): output bin file path.

    Raises:
        FileNotFoundError: if the input file does not exist.
        ValueError: if the input file extension is not supported.
    """
    # Check if the txt file exists
    if not os.path.isfile(text_input_path):
        raise FileNotFoundError(f"{text_input_path} does not exist.")

    file_format = text_input_path.split(".")[-1]
    # fix: raise an explicit error instead of `assert ..., print(...)` --
    # print() returns None (so the assert message was useless) and asserts
    # are stripped entirely under `python -O`.
    if file_format not in ("txt", "json", "jsonl"):
        raise ValueError("Invalid input file type. Currently support `txt`, `json` and `jsonl`.")

    with open(text_input_path, "r") as text_file, open(bin_output_path, "ab") as bin_file:
        if file_format == "txt":
            for line in text_file:
                # Strip any leading/trailing whitespace
                stripped_line = line.strip()
                if stripped_line:
                    # Pass each line to the write_bin function
                    write_bin(stripped_line, bin_file)

        elif file_format == "json":
            data = json.load(text_file)
            # assuming data is a list of dictionaries
            for record in data:
                # the type of record is dict; serialize it back to a str
                context = json.dumps(record)
                # encode the str and write into bin
                write_bin(context, bin_file)

        elif file_format == "jsonl":
            for line in text_file:
                # NOTE(review): unlike the txt branch, the raw line (including
                # its trailing newline) is tokenized here -- confirm intended.
                write_bin(line, bin_file)
|
||||
|
||||
|
||||
def parse_args():
    """Parse command-line arguments for the text-to-bin conversion tool."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--text_input_path", type=str, required=True, help="Path to the input text file.")
    arg_parser.add_argument("--bin_output_path", type=str, required=True, help="Path to the output bin file.")
    return arg_parser.parse_args()
|
||||
|
||||
|
||||
def main():
    """Convert a text file into a tokenized ``.bin`` file plus its ``.meta`` index."""
    # parse arguments
    args = parse_args()

    text2bin(args.text_input_path, args.bin_output_path)
    print(f"Successfully converted {args.text_input_path} to {args.bin_output_path}")

    # To avoid potential read/write errors, the metadata preparation follows after creating the .bin file.
    prepare_meta(args.bin_output_path)
    print(f"Successfully generated {args.bin_output_path}.meta")


if __name__ == "__main__":
    main()
|
@@ -0,0 +1,25 @@
|
||||
# InternLM Transformers
|
||||
|
||||
[English](./README.md) |
|
||||
[简体中文](./README-zh-Hans.md)
|
||||
|
||||
该文件夹下包含了 transformers 格式的 `InternLM` 模型。
|
||||
|
||||
|
||||
## 权重转换
|
||||
|
||||
`convert2hf.py` 可以将训练保存的权重一键转换为 transformers 格式。在仓库根目录运行以下命令:
|
||||
|
||||
```bash
|
||||
python tools/transformers/convert2hf.py --src_folder origin_ckpt/ --tgt_folder hf_ckpt/ --tokenizer ./tools/V7_sft.model
|
||||
```
|
||||
|
||||
然后可以使用 `from_pretrained` 接口加载:
|
||||
|
||||
```python
|
||||
>>> from transformers import AutoTokenizer, AutoModel
|
||||
>>> model = AutoModel.from_pretrained("hf_ckpt/", trust_remote_code=True).cuda()
|
||||
```
|
||||
|
||||
|
||||
`intern_moss_example.py` 展示了如何使用 LoRA 来在 `fnlp/moss-moon-002-sft` 数据集上进行微调的样例。
|
@@ -0,0 +1,23 @@
|
||||
# InternLM Transformers
|
||||
|
||||
[English](./README.md) |
|
||||
[简体中文](./README-zh-Hans.md)
|
||||
|
||||
This folder contains the `InternLM` model in transformers format.
|
||||
|
||||
## Weight Conversion
|
||||
|
||||
`convert2hf.py` can convert saved training weights into the transformers format with a single command. Execute the command in the root directory of the repository:
|
||||
|
||||
```bash
|
||||
python tools/transformers/convert2hf.py --src_folder origin_ckpt/ --tgt_folder hf_ckpt/ --tokenizer ./tools/V7_sft.model
|
||||
```
|
||||
|
||||
Then, you can load it using the `from_pretrained` interface:
|
||||
|
||||
```python
|
||||
>>> from transformers import AutoTokenizer, AutoModel
|
||||
>>> model = AutoModel.from_pretrained("hf_ckpt/", trust_remote_code=True).cuda()
|
||||
```
|
||||
|
||||
`intern_moss_example.py` demonstrates an example of how to use LoRA for fine-tuning on the `fnlp/moss-moon-002-sft` dataset.
|
@@ -0,0 +1,120 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
|
||||
# and OPT implementations in this library. It has been modified from its
|
||||
# original forms to accommodate minor architectural differences compared
|
||||
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
""" InternLM model configuration"""
|
||||
|
||||
from transformers.utils import logging
|
||||
from transformers.configuration_utils import PretrainedConfig
|
||||
|
||||
|
||||
logger = logging.get_logger(__name__)
|
||||
|
||||
INTERNLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
|
||||
|
||||
|
||||
class InternLMConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`InternLMModel`]. It is used to instantiate an InternLM
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the InternLM-7B.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 103168):
            Vocabulary size of the InternLM model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`InternLMModel`]
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 11008):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-6):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings(`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings
        bias (`bool`, *optional*, defaults to `True`):
            Whether to add bias terms in the linear projections.
    Example:

    ```python
    >>> from transformers import InternLMModel, InternLMConfig

    >>> # Initializing a InternLM internlm-7b style configuration
    >>> configuration = InternLMConfig()

    >>> # Initializing a model from the internlm-7b style configuration
    >>> model = InternLMModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = "internlm"
    _auto_class = "AutoConfig"

    def __init__(
        self,
        vocab_size=103168,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        bias=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.bias = bias
        # Token ids and embedding tying are handled by the PretrainedConfig base class.
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
|
@@ -0,0 +1,175 @@
|
||||
import argparse
|
||||
import math
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import tempfile
|
||||
|
||||
import torch
|
||||
from modeling_internlm import InternLMConfig, InternLMForCausalLM
|
||||
from tokenization_internlm import InternLMTokenizer
|
||||
|
||||
NUM_SHARDS = {
|
||||
"7B": 1,
|
||||
}
|
||||
|
||||
|
||||
def convert2hf(model_config, states_tp_pps):
    """Convert InternLM training checkpoint states into a HuggingFace model.

    Args:
        model_config (dict): training config with keys such as ``hidden_size``,
            ``num_attention_heads``, ``num_layers`` and ``vocab_size``.
        states_tp_pps (list[list[dict]]): state dicts indexed by
            [tensor-parallel rank][pipeline-parallel rank].

    Returns:
        tuple: ``(config, model)`` -- the ``InternLMConfig`` and the converted
        ``InternLMForCausalLM`` loaded in float16.

    Raises:
        KeyError: if no embedding weight is found in the checkpoint states.
    """
    with tempfile.TemporaryDirectory() as folder:
        # Merge pipeline-parallel shards; only TP rank 0 is used here.
        states = merge_pp(states_tp_pps)[0]

        if "embedding.word_embeddings.weight" in states:
            embedding_key = "embedding.word_embeddings.weight"
        elif "embedding.weight" in states:
            embedding_key = "embedding.weight"
        else:
            print("Check embedding states'names in below:", flush=True)
            print(list(states.keys()), flush=True)
            # fix: fail fast with a clear error instead of a later NameError
            # on the unbound `embedding_key`.
            raise KeyError("no embedding weight found in checkpoint states")

        # Precompute rotary inverse frequencies (RoPE with base 10000).
        dims_per_head = model_config["hidden_size"] // model_config["num_attention_heads"]
        base = 10000.0
        inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))

        current_states = {}

        current_states["model.embed_tokens.weight"] = states.pop(embedding_key)
        current_states["model.norm.weight"] = states.pop("norm.weight")
        current_states["lm_head.weight"] = states.pop("head.weight")

        for i in range(model_config["num_layers"]):
            # Rotary inv_freq is recomputed above; drop the checkpoint copy if present.
            states.pop(f"blocks.{i}.mixer.rotary_emb.inv_freq", None)

            # Split the fused Wqkv projection into separate q/k/v tensors.
            wqkv = states.pop(f"blocks.{i}.mixer.Wqkv.weight").reshape(
                3, model_config["num_attention_heads"], -1, model_config["hidden_size"]
            )
            bqkv = states.pop(f"blocks.{i}.mixer.Wqkv.bias").reshape(3, model_config["num_attention_heads"], -1)

            current_states[f"model.layers.{i}.self_attn.q_proj.weight"] = wqkv[0].reshape(
                -1, model_config["hidden_size"]
            )
            current_states[f"model.layers.{i}.self_attn.q_proj.bias"] = bqkv[0].reshape(-1)
            current_states[f"model.layers.{i}.self_attn.k_proj.weight"] = wqkv[1].reshape(
                -1, model_config["hidden_size"]
            )
            current_states[f"model.layers.{i}.self_attn.k_proj.bias"] = bqkv[1].reshape(-1)
            current_states[f"model.layers.{i}.self_attn.v_proj.weight"] = wqkv[2].reshape(
                -1, model_config["hidden_size"]
            )
            current_states[f"model.layers.{i}.self_attn.v_proj.bias"] = bqkv[2].reshape(-1)

            current_states[f"model.layers.{i}.self_attn.o_proj.weight"] = states.pop(
                f"blocks.{i}.mixer.out_proj.weight"
            )
            current_states[f"model.layers.{i}.self_attn.o_proj.bias"] = states.pop(f"blocks.{i}.mixer.out_proj.bias")

            # MLP name mapping: w1 -> gate_proj, w3 -> down_proj, w2 -> up_proj.
            current_states[f"model.layers.{i}.mlp.gate_proj.weight"] = states.pop(f"blocks.{i}.mlp.w1.weight")
            current_states[f"model.layers.{i}.mlp.down_proj.weight"] = states.pop(f"blocks.{i}.mlp.w3.weight")
            current_states[f"model.layers.{i}.mlp.up_proj.weight"] = states.pop(f"blocks.{i}.mlp.w2.weight")

            current_states[f"model.layers.{i}.input_layernorm.weight"] = states.pop(f"blocks.{i}.norm1.weight")
            current_states[f"model.layers.{i}.post_attention_layernorm.weight"] = states.pop(f"blocks.{i}.norm2.weight")
            current_states[f"model.layers.{i}.self_attn.rotary_emb.inv_freq"] = inv_freq

        config = InternLMConfig(
            hidden_size=model_config["hidden_size"],
            intermediate_size=compute_intermediate_size(model_config["hidden_size"]),
            num_attention_heads=model_config["num_attention_heads"],
            num_hidden_layers=model_config["num_layers"],
            rms_norm_eps=1e-06,
            bias=True,
        )

        if model_config["vocab_size"] != -1:
            config.vocab_size = model_config["vocab_size"]

        config.save_pretrained(folder)
        torch.save(current_states, os.path.join(folder, "pytorch_model.bin"))

        # Round-trip through from_pretrained to obtain a properly initialized model.
        model = InternLMForCausalLM.from_pretrained(folder, torch_dtype=torch.float16)
        # Drop the temporary directory path so it does not leak into the saved config.
        del model.config._name_or_path

        return config, model
|
||||
|
||||
|
||||
def compute_intermediate_size(n):
    """Return the MLP intermediate width for hidden size *n*: ceil(8n/3) rounded up to a multiple of 256."""
    raw = math.ceil(n * 8 / 3) + 255
    return (int(raw) // 256) * 256
|
||||
|
||||
|
||||
def merge_pp(states_tp_pp):
    """Merge pipeline-parallel state dict shards for each tensor-parallel rank.

    Layer indices embedded in keys (e.g. ``blocks.<i>.``) are shifted so that
    layers of successive pipeline stages are numbered consecutively. A leading
    ``model.`` prefix, if present, is stripped from the merged keys.

    Args:
        states_tp_pp (list[list[dict]]): state dicts indexed by [tp][pp].

    Returns:
        list[dict]: one merged state dict per tensor-parallel rank.
    """
    max_tp = len(states_tp_pp)
    max_pp = len(states_tp_pp[0])

    # fix: use a raw string -- "\.\d+\." contains invalid escape sequences
    # (DeprecationWarning today, a SyntaxError in future Python versions);
    # also hoist the compile out of the loops.
    layer_idx_pattern = re.compile(r"\.\d+\.")

    full_states = []
    for tp in range(max_tp):
        layer_shift = 0

        tp_states = {}
        for pp in range(max_pp):
            _layer_shift = 0
            states = states_tp_pp[tp][pp]
            keys = list(states.keys())
            for key in keys:
                match = layer_idx_pattern.search(key)
                if match is not None:
                    s, e = match.span()
                    # Shift the local layer index by the number of layers in
                    # all previous pipeline stages.
                    layer_idx = int(key[s + 1 : e - 1]) + layer_shift
                    _layer_shift = max(_layer_shift, int(key[s + 1 : e - 1]))
                    name = key[:s] + f".{layer_idx}." + key[e:]
                    tp_states[name] = states[key]
                else:
                    tp_states[key] = states[key]
            layer_shift += _layer_shift + 1
        full_states.append({(key[6:] if key.startswith("model.") else key): value for key, value in tp_states.items()})
    return full_states
|
||||
|
||||
|
||||
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--src_folder', type=str, default='~/test/')  # checkpoint folder to convert to hf format
    parser.add_argument('--tgt_folder', type=str, default='~/output/')  # target folder for the converted checkpoint
    parser.add_argument('--tokenizer', type=str, default='~/test/tokenizer.model')  # path to the tokenizer file
    args = parser.parse_args()

    def load(fp):
        # Load a torch checkpoint onto CPU (the file may have been saved from GPU).
        with open(fp, "rb") as f:
            pt_data = torch.load(f, map_location="cpu")
        return pt_data

    folder = args.src_folder
    target_folder = args.tgt_folder
    model_config = load(os.path.join(folder, "model_config.pt"))

    fns = list(os.listdir(folder))

    # Collect model shard files (model_tp*_pp*.pt), skipping md5 sidecar files.
    model_fns = []
    for fn in fns:
        if fn.startswith("model_t") and not fn.endswith("md5"):
            model_fns.append(fn)

    # Infer the tensor/pipeline parallel degrees from the shard file names.
    max_tp, max_pp = -1, -1
    for fn in model_fns:
        _, tp, pp = os.path.splitext(fn)[0].split("_")
        max_pp = max(max_pp, int(pp[2:]) + 1)
        max_tp = max(max_tp, int(tp[2:]) + 1)

    # NOTE(review): only tensor-parallel rank 0 shards are loaded below --
    # confirm max_tp == 1 is always expected (NUM_SHARDS maps 7B to 1).
    states_tp_pps = [[]]

    for pp in range(max_pp):
        model_name = f"model_tp0_pp{pp}.pt"
        states = load(os.path.join(folder, model_name))
        states_tp_pps[0].append(states)

    config, model = convert2hf(model_config, states_tp_pps)

    os.makedirs(target_folder, exist_ok=True)
    model.save_pretrained(target_folder, max_shard_size="20GB")
    # TODO There should be a better way to add this.
    # Patch config.json so AutoModel maps to the causal LM implementation.
    with open(os.path.join(target_folder, "config.json")) as fp:
        config_dict = json.load(fp)
    config_dict["auto_map"]["AutoModel"] = "modeling_internlm.InternLMForCausalLM"
    with open(os.path.join(target_folder, "config.json"), "w") as fp:
        json.dump(config_dict, fp, indent=2)

    tokenizer = InternLMTokenizer(args.tokenizer)
    tokenizer.save_pretrained(target_folder)
|
@@ -0,0 +1,137 @@
|
||||
import copy
|
||||
import warnings
|
||||
from dataclasses import dataclass
|
||||
from typing import Callable, List, Optional
|
||||
|
||||
import torch
|
||||
from torch import nn
|
||||
from transformers import AutoModel, AutoTokenizer
|
||||
from transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList
|
||||
from transformers.utils import logging
|
||||
|
||||
logger = logging.get_logger(__name__)
|
||||
|
||||
|
||||
@dataclass
class GenerationConfig:
    """Sampling settings consumed by `generate_interactive`."""

    # Maximum total sequence length (prompt + generated); None = model default.
    max_length: Optional[int] = None
    # Nucleus sampling probability mass; None = model default.
    top_p: Optional[float] = None
    # Softmax temperature; None = model default.
    temperature: Optional[float] = None
    # Sample from the distribution (True) or take the argmax (False).
    do_sample: Optional[bool] = True
    # Multiplicative penalty for repeated tokens; 1.0 leaves logits unchanged.
    repetition_penalty: Optional[float] = 1.0
|
||||
|
||||
|
||||
@torch.inference_mode()
def generate_interactive(
    model,
    tokenizer,
    prompt,
    generation_config: Optional[GenerationConfig] = None,
    logits_processor: Optional[LogitsProcessorList] = None,
    stopping_criteria: Optional[StoppingCriteriaList] = None,
    prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,
    additional_eos_token_id: Optional[int] = None,
    **kwargs,
):
    """Generate a response for *prompt* token by token, yielding the decoded
    partial response after every step (a streaming generator).

    Args:
        model: causal LM exposing the HuggingFace generation mixin API.
        tokenizer: tokenizer matching *model*.
        prompt: fully formatted prompt string.
        generation_config: sampling settings; defaults to ``model.generation_config``.
        logits_processor / stopping_criteria / prefix_allowed_tokens_fn:
            optional HuggingFace generation hooks.
        additional_eos_token_id: extra token id that also terminates generation.
        **kwargs: extra generation parameters merged into the config.

    Yields:
        str: the response decoded so far (prompt excluded).
    """
    inputs = tokenizer([prompt], padding=True, return_tensors="pt")
    input_length = len(inputs["input_ids"][0])
    # Move every tokenizer output tensor (input_ids, attention_mask, ...) to GPU.
    for k, v in inputs.items():
        inputs[k] = v.cuda()
    input_ids = inputs["input_ids"]
    batch_size, input_ids_seq_length = input_ids.shape[0], input_ids.shape[-1]
    if generation_config is None:
        generation_config = model.generation_config
    # Deep-copy so the per-call **kwargs never mutate the model's shared config.
    generation_config = copy.deepcopy(generation_config)
    model_kwargs = generation_config.update(**kwargs)
    bos_token_id, eos_token_id = generation_config.bos_token_id, generation_config.eos_token_id
    if isinstance(eos_token_id, int):
        eos_token_id = [eos_token_id]
    if additional_eos_token_id is not None:
        eos_token_id.append(additional_eos_token_id)
    has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
    if has_default_max_length and generation_config.max_new_tokens is None:
        warnings.warn(
            f"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. "
            "This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we"
            " recommend using `max_new_tokens` to control the maximum length of the generation.",
            UserWarning,
        )
    elif generation_config.max_new_tokens is not None:
        # max_new_tokens takes precedence: translate it into an absolute max_length.
        generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length
        if not has_default_max_length:
            logger.warn(
                f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(="
                f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. "
                "Please refer to the documentation for more information. "
                "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)",
                UserWarning,
            )

    if input_ids_seq_length >= generation_config.max_length:
        input_ids_string = "input_ids"
        logger.warning(
            f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to"
            f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
            " increasing `max_new_tokens`."
        )

    # 2. Set generation parameters if not already defined
    logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
    stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()

    logits_processor = model._get_logits_processor(
        generation_config=generation_config,
        input_ids_seq_length=input_ids_seq_length,
        encoder_input_ids=input_ids,
        prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
        logits_processor=logits_processor,
    )

    stopping_criteria = model._get_stopping_criteria(
        generation_config=generation_config, stopping_criteria=stopping_criteria
    )
    logits_warper = model._get_logits_warper(generation_config)

    # One flag per sequence: 1 while still generating, 0 once an EOS was produced.
    unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1)
    scores = None
    while True:
        model_inputs = model.prepare_inputs_for_generation(input_ids, **model_kwargs)
        # forward pass to get next token
        outputs = model(
            **model_inputs,
            return_dict=True,
            output_attentions=False,
            output_hidden_states=False,
        )

        next_token_logits = outputs.logits[:, -1, :]

        # pre-process distribution
        next_token_scores = logits_processor(input_ids, next_token_logits)
        next_token_scores = logits_warper(input_ids, next_token_scores)

        # sample
        probs = nn.functional.softmax(next_token_scores, dim=-1)
        if generation_config.do_sample:
            next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
        else:
            next_tokens = torch.argmax(probs, dim=-1)

        # update generated ids, model inputs, and length for next step
        input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
        model_kwargs = model._update_model_kwargs_for_generation(
            outputs, model_kwargs, is_encoder_decoder=False
        )
        # Mark a sequence finished once its next token matches any EOS id.
        # NOTE(review): `min(...)` over comparison tensors assumes batch size 1 -- confirm.
        unfinished_sequences = unfinished_sequences.mul((min(next_tokens != i for i in eos_token_id)).long())

        # Decode only the newly generated suffix, dropping a trailing EOS token.
        output_token_ids = input_ids[0].cpu().tolist()
        output_token_ids = output_token_ids[input_length:]
        for each_eos_token_id in eos_token_id:
            if output_token_ids[-1] == each_eos_token_id:
                output_token_ids = output_token_ids[:-1]
        response = tokenizer.decode(output_token_ids)

        yield response
        # stop when each sentence is finished, or if we exceed the maximum length
        if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores):
            break
|
@@ -0,0 +1,69 @@
|
||||
"""LoRA fine-tuning example for InternLM on the fnlp/moss-002-sft dataset.

Trains with bfloat16 autocast, logging loss and sample generations on a small
held-out validation set every `val_per_steps` steps to the file ``output``.
"""
import torch
from torch.utils.data import DataLoader
from peft import get_peft_model, LoraConfig, TaskType
from transformers import get_linear_schedule_with_warmup
from transformers import AutoModelForCausalLM, AutoTokenizer
from tqdm import tqdm

from moss_002_sft import get_dataset, collate_fn

# ---- Hyperparameters ----
model_path = "model_path"      # path to the pretrained checkpoint
data_dir = "moss_002_sft"      # local cache dir of the moss-002-sft dataset
data_num = -1                  # number of training samples (-1 = all)
test_size = 10                 # held-out validation sample count
train_batch_size = 1
epochs = 5
val_per_steps = 1000           # run validation generation every N steps
lr = 9e-6
peft_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM, r=32, lora_alpha=32, lora_dropout=0.1,
    target_modules=["gate_proj", "down_proj", "up_proj", "q_proj", "k_proj", "v_proj", "o_proj"]
)


# model
model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = get_peft_model(model, peft_config)
model.cuda()

# dataset
train_dataset, val_dataset = get_dataset(tokenizer, data_dir, num=data_num, test_size=test_size)
train_dataloader = DataLoader(train_dataset, batch_size=train_batch_size, shuffle=True, collate_fn=lambda x: collate_fn(x, tokenizer))

# optimizer with 1000 warmup steps, then linear decay over all training steps
optimizer = torch.optim.AdamW(model.parameters(), lr)
scheduler = get_linear_schedule_with_warmup(
    optimizer, 1000, epochs * len(train_dataloader)
)

# train
fp = open("output", "w")
model.train()
for epoch in tqdm(range(epochs), desc="Training Epoch"):  # fix: typo "Traning"
    batch_bar = tqdm(train_dataloader, desc="Training Batch")
    for step, batch in enumerate(batch_bar):
        batch = {k: v.cuda() for k, v in batch.items()}
        # bfloat16 autocast for the forward pass only
        with torch.amp.autocast(device_type="cuda", dtype=torch.bfloat16):
            output = model(**batch)

        loss = output.loss
        loss.backward()
        optimizer.step()
        scheduler.step()
        optimizer.zero_grad()
        batch_bar.set_postfix({"loss": loss.item()})
        # Periodically log the loss and sample generations on the validation set.
        if (step + 1) % val_per_steps == 0:
            fp.write(f"Epoch {epoch} Batch {step}: Loss={loss.item()}\n")
            for i in tqdm(range(len(val_dataset)), desc="Generating"):
                data, label = val_dataset[i]
                prefix = tokenizer.decode(data.tolist(), skip_special_tokens=True)
                try:
                    generate = model.generate(input_ids=data.unsqueeze(0).cuda(), temperature=0.7, top_k=50, do_sample=True, repetition_penalty=1.02, max_new_tokens=100, top_p=0.9)
                    text = tokenizer.decode(generate[0].tolist(), skip_special_tokens=True)
                    text = text.replace(prefix, "")
                    fp.write(f"Prefix: {prefix}\nGenerated: {text}" + "\n---------------------------------\n")
                except Exception as e:
                    # Log generation failures but never abort training over them.
                    fp.write(f"Prefix: {prefix}\nError: {e}" + "\n---------------------------------\n")
            fp.write("\n==============================\n")
            model.train()
            torch.cuda.empty_cache()
fp.close()  # fix: close the log file when training finishes
|
@@ -0,0 +1,105 @@
|
||||
import os
|
||||
import copy
|
||||
|
||||
import torch
|
||||
from torch.utils.data import Dataset
|
||||
from datasets import load_dataset, Dataset as HFDataset
|
||||
|
||||
class SFTDataset(Dataset):
    """Supervised fine-tuning dataset over tokenized dialogue samples.

    Each underlying record holds ``input_ids`` and ``no_loss_spans``; the
    listed spans are set to -100 in the returned label tensor so the loss
    ignores them (e.g. the instruction prefix).
    """

    # https://github.com/OpenLMLab/MOSS/blob/main/finetune_moss.py
    def __init__(self, dataset):
        super().__init__()
        self.dataset = dataset

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        record = self.dataset[index]
        # Deep-copy so callers can never mutate the underlying dataset records.
        input_ids = torch.tensor(copy.deepcopy(record["input_ids"]), dtype=torch.long)
        labels = input_ids.clone()

        # Mask out the spans that must not contribute to the loss.
        for span in copy.deepcopy(record["no_loss_spans"]):
            labels[span[0] : span[1]] = -100

        return input_ids, labels
|
||||
|
||||
def collate_fn(batch, tokenizer):
    """Pad a batch of ``(input_ids, label)`` pairs into model-ready tensors.

    Inputs are padded with ``tokenizer.eos_token_id``; labels are padded with
    -100 so padding is ignored by the loss.
    """
    batch_input_ids, batch_labels = [], []
    for input_ids, label in batch:
        batch_input_ids.append(input_ids)
        batch_labels.append(label)

    batch_input_ids = torch.nn.utils.rnn.pad_sequence(batch_input_ids, batch_first=True, padding_value=tokenizer.eos_token_id)
    batch_labels = torch.nn.utils.rnn.pad_sequence(batch_labels, batch_first=True, padding_value=-100)

    return {
        "input_ids": batch_input_ids,
        # NOTE(review): this marks only positions EQUAL to eos_token_id as
        # attended (1) -- padding gets 1 while real tokens get 0, which looks
        # inverted. Confirm whether `!=` (or a length-based mask) was intended.
        "attention_mask": (batch_input_ids == tokenizer.eos_token_id).long(),
        "labels": batch_labels
    }
|
||||
|
||||
def process(sample, tokenizer, max_len):
    """Tokenize one MOSS SFT sample into input ids plus no-loss spans.

    The instruction prefix is encoded first and excluded from the loss;
    dialogue turns (delimited by ``<eoa>``) are appended until adding the
    next turn would exceed ``max_len``. Returns empty lists when not a
    single turn fits.
    """
    turns = sample["plain_text"].split("<eoa>")[:-1]
    meta_instruction = sample["prefix"]

    # Encode the instruction prefix and sanity-check the tokenizer output.
    instruction_ids = tokenizer.encode(meta_instruction)
    assert isinstance(instruction_ids, list), instruction_ids
    assert len(instruction_ids) > 0, len(instruction_ids)

    input_ids = list(instruction_ids)
    # We do not calculate loss for the instruction prefix.
    no_loss_spans = [(0, len(instruction_ids))]

    for turn_idx in range(sample["num_turns"]):
        turn_ids = tokenizer.encode(turns[turn_idx] + "<eoa>")
        if len(input_ids) + len(turn_ids) > max_len:
            # Too long: drop this turn and everything after it.
            break
        input_ids.extend(turn_ids)

    if len(input_ids) == len(instruction_ids):
        # No dialogue turn fit at all.
        return {"input_ids": [], "no_loss_spans": []}
    return {"input_ids": input_ids, "no_loss_spans": no_loss_spans}
def load_data(save_dir, tokenizer, max_len, num=-1) -> HFDataset:
    """Load (or build and cache) the tokenized moss-002-sft dataset.

    On first use the raw dataset is downloaded, tokenized via ``process``
    and saved to ``save_dir``; later calls load the cached copy from disk.

    Args:
        save_dir: directory holding the processed-dataset cache.
        tokenizer: tokenizer forwarded to ``process``.
        max_len: maximum sequence length forwarded to ``process``.
        num: optional cap on the number of samples (-1 keeps all).

    Returns:
        The processed HF dataset.
    """
    if os.path.exists(save_dir):
        print(f"Loading moss-002-sft from {save_dir}")
    else:
        # Fixed: f-string had no placeholder.
        print("Loading moss-002-sft from datasets")
        moss_sft = load_dataset("fnlp/moss-002-sft-data", split="train")
        moss_sft = moss_sft.map(lambda x: process(x, tokenizer, max_len), num_proc=10)
        # Drop samples where no dialogue turn fit within max_len.
        moss_sft = moss_sft.filter(lambda x: len(x["input_ids"]) != 0)
        moss_sft.save_to_disk(save_dir)

    moss_sft = HFDataset.load_from_disk(save_dir)
    if num != -1:
        moss_sft = moss_sft.select(range(num))
    print(f"Load successfully, total {len(moss_sft)} samples.")

    return moss_sft
def get_dataset(tokenizer, save_dir, max_len=1024, num=-1, test_size=0.1):
    """Build train/validation ``SFTDataset`` pairs from moss-002-sft data."""
    full_data = load_data(save_dir, tokenizer, max_len, num)
    split = full_data.train_test_split(test_size=test_size)
    # Wrap each half so __getitem__ yields (input_ids, labels) tensors.
    return SFTDataset(split["train"]), SFTDataset(split["test"])
@@ -0,0 +1,998 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
|
||||
# and OPT implementations in this library. It has been modified from its
|
||||
# original forms to accommodate minor architectural differences compared
|
||||
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
""" PyTorch InternLM model."""
|
||||
import math
|
||||
from typing import List, Optional, Tuple, Union
|
||||
import threading, queue
|
||||
|
||||
import torch
|
||||
import torch.utils.checkpoint
|
||||
from torch import nn
|
||||
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
|
||||
|
||||
from transformers.activations import ACT2FN
|
||||
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
|
||||
from transformers.modeling_utils import PreTrainedModel
|
||||
from transformers.generation.streamers import BaseStreamer
|
||||
from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
|
||||
from configuration_internlm import InternLMConfig
|
||||
|
||||
|
||||
logger = logging.get_logger(__name__)
|
||||
|
||||
_CONFIG_FOR_DOC = "InternLMConfig"
|
||||
|
||||
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
|
||||
def _make_causal_mask(
|
||||
input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
|
||||
):
|
||||
"""
|
||||
Make causal mask used for bi-directional self-attention.
|
||||
"""
|
||||
bsz, tgt_len = input_ids_shape
|
||||
mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device)
|
||||
mask_cond = torch.arange(mask.size(-1), device=device)
|
||||
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
|
||||
mask = mask.to(dtype)
|
||||
|
||||
if past_key_values_length > 0:
|
||||
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
|
||||
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
|
||||
|
||||
|
||||
# Copied from transformers.models.bart.modeling_bart._expand_mask
|
||||
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
|
||||
"""
|
||||
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
|
||||
"""
|
||||
bsz, src_len = mask.size()
|
||||
tgt_len = tgt_len if tgt_len is not None else src_len
|
||||
|
||||
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
|
||||
|
||||
inverted_mask = 1.0 - expanded_mask
|
||||
|
||||
return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
|
||||
|
||||
|
||||
class InternLMRMSNorm(nn.Module):
    """Root-mean-square layer norm (equivalent to T5LayerNorm)."""

    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # Compute the variance in fp32 for numerical stability.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        normed = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # Cast back to half precision when the weights are fp16/bf16.
        if self.weight.dtype in (torch.float16, torch.bfloat16):
            normed = normed.to(self.weight.dtype)

        return self.weight * normed
class InternLMRotaryEmbedding(torch.nn.Module):
    """Rotary position embedding with precomputed cos/sin caches."""

    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
        super().__init__()
        inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim))
        self.register_buffer("inv_freq", inv_freq)

        # Build the cache eagerly so `torch.jit.trace` works.
        self.max_seq_len_cached = max_position_embeddings
        self._build_cache(self.max_seq_len_cached, self.inv_freq.device, self.inv_freq.dtype)

    def _build_cache(self, seq_len, device, dtype):
        # Outer product of positions and inverse frequencies.
        t = torch.arange(seq_len, device=device, dtype=dtype)
        freqs = torch.einsum("i,j->ij", t, self.inv_freq)
        # Different from paper, but it uses a different permutation in order
        # to obtain the same calculation.
        emb = torch.cat((freqs, freqs), dim=-1).to(device)
        self.register_buffer("cos_cached", emb.cos()[None, None, :, :], persistent=False)
        self.register_buffer("sin_cached", emb.sin()[None, None, :, :], persistent=False)

    def forward(self, x, seq_len=None):
        # x: [bs, num_attention_heads, seq_len, head_size]
        # Regrow the cache if a longer sequence shows up (unlikely after
        # eager construction, kept just in case).
        if seq_len > self.max_seq_len_cached:
            self.max_seq_len_cached = seq_len
            self._build_cache(seq_len, x.device, self.inv_freq.dtype)
        return (
            self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
            self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
        )
def rotate_half(x):
    """Rotates half the hidden dims of the input: (a, b) -> (-b, a)."""
    half = x.shape[-1] // 2
    first, second = x[..., :half], x[..., half:]
    return torch.cat((-second, first), dim=-1)
def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
    """Apply rotary position embeddings to query and key tensors."""
    # cos/sin arrive as [1, 1, seq_len, dim]; squeeze to [seq_len, dim],
    # then gather the rows for each position id -> [bs, 1, seq_len, dim].
    cos = cos.squeeze(1).squeeze(0)[position_ids].unsqueeze(1)
    sin = sin.squeeze(1).squeeze(0)[position_ids].unsqueeze(1)
    rotated_q = (q * cos) + (rotate_half(q) * sin)
    rotated_k = (k * cos) + (rotate_half(k) * sin)
    return rotated_q, rotated_k
class InternLMMLP(nn.Module):
    """Gated feed-forward block: down(act(gate(x)) * up(x))."""

    def __init__(
        self,
        hidden_size: int,
        intermediate_size: int,
        hidden_act: str,
    ):
        super().__init__()
        self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
        self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)
        self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
        # Activation resolved by name from the transformers registry.
        self.act_fn = ACT2FN[hidden_act]

    def forward(self, x):
        gated = self.act_fn(self.gate_proj(x)) * self.up_proj(x)
        return self.down_proj(gated)
class InternLMAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: InternLMConfig):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        # Per-head dimensionality; hidden_size must split evenly across heads.
        self.head_dim = self.hidden_size // self.num_heads
        self.max_position_embeddings = config.max_position_embeddings

        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(
                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
                f" and `num_heads`: {self.num_heads})."
            )
        # Q/K/V/output projections; bias usage is controlled by config.bias.
        self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.bias)
        self.k_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.bias)
        self.v_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.bias)
        self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.bias)
        self.rotary_emb = InternLMRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        # [bsz, seq, hidden] -> [bsz, num_heads, seq, head_dim]
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Compute self-attention with rotary embeddings and optional KV cache.

        Returns ``(attn_output, attn_weights or None, (key, value) cache or None)``.
        """
        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)

        # The rotary cache must cover the total length including cached tokens.
        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            kv_seq_len += past_key_value[0].shape[-2]
        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
        # Rotary is applied only to the *new* positions (selected via position_ids).
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
        # [bsz, nh, t, hd]

        if past_key_value is not None:
            # reuse k, v, self_attention
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)

        past_key_value = (key_states, value_states) if use_cache else None

        # Scaled dot-product scores.
        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)

        if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
                f" {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
                )
            # Additive mask; clamp to the dtype minimum to avoid overflow
            # when mask values stack.
            attn_weights = attn_weights + attention_mask
            attn_weights = torch.max(attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min))

        # upcast attention to fp32
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
        attn_output = torch.matmul(attn_weights, value_states)

        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        # [bsz, heads, q_len, head_dim] -> [bsz, q_len, hidden]
        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)

        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value
class InternLMDecoderLayer(nn.Module):
    """One transformer block: self-attention then gated MLP, each preceded
    by an RMSNorm (pre-norm) and wrapped in a residual connection."""

    def __init__(self, config: InternLMConfig):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = InternLMAttention(config=config)
        self.mlp = InternLMMLP(
            hidden_size=self.hidden_size,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act,
        )
        # Pre-norm layout: normalization runs before each sub-block.
        self.input_layernorm = InternLMRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = InternLMRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
        """

        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        # Output tuple grows with the optional flags:
        # (hidden_states[, attn_weights][, present_key_value]).
        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        if use_cache:
            outputs += (present_key_value,)

        return outputs
# Shared class-level docstring boilerplate, injected into the model classes
# below via @add_start_docstrings.
INTERNLM_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`InternLMConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
@add_start_docstrings(
    "The bare InternLM Model outputting raw hidden-states without any specific head on top.",
    INTERNLM_START_DOCSTRING,
)
class InternLMPreTrainedModel(PreTrainedModel):
    # Base class wiring InternLM modules into the HF PreTrainedModel API:
    # weight init, gradient-checkpointing toggle, and loading metadata.
    config_class = InternLMConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["InternLMDecoderLayer"]
    _keys_to_ignore_on_load_unexpected = [r"decoder\.version"]

    def _init_weights(self, module):
        # Normal(0, initializer_range) for weights; zeros for biases and
        # the padding embedding row.
        std = self.config.initializer_range
        if isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()

    def _set_gradient_checkpointing(self, module, value=False):
        # Only the backbone model carries the checkpointing flag.
        if isinstance(module, InternLMModel):
            module.gradient_checkpointing = value
# Shared forward() argument documentation, injected via
# @add_start_docstrings_to_model_forward.
INTERNLM_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).

            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
            information on the default strategy.

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare InternLM Model outputting raw hidden-states without any specific head on top.",
    INTERNLM_START_DOCSTRING,
)
class InternLMModel(InternLMPreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`InternLMDecoderLayer`]

    Args:
        config: InternLMConfig
    """

    _auto_class = "AutoModel"

    def __init__(self, config: InternLMConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList([InternLMDecoderLayer(config) for _ in range(config.num_hidden_layers)])
        # Final RMSNorm applied after the last decoder layer.
        self.norm = InternLMRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
    def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
        # create causal mask
        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
        combined_attention_mask = None
        if input_shape[-1] > 1:
            combined_attention_mask = _make_causal_mask(
                input_shape,
                inputs_embeds.dtype,
                device=inputs_embeds.device,
                past_key_values_length=past_key_values_length,
            )

        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
                inputs_embeds.device
            )
            # Both masks are additive biases, so they combine by summation.
            combined_attention_mask = (
                expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
            )

        return combined_attention_mask

    @add_start_docstrings_to_model_forward(INTERNLM_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        """Run the decoder stack; see INTERNLM_INPUTS_DOCSTRING for arguments."""
        # Fall back to config defaults for any unspecified output flags.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
        elif input_ids is not None:
            batch_size, seq_length = input_ids.shape
        elif inputs_embeds is not None:
            batch_size, seq_length, _ = inputs_embeds.shape
        else:
            raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")

        seq_length_with_past = seq_length
        past_key_values_length = 0

        if past_key_values is not None:
            # Cache layout: past_key_values[layer][0] is [bsz, heads, past_len, head_dim].
            past_key_values_length = past_key_values[0][0].shape[2]
            seq_length_with_past = seq_length_with_past + past_key_values_length

        if position_ids is None:
            # Default positions continue from the cached length.
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            position_ids = torch.arange(
                past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
            )
            position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
        else:
            position_ids = position_ids.view(-1, seq_length).long()

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)
        # embed positions
        if attention_mask is None:
            # No mask supplied: attend to everything (incl. cached tokens).
            attention_mask = torch.ones(
                (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
            )
        attention_mask = self._prepare_decoder_attention_mask(
            attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
        )

        hidden_states = inputs_embeds

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = () if use_cache else None

        for idx, decoder_layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            past_key_value = past_key_values[idx] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        # None for past_key_value
                        return module(*inputs, output_attentions, None)

                    return custom_forward

                # Recompute activations in backward to trade compute for memory.
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(decoder_layer),
                    hidden_states,
                    attention_mask,
                    position_ids,
                    None,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_value,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                )

            hidden_states = layer_outputs[0]

            if use_cache:
                # Cache slot position depends on whether attentions were also returned.
                next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None
        if not return_dict:
            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )
class InternLMForCausalLM(InternLMPreTrainedModel):
    """InternLM decoder with a language-modeling head, plus convenience chat APIs.

    Wraps :class:`InternLMModel` with a tied-size linear ``lm_head`` projecting
    hidden states to vocabulary logits, and adds ``chat``/``stream_chat`` helpers
    that format the InternLM conversation template.
    """

    _auto_class = "AutoModelForCausalLM"

    def __init__(self, config):
        super().__init__(config)
        self.model = InternLMModel(config)

        # Projects final hidden states to vocab logits; no bias, per config sizes.
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @add_start_docstrings_to_model_forward(INTERNLM_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, InternLMForCausalLM

        >>> model = InternLMForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
        >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)

        >>> prompt = "Hey, are you consciours? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you consciours? Can you talk to me?\nI'm not consciours, but I can talk to you."
        ```"""

        # Fall back to config defaults for any unspecified output flags.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]
        logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model parallelism: labels may live on a different device.
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
    ):
        """Trim inputs to the last token when a KV cache is present (generation step)."""
        if past_key_values:
            input_ids = input_ids[:, -1:]

        position_ids = kwargs.get("position_ids", None)
        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -1].unsqueeze(-1)

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        model_inputs.update(
            {
                "position_ids": position_ids,
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "attention_mask": attention_mask,
            }
        )
        return model_inputs

    @staticmethod
    def _reorder_cache(past_key_values, beam_idx):
        """Reorder each layer's cached key/value tensors to follow beam search."""
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
        return reordered_past

    def build_inputs(self, tokenizer, query: str, history: List[Tuple[str, str]] = []):
        """Render ``history`` + ``query`` with the InternLM chat template and tokenize.

        Note: ``history`` is never mutated here, so the mutable default is safe.
        """
        prompt = ""
        for record in history:
            prompt += f"""<s><|User|>:{record[0]}<eoh>\n<|Bot|>:{record[1]}<eoa>\n"""
        if len(prompt) == 0:
            prompt += "<s>"
        prompt += f"""<|User|>:{query}<eoh>\n<|Bot|>:"""
        return tokenizer([prompt], return_tensors="pt")

    @torch.no_grad()
    def chat(self,
             tokenizer,
             query: str,
             history: List[Tuple[str, str]] = [],
             streamer: Optional[BaseStreamer] = None,
             max_new_tokens: int = 1024,
             do_sample: bool = True,
             temperature: float = 0.8,
             top_p: float = 0.8,
             **kwargs):
        """Run one chat turn and return ``(response, updated_history)``."""
        inputs = self.build_inputs(tokenizer, query, history)
        inputs = {k: v.to(self.device) for k, v in inputs.items() if torch.is_tensor(v)}
        outputs = self.generate(**inputs,
                                streamer=streamer,
                                max_new_tokens=max_new_tokens,
                                do_sample=do_sample,
                                temperature=temperature,
                                top_p=top_p,
                                **kwargs)
        # Keep only the newly generated tokens (drop the echoed prompt).
        outputs = outputs[0].cpu().tolist()[len(inputs["input_ids"][0]):]
        response = tokenizer.decode(outputs, skip_special_tokens=True)
        response = response.split("<eoa>")[0]
        history = history + [(query, response)]
        return response, history

    @torch.no_grad()
    def stream_chat(self,
                    tokenizer,
                    query: str,
                    history: List[Tuple[str, str]] = [],
                    max_new_tokens: int = 1024,
                    do_sample: bool = True,
                    temperature: float = 0.8,
                    top_p: float = 0.8,
                    **kwargs):
        """
        Return a generator in format: (response, history)
        Eg.
        ('你好,有什么可以帮助您的吗', [('你好', '你好,有什么可以帮助您的吗')])
        ('你好,有什么可以帮助您的吗?', [('你好', '你好,有什么可以帮助您的吗?')])
        """

        # Bounded queue between the generation thread (producer) and the caller
        # (consumer); bounded so a slow consumer back-pressures generation.
        response_queue = queue.Queue(maxsize=20)

        class ChatStreamer(BaseStreamer):
            """Streamer that decodes tokens one at a time into the response queue."""

            def __init__(self, tokenizer) -> None:
                super().__init__()
                self.tokenizer = tokenizer
                self.queue = response_queue
                self.query = query
                self.history = history
                self.response = ""
                self.received_inputs = False
                self.queue.put((self.response, history + [(self.query, self.response)]))

            def put(self, value):
                if len(value.shape) > 1 and value.shape[0] > 1:
                    raise ValueError("ChatStreamer only supports batch size 1")
                elif len(value.shape) > 1:
                    value = value[0]

                if not self.received_inputs:
                    # The first received value is input_ids, ignore here
                    self.received_inputs = True
                    return

                token = self.tokenizer.decode([value[-1]], skip_special_tokens=True)
                if token.strip() != "<eoa>":
                    self.response = self.response + token
                    history = self.history + [(self.query, self.response)]
                    self.queue.put((self.response, history))

            def end(self):
                # `None` is the end-of-stream sentinel consumed below.
                self.queue.put(None)

        def stream_producer():
            return self.chat(
                tokenizer=tokenizer,
                query=query,
                streamer=ChatStreamer(tokenizer=tokenizer),
                history=history,
                max_new_tokens=max_new_tokens,
                do_sample=do_sample,
                temperature=temperature,
                top_p=top_p,
                **kwargs
            )

        def consumer():
            producer = threading.Thread(target=stream_producer)
            producer.start()
            while True:
                res = response_queue.get()
                # BUGFIX: the sentinel check was inverted (`if res is not None:
                # return`), which ended the generator on the first real response
                # and never yielded anything. Stop only on the `None` sentinel.
                if res is None:
                    return
                yield res

        return consumer()
|
||||
|
||||
|
||||
@add_start_docstrings(
    """
    The InternLM Model transformer with a sequence classification head on top (linear layer).

    [`InternLMForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-2) do.

    Since it does classification on the last token, it requires to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """,
    INTERNLM_START_DOCSTRING,
)
class InternLMForSequenceClassification(InternLMPreTrainedModel):
    # `lm_head` does not exist on this head; ignore it when loading CausalLM checkpoints.
    _keys_to_ignore_on_load_missing = [r"lm_head.weight"]

    def __init__(self, config):
        """Build the decoder backbone plus a bias-free linear classification head."""
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = InternLMModel(config)
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @add_start_docstrings_to_model_forward(INTERNLM_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        ) if False else None  # placeholder never executed; see real call below
|
@@ -0,0 +1,242 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
|
||||
# and OPT implementations in this library. It has been modified from its
|
||||
# original forms to accommodate minor architectural differences compared
|
||||
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Tokenization classes for IntermLM."""
|
||||
import os
|
||||
from shutil import copyfile
|
||||
from typing import Any, Dict, List, Optional, Tuple
|
||||
|
||||
import sentencepiece as spm
|
||||
|
||||
from transformers.tokenization_utils import PreTrainedTokenizer
|
||||
from transformers.utils import logging
|
||||
|
||||
|
||||
logger = logging.get_logger(__name__)
|
||||
|
||||
# Filename of the sentencepiece model looked up next to the tokenizer config.
VOCAB_FILES_NAMES = {"vocab_file": "./tokenizer.model"}

# No hub-hosted vocab shortcuts are registered for this tokenizer.
PRETRAINED_VOCAB_FILES_MAP = {}
|
||||
|
||||
|
||||
class InternLMTokenizer(PreTrainedTokenizer):
    """
    Construct a InternLM tokenizer. Based on byte-level Byte-Pair-Encoding.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    _auto_class = "AutoTokenizer"

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="</s>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        add_bos_token=True,
        add_eos_token=False,
        decode_with_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        # Must be set before super().__init__, which may touch tokenizer state.
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.add_bos_token = add_bos_token
        self.add_eos_token = add_eos_token
        self.decode_with_prefix_space = decode_with_prefix_space
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        # Lazily built in `no_prefix_space_tokens` (scanning the vocab is costly).
        self._no_prefix_space_tokens = None

        """ Initialisation"""

    @property
    def no_prefix_space_tokens(self):
        """IDs of pieces that do NOT start with the sentencepiece word marker "▁".

        Built once on first access; used to decide whether decoded text needs a
        leading space re-added.
        """
        if self._no_prefix_space_tokens is None:
            vocab = self.convert_ids_to_tokens(list(range(self.vocab_size)))
            self._no_prefix_space_tokens = {i for i, tok in enumerate(vocab) if not tok.startswith("▁")}
        return self._no_prefix_space_tokens

    @property
    def vocab_size(self):
        """Returns vocab size"""
        return self.sp_model.get_piece_size()

    @property
    def bos_token_id(self) -> Optional[int]:
        # Delegates to the sentencepiece model rather than the config.
        return self.sp_model.bos_id()

    @property
    def eos_token_id(self) -> Optional[int]:
        return self.sp_model.eos_id()

    def get_vocab(self):
        """Returns vocab as a dict"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        """Returns a tokenized string."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def _maybe_add_prefix_space(self, tokens, decoded):
        # Re-insert the word-boundary space the sentencepiece decode dropped,
        # unless the first token is one that never carries a prefix space.
        if tokens and tokens[0] not in self.no_prefix_space_tokens:
            return " " + decoded
        else:
            return decoded

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        out_string = self.clean_up_tokenization(out_string)
        out_string = self._maybe_add_prefix_space(tokens=tokens, decoded=out_string)
        # Drop the single leading character added above / by the special-token join.
        return out_string[1:]

    def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """
        Save the vocabulary and special tokens file to a directory.

        Args:
            save_directory (`str`):
                The directory in which to save the vocabulary.

        Returns:
            `Tuple(str)`: Paths to the files saved.
        """
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        # Copy the original file when available; otherwise serialize the in-memory model.
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Prepend BOS / append EOS around the sequence(s) per the add_* flags."""
        if self.add_bos_token:
            bos_token_ids = [self.bos_token_id]
        else:
            bos_token_ids = []

        output = bos_token_ids + token_ids_0

        if token_ids_1 is not None:
            output = output + token_ids_1

        if self.add_eos_token:
            output = output + [self.eos_token_id]

        return output

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.

        NOTE(review): this mask assumes both BOS and EOS are added, which does not
        match `build_inputs_with_special_tokens` when `add_eos_token` is False —
        confirm against upstream before relying on it.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make
        use of token type ids, therefore a list of zeros is returned.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of zeros.
        """
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
|
@@ -0,0 +1,109 @@
|
||||
"""
|
||||
This code is supported by the website: https://www.guanjihuan.com
|
||||
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/38502
|
||||
"""
|
||||
|
||||
import streamlit as st
|
||||
st.set_page_config(
|
||||
page_title="Chat",
|
||||
layout='wide'
|
||||
)
|
||||
|
||||
choose_load_model = 0 # 选择加载的模型(Qwen-7B 或 Qwen-14B)
|
||||
|
||||
if choose_load_model == 0:
|
||||
# Qwen-7B(需要8G显存)
|
||||
@st.cache_resource
|
||||
def load_model_qwen_7B():
|
||||
from transformers import AutoTokenizer, AutoModelForCausalLM
|
||||
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-7B-Chat-Int4", trust_remote_code=True)
|
||||
model = AutoModelForCausalLM.from_pretrained(
|
||||
"Qwen/Qwen-7B-Chat-Int4",
|
||||
device_map="auto",
|
||||
trust_remote_code=True,
|
||||
).eval()
|
||||
return tokenizer, model
|
||||
tokenizer_qwen_7B, model_qwen_7B = load_model_qwen_7B()
|
||||
|
||||
elif choose_load_model == 1:
|
||||
# Qwen-14B(需要12G显存)
|
||||
@st.cache_resource
|
||||
def load_model_qwen_14B():
|
||||
from transformers import AutoTokenizer, AutoModelForCausalLM
|
||||
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-14B-Chat-Int4", trust_remote_code=True)
|
||||
model = AutoModelForCausalLM.from_pretrained(
|
||||
"Qwen/Qwen-14B-Chat-Int4",
|
||||
device_map="auto",
|
||||
trust_remote_code=True
|
||||
).eval()
|
||||
return tokenizer, model
|
||||
tokenizer_qwen_14B, model_qwen_14B = load_model_qwen_14B()
|
||||
|
||||
with st.sidebar:
|
||||
with st.expander('参数', expanded=True):
|
||||
max_length = 409600
|
||||
top_p = st.slider('top_p', 0.01, 1.0, step=0.01, value=0.8, key='top_p_session')
|
||||
temperature = st.slider('temperature', 0.51, 1.0, step=0.01, value=0.8, key='temperature_session')
|
||||
def reset_parameter():
|
||||
st.session_state['top_p_session'] = 0.8
|
||||
st.session_state['temperature_session'] = 0.8
|
||||
reset_parameter_button = st.button('重置', on_click=reset_parameter)
|
||||
|
||||
prompt = st.chat_input("在这里输入您的命令")
|
||||
|
||||
from transformers.generation import GenerationConfig
|
||||
|
||||
if choose_load_model == 0:
|
||||
config_qwen_7b = GenerationConfig.from_pretrained(
|
||||
"Qwen/Qwen-7B-Chat-Int4", trust_remote_code=True, resume_download=True, max_length = max_length, top_p = top_p, temperature = temperature
|
||||
)
|
||||
def chat_response_qwen_7B(query):
|
||||
for response in model_qwen_7B.chat_stream(tokenizer_qwen_7B, query, history=st.session_state.history_qwen, generation_config=config_qwen_7b):
|
||||
message_placeholder_qwen.markdown(response)
|
||||
if stop_button:
|
||||
break
|
||||
st.session_state.history_qwen.append((query, response))
|
||||
st.session_state.ai_response.append({"role": "robot", "content": response, "avatar": "assistant"})
|
||||
return response
|
||||
|
||||
elif choose_load_model == 1:
|
||||
config_qwen_14b = GenerationConfig.from_pretrained(
|
||||
"Qwen/Qwen-14B-Chat-Int4", trust_remote_code=True, resume_download=True, max_length = max_length, top_p = top_p, temperature = temperature
|
||||
)
|
||||
def chat_response_qwen_14B(query):
|
||||
for response in model_qwen_14B.chat_stream(tokenizer_qwen_14B, query, history=st.session_state.history_qwen, generation_config=config_qwen_14b):
|
||||
message_placeholder_qwen.markdown(response)
|
||||
if stop_button:
|
||||
break
|
||||
st.session_state.history_qwen.append((query, response))
|
||||
st.session_state.ai_response.append({"role": "robot", "content": response, "avatar": "assistant"})
|
||||
return response
|
||||
|
||||
def clear_all():
|
||||
st.session_state.history_qwen = []
|
||||
st.session_state.ai_response = []
|
||||
|
||||
if 'history_qwen' not in st.session_state:
|
||||
st.session_state.history_qwen = []
|
||||
if 'ai_response' not in st.session_state:
|
||||
st.session_state.ai_response = []
|
||||
|
||||
for ai_response in st.session_state.ai_response:
|
||||
with st.chat_message(ai_response["role"], avatar=ai_response.get("avatar")):
|
||||
st.markdown(ai_response["content"])
|
||||
|
||||
prompt_placeholder = st.chat_message("user", avatar='user')
|
||||
with st.chat_message("robot", avatar="assistant"):
|
||||
message_placeholder_qwen = st.empty()
|
||||
|
||||
if prompt:
|
||||
prompt_placeholder.markdown(prompt)
|
||||
st.session_state.ai_response.append({"role": "user", "content": prompt, "avatar": 'user'})
|
||||
stop = st.empty()
|
||||
stop_button = stop.button('停止', key='break_response')
|
||||
if choose_load_model == 0:
|
||||
chat_response_qwen_7B(prompt)
|
||||
elif choose_load_model == 1:
|
||||
chat_response_qwen_14B(prompt)
|
||||
stop.empty()
|
||||
button_clear = st.button("清空", on_click=clear_all, key='clear')
|
@@ -0,0 +1,6 @@
|
||||
transformers==4.32.0
|
||||
accelerate
|
||||
tiktoken
|
||||
einops
|
||||
transformers_stream_generator==0.0.4
|
||||
scipy
|
@@ -0,0 +1,82 @@
|
||||
"""
|
||||
This code is supported by the website: https://www.guanjihuan.com
|
||||
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/38502
|
||||
"""
|
||||
|
||||
import streamlit as st
|
||||
st.set_page_config(
|
||||
page_title="Chat",
|
||||
layout='wide'
|
||||
)
|
||||
|
||||
choose_load_method = 1
|
||||
|
||||
if choose_load_method == 0:
|
||||
# GPU加载(需要5G显存)
|
||||
@st.cache_resource
|
||||
def load_bark_model():
|
||||
from transformers import AutoProcessor, AutoModel
|
||||
processor = AutoProcessor.from_pretrained("suno/bark")
|
||||
model = AutoModel.from_pretrained("suno/bark").to("cuda")
|
||||
return model, processor
|
||||
model, processor = load_bark_model()
|
||||
|
||||
elif choose_load_method == 1:
|
||||
# GPU加载bark-small模型(需要3G显存)
|
||||
@st.cache_resource
|
||||
def load_bark_model():
|
||||
from transformers import AutoProcessor, AutoModel
|
||||
processor = AutoProcessor.from_pretrained("suno/bark-small")
|
||||
model = AutoModel.from_pretrained("suno/bark-small").to("cuda")
|
||||
return model, processor
|
||||
model, processor = load_bark_model()
|
||||
|
||||
elif choose_load_method == 2:
|
||||
# CPU加载bark模型(需要9G内存,运行速度慢,不推荐)
|
||||
@st.cache_resource
|
||||
def load_bark_model():
|
||||
from transformers import AutoProcessor, AutoModel
|
||||
processor = AutoProcessor.from_pretrained("suno/bark")
|
||||
model = AutoModel.from_pretrained("suno/bark")
|
||||
return model, processor
|
||||
model, processor = load_bark_model()
|
||||
|
||||
elif choose_load_method == 3:
|
||||
# CPU加载bark-small模型(需要5G内存,运行速度慢,不推荐)
|
||||
@st.cache_resource
|
||||
def load_bark_model():
|
||||
from transformers import AutoProcessor, AutoModel
|
||||
processor = AutoProcessor.from_pretrained("suno/bark-small")
|
||||
model = AutoModel.from_pretrained("suno/bark-small")
|
||||
return model, processor
|
||||
model, processor = load_bark_model()
|
||||
|
||||
prompt = st.chat_input("在这里输入您的命令")
|
||||
|
||||
prompt_placeholder = st.empty()
|
||||
with prompt_placeholder.container():
|
||||
with st.chat_message("user", avatar='user'):
|
||||
pass
|
||||
|
||||
if prompt:
|
||||
with prompt_placeholder.container():
|
||||
with st.chat_message("user", avatar='user'):
|
||||
st.write(prompt)
|
||||
st.write('正在转换中,请稍后。')
|
||||
|
||||
inputs = processor(
|
||||
text=[prompt],
|
||||
return_tensors="pt",
|
||||
)
|
||||
if choose_load_method == 0 or choose_load_method == 1:
|
||||
inputs = {key: value.to("cuda") for key, value in inputs.items()}
|
||||
|
||||
speech_values = model.generate(**inputs, do_sample=True)
|
||||
|
||||
import scipy
|
||||
sampling_rate = 24_000
|
||||
scipy.io.wavfile.write('./a.wav', rate=sampling_rate, data=speech_values.cpu().numpy().squeeze())
|
||||
|
||||
audio_file = open('./a.wav', 'rb')
|
||||
audio_bytes = audio_file.read()
|
||||
st.audio(audio_bytes, format='audio/wav')
|
@@ -0,0 +1,78 @@
|
||||
"""
|
||||
This code is supported by the website: https://www.guanjihuan.com
|
||||
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/38502
|
||||
"""
|
||||
|
||||
import streamlit as st
|
||||
st.set_page_config(
|
||||
page_title="Chat",
|
||||
layout='wide'
|
||||
)
|
||||
|
||||
import openai
|
||||
API_BASE = "https://api.deepseek.com"
|
||||
API_KEY = "your key"
|
||||
|
||||
|
||||
with st.sidebar:
|
||||
with st.expander('参数', expanded=True):
|
||||
top_p = st.slider('top_p', 0.01, 1.0, step=0.01, value=0.8, key='top_p_session')
|
||||
temperature = st.slider('temperature', 0.51, 1.0, step=0.01, value=0.85, key='temperature_session')
|
||||
def reset_parameter():
|
||||
st.session_state['top_p_session'] = 0.8
|
||||
st.session_state['temperature_session'] = 0.85
|
||||
reset_parameter_button = st.button('重置', on_click=reset_parameter)
|
||||
|
||||
prompt = st.chat_input("在这里输入您的命令")
|
||||
|
||||
def clear_all():
|
||||
st.session_state.messages = []
|
||||
st.session_state.ai_response = []
|
||||
|
||||
if 'messages' not in st.session_state:
|
||||
st.session_state.messages = []
|
||||
if 'ai_response' not in st.session_state:
|
||||
st.session_state.ai_response = []
|
||||
|
||||
for ai_response in st.session_state.ai_response:
|
||||
with st.chat_message(ai_response["role"], avatar=ai_response.get("avatar")):
|
||||
st.markdown(ai_response["content"])
|
||||
|
||||
prompt_placeholder = st.chat_message("user", avatar='user')
|
||||
with st.chat_message("robot", avatar="assistant"):
|
||||
message_placeholder = st.empty()
|
||||
|
||||
def response_of_deepseek_chat(prompt):
|
||||
st.session_state.messages.append({'role': 'user', 'content': prompt})
|
||||
client = openai.OpenAI(
|
||||
api_key=API_KEY,
|
||||
base_url=API_BASE
|
||||
)
|
||||
completion = client.chat.completions.create(
|
||||
model="deepseek-chat",
|
||||
messages=st.session_state.messages,
|
||||
stream=True,
|
||||
temperature=temperature,
|
||||
top_p=top_p,
|
||||
)
|
||||
full_content = ''
|
||||
for chunk in completion:
|
||||
response = chunk.choices[0].delta.content or ""
|
||||
full_content += response
|
||||
message_placeholder.markdown(full_content)
|
||||
if stop_button:
|
||||
break
|
||||
st.session_state.messages.append({'role': 'assistant',
|
||||
'content': full_content})
|
||||
st.session_state.ai_response.append({"role": "robot", "content": full_content, "avatar": "assistant"})
|
||||
return full_content
|
||||
|
||||
if prompt:
|
||||
prompt_placeholder.markdown(prompt)
|
||||
st.session_state.ai_response.append({"role": "user", "content": prompt, "avatar": 'user'})
|
||||
stop = st.empty()
|
||||
stop_button = stop.button('停止', key='break_response')
|
||||
response_of_deepseek_chat(prompt)
|
||||
stop.empty()
|
||||
button_clear = st.button("清空", on_click=clear_all, key='clear')
|
||||
|
@@ -0,0 +1,92 @@
|
||||
"""
|
||||
This code is supported by the website: https://www.guanjihuan.com
|
||||
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/38502
|
||||
"""
|
||||
|
||||
import streamlit as st
|
||||
st.set_page_config(
|
||||
page_title="Chat",
|
||||
layout='wide'
|
||||
)
|
||||
|
||||
import openai
|
||||
API_BASE = "https://api.deepseek.com"
|
||||
API_KEY = "your key"
|
||||
|
||||
|
||||
with st.sidebar:
|
||||
with st.expander('参数', expanded=True):
|
||||
top_p = st.slider('top_p', 0.01, 1.0, step=0.01, value=0.8, key='top_p_session')
|
||||
temperature = st.slider('temperature', 0.51, 1.0, step=0.01, value=0.85, key='temperature_session')
|
||||
def reset_parameter():
|
||||
st.session_state['top_p_session'] = 0.8
|
||||
st.session_state['temperature_session'] = 0.85
|
||||
reset_parameter_button = st.button('重置', on_click=reset_parameter)
|
||||
|
||||
prompt = st.chat_input("在这里输入您的命令")
|
||||
|
||||
def clear_all():
|
||||
st.session_state.messages = []
|
||||
st.session_state.ai_response = []
|
||||
|
||||
if 'messages' not in st.session_state:
|
||||
st.session_state.messages = []
|
||||
if 'ai_response' not in st.session_state:
|
||||
st.session_state.ai_response = []
|
||||
|
||||
for ai_response in st.session_state.ai_response:
|
||||
with st.chat_message(ai_response["role"], avatar=ai_response.get("avatar")):
|
||||
st.markdown(ai_response["content"])
|
||||
|
||||
prompt_placeholder = st.chat_message("user", avatar='user')
|
||||
with st.chat_message("robot", avatar="assistant"):
|
||||
message_placeholder = st.empty()
|
||||
|
||||
def response_of_deepseek_chat(prompt):
|
||||
st.session_state.messages.append({'role': 'user', 'content': prompt})
|
||||
client = openai.OpenAI(
|
||||
api_key=API_KEY,
|
||||
base_url=API_BASE
|
||||
)
|
||||
completion = client.chat.completions.create(
|
||||
model="deepseek-reasoner",
|
||||
messages=st.session_state.messages,
|
||||
stream=True,
|
||||
temperature=temperature,
|
||||
top_p=top_p,
|
||||
)
|
||||
full_content = ''
|
||||
all_full_content = ''
|
||||
think_or_not = 1
|
||||
answer_or_not = 1
|
||||
for chunk in completion:
|
||||
response = chunk.choices[0].delta.content
|
||||
reasoning_content = chunk.choices[0].delta.reasoning_content
|
||||
if response == None:
|
||||
if think_or_not == 1:
|
||||
all_full_content += '[开始思考]\n\n'
|
||||
think_or_not = 0
|
||||
all_full_content += reasoning_content
|
||||
else:
|
||||
if answer_or_not == 1:
|
||||
all_full_content += '\n\n[结束思考]\n\n'
|
||||
answer_or_not = 0
|
||||
all_full_content += response
|
||||
full_content += response
|
||||
message_placeholder.markdown(all_full_content)
|
||||
if stop_button:
|
||||
break
|
||||
st.session_state.messages.append({'role': 'assistant',
|
||||
'content': full_content})
|
||||
st.session_state.ai_response.append({"role": "robot", "content": all_full_content, "avatar": "assistant"})
|
||||
return all_full_content
|
||||
|
||||
if prompt:
|
||||
prompt_placeholder.markdown(prompt)
|
||||
st.session_state.ai_response.append({"role": "user", "content": prompt, "avatar": 'user'})
|
||||
stop = st.empty()
|
||||
stop_button = stop.button('停止', key='break_response')
|
||||
response_of_deepseek_chat(prompt)
|
||||
stop.empty()
|
||||
button_clear = st.button("清空", on_click=clear_all, key='clear')
|
||||
|
@@ -0,0 +1,75 @@
|
||||
"""
|
||||
This code is supported by the website: https://www.guanjihuan.com
|
||||
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/38502
|
||||
"""
|
||||
|
||||
import streamlit as st
|
||||
st.set_page_config(
|
||||
page_title="Chat",
|
||||
layout='wide'
|
||||
)
|
||||
|
||||
import openai
|
||||
API_KEY = ""
|
||||
|
||||
with st.sidebar:
|
||||
with st.expander('参数', expanded=True):
|
||||
top_p = st.slider('top_p', 0.01, 1.0, step=0.01, value=0.8, key='top_p_session')
|
||||
temperature = st.slider('temperature', 0.51, 1.0, step=0.01, value=0.85, key='temperature_session')
|
||||
def reset_parameter():
|
||||
st.session_state['top_p_session'] = 0.8
|
||||
st.session_state['temperature_session'] = 0.85
|
||||
reset_parameter_button = st.button('重置', on_click=reset_parameter)
|
||||
|
||||
prompt = st.chat_input("在这里输入您的命令")
|
||||
|
||||
def clear_all():
|
||||
st.session_state.messages = []
|
||||
st.session_state.ai_response = []
|
||||
|
||||
if 'messages' not in st.session_state:
|
||||
st.session_state.messages = []
|
||||
if 'ai_response' not in st.session_state:
|
||||
st.session_state.ai_response = []
|
||||
|
||||
for ai_response in st.session_state.ai_response:
|
||||
with st.chat_message(ai_response["role"], avatar=ai_response.get("avatar")):
|
||||
st.markdown(ai_response["content"])
|
||||
|
||||
prompt_placeholder = st.chat_message("user", avatar='user')
|
||||
with st.chat_message("robot", avatar="assistant"):
|
||||
message_placeholder = st.empty()
|
||||
|
||||
def response_of_gpt(prompt):
|
||||
st.session_state.messages.append({'role': 'user', 'content': prompt})
|
||||
client = openai.OpenAI(
|
||||
api_key=API_KEY,
|
||||
)
|
||||
completion = client.chat.completions.create(
|
||||
model="gpt-3.5-turbo-0125",
|
||||
messages=st.session_state.messages,
|
||||
stream=True,
|
||||
temperature=temperature,
|
||||
top_p=top_p,
|
||||
)
|
||||
full_content = ''
|
||||
for chunk in completion:
|
||||
response = chunk.choices[0].delta.content or ""
|
||||
full_content += response
|
||||
message_placeholder.markdown(full_content)
|
||||
if stop_button:
|
||||
break
|
||||
st.session_state.messages.append({'role': 'assistant',
|
||||
'content': full_content})
|
||||
st.session_state.ai_response.append({"role": "robot", "content": full_content, "avatar": "assistant"})
|
||||
return full_content
|
||||
|
||||
if prompt:
|
||||
prompt_placeholder.markdown(prompt)
|
||||
st.session_state.ai_response.append({"role": "user", "content": prompt, "avatar": 'user'})
|
||||
stop = st.empty()
|
||||
stop_button = stop.button('停止', key='break_response')
|
||||
response_of_gpt(prompt)
|
||||
stop.empty()
|
||||
button_clear = st.button("清空", on_click=clear_all, key='clear')
|
||||
|
@@ -0,0 +1,90 @@
|
||||
"""
|
||||
This code is supported by the website: https://www.guanjihuan.com
|
||||
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/38502
|
||||
"""
|
||||
|
||||
import streamlit as st
|
||||
st.set_page_config(
|
||||
page_title="Chat",
|
||||
layout='wide'
|
||||
)
|
||||
|
||||
from zhipuai import ZhipuAI # 在这个版本中测试有效:Version-2.1.5.20250106
|
||||
|
||||
client = ZhipuAI(api_key="")
|
||||
|
||||
with st.sidebar:
|
||||
with st.expander('参数', expanded=True):
|
||||
top_p = st.slider('top_p', 0.01, 1.0, value=0.7, step=0.01, key='top_p_session')
|
||||
temperature = st.slider('temperature', 0.01, 1.0, value=0.95, step=0.01, key='temperature_session')
|
||||
def reset_parameter():
|
||||
st.session_state['top_p_session'] = 0.7
|
||||
st.session_state['temperature_session'] = 0.95
|
||||
reset_parameter_button = st.button('重置', on_click=reset_parameter)
|
||||
|
||||
def chatglm_chat(prompt=[]):
|
||||
response = client.chat.completions.create(
|
||||
model="glm-4-air",
|
||||
messages=prompt,
|
||||
top_p= top_p,
|
||||
temperature= temperature,
|
||||
stream=True
|
||||
)
|
||||
return response
|
||||
|
||||
def getlength(text):
|
||||
length = 0
|
||||
for content in text:
|
||||
temp = content["content"]
|
||||
leng = len(temp)
|
||||
length += leng
|
||||
return length
|
||||
|
||||
def checklen(text):
|
||||
while (getlength(text) > 8000):
|
||||
del text[0]
|
||||
return text
|
||||
|
||||
def getText(role,content, text):
|
||||
jsoncon = {}
|
||||
jsoncon["role"] = role
|
||||
jsoncon["content"] = content
|
||||
text.append(jsoncon)
|
||||
return text
|
||||
|
||||
answer = ""
|
||||
if "text0" not in st.session_state:
|
||||
st.session_state.text0 = []
|
||||
if "messages0" not in st.session_state:
|
||||
st.session_state.messages0 = []
|
||||
def clear_all0():
|
||||
st.session_state.messages0 = []
|
||||
st.session_state.text0 = []
|
||||
if st.session_state.messages0 == []:
|
||||
with st.chat_message("user", avatar="user"):
|
||||
input_placeholder = st.empty()
|
||||
with st.chat_message("robot", avatar="assistant"):
|
||||
message_placeholder = st.empty()
|
||||
for message in st.session_state.messages0:
|
||||
with st.chat_message(message["role"], avatar=message.get("avatar")):
|
||||
st.markdown(message["content"])
|
||||
prompt_text = st.chat_input("请在这里输入您的命令")
|
||||
|
||||
if prompt_text:
|
||||
if st.session_state.messages0 != []:
|
||||
with st.chat_message("user", avatar="user"):
|
||||
input_placeholder = st.empty()
|
||||
with st.chat_message("robot", avatar="assistant"):
|
||||
message_placeholder = st.empty()
|
||||
input_placeholder.markdown(prompt_text)
|
||||
st.session_state.messages0.append({"role": "user", "content": prompt_text, "avatar": "user"})
|
||||
st.session_state.text0 = getText("user", prompt_text, st.session_state.text0)
|
||||
question = checklen(st.session_state.text0)
|
||||
response = chatglm_chat(question)
|
||||
for chunk in response:
|
||||
answer += chunk.choices[0].delta.content or ""
|
||||
message_placeholder.markdown(answer)
|
||||
st.session_state.text0 = getText("assistant", answer, st.session_state.text0)
|
||||
st.session_state.messages0.append({"role": "robot", "content": answer, "avatar": "assistant"})
|
||||
st.rerun()
|
||||
button_clear = st.button("清空", on_click=clear_all0, key='clear0')
|
@@ -0,0 +1,97 @@
|
||||
"""
|
||||
This code is supported by the website: https://www.guanjihuan.com
|
||||
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/38502
|
||||
"""
|
||||
|
||||
import streamlit as st
|
||||
st.set_page_config(
|
||||
page_title="Chat",
|
||||
layout='wide'
|
||||
)
|
||||
|
||||
try:
|
||||
import zhipuai
|
||||
except:
|
||||
import os
|
||||
os.system('pip install zhipuai==1.0.7')
|
||||
import zhipuai
|
||||
|
||||
# 说明:当前代码只对 pip install zhipuai==1.0.7 有效,对最新版本不兼容。
|
||||
|
||||
# 从官网获取 API_KEY
|
||||
zhipuai.api_key = " "
|
||||
|
||||
with st.sidebar:
|
||||
with st.expander('参数', expanded=True):
|
||||
top_p = st.slider('top_p', 0.01, 1.0, value=0.7, step=0.01, key='top_p_session')
|
||||
temperature = st.slider('temperature', 0.01, 1.0, value=0.95, step=0.01, key='temperature_session')
|
||||
def reset_parameter():
|
||||
st.session_state['top_p_session'] = 0.7
|
||||
st.session_state['temperature_session'] = 0.95
|
||||
reset_parameter_button = st.button('重置', on_click=reset_parameter)
|
||||
|
||||
def chatglm_chat(prompt=[]):
|
||||
response = zhipuai.model_api.sse_invoke(
|
||||
model="glm-3-turbo",
|
||||
prompt=prompt,
|
||||
temperature=temperature,
|
||||
top_p=top_p,
|
||||
)
|
||||
return response
|
||||
|
||||
def getlength(text):
|
||||
length = 0
|
||||
for content in text:
|
||||
temp = content["content"]
|
||||
leng = len(temp)
|
||||
length += leng
|
||||
return length
|
||||
|
||||
def checklen(text):
|
||||
while (getlength(text) > 8000):
|
||||
del text[0]
|
||||
return text
|
||||
|
||||
def getText(role,content, text):
|
||||
jsoncon = {}
|
||||
jsoncon["role"] = role
|
||||
jsoncon["content"] = content
|
||||
text.append(jsoncon)
|
||||
return text
|
||||
|
||||
answer = ""
|
||||
if "text0" not in st.session_state:
|
||||
st.session_state.text0 = []
|
||||
if "messages0" not in st.session_state:
|
||||
st.session_state.messages0 = []
|
||||
def clear_all0():
|
||||
st.session_state.messages0 = []
|
||||
st.session_state.text0 = []
|
||||
if st.session_state.messages0 == []:
|
||||
with st.chat_message("user", avatar="user"):
|
||||
input_placeholder = st.empty()
|
||||
with st.chat_message("robot", avatar="assistant"):
|
||||
message_placeholder = st.empty()
|
||||
for message in st.session_state.messages0:
|
||||
with st.chat_message(message["role"], avatar=message.get("avatar")):
|
||||
st.markdown(message["content"])
|
||||
prompt_text = st.chat_input("请在这里输入您的命令")
|
||||
|
||||
if prompt_text:
|
||||
if st.session_state.messages0 != []:
|
||||
with st.chat_message("user", avatar="user"):
|
||||
input_placeholder = st.empty()
|
||||
with st.chat_message("robot", avatar="assistant"):
|
||||
message_placeholder = st.empty()
|
||||
input_placeholder.markdown(prompt_text)
|
||||
st.session_state.messages0.append({"role": "user", "content": prompt_text, "avatar": "user"})
|
||||
st.session_state.text0 = getText("user", prompt_text, st.session_state.text0)
|
||||
question = checklen(st.session_state.text0)
|
||||
response = chatglm_chat(question)
|
||||
for event in response.events():
|
||||
answer += event.data
|
||||
message_placeholder.markdown(answer)
|
||||
st.session_state.text0 = getText("assistant", answer, st.session_state.text0)
|
||||
st.session_state.messages0.append({"role": "robot", "content": answer, "avatar": "assistant"})
|
||||
st.rerun()
|
||||
button_clear = st.button("清空", on_click=clear_all0, key='clear0')
|
@@ -0,0 +1,74 @@
|
||||
"""
|
||||
This code is supported by the website: https://www.guanjihuan.com
|
||||
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/38502
|
||||
"""
|
||||
|
||||
import streamlit as st
|
||||
st.set_page_config(
|
||||
page_title="Chat",
|
||||
layout='wide'
|
||||
)
|
||||
|
||||
from volcenginesdkarkruntime import Ark
|
||||
|
||||
# 从官网获取 API_KEY
|
||||
client = Ark(api_key='')
|
||||
|
||||
with st.sidebar:
|
||||
with st.expander('参数', expanded=True):
|
||||
top_p = st.slider('top_p', 0.01, 1.0, step=0.01, value=0.8, key='top_p_session')
|
||||
temperature = st.slider('temperature', 0.51, 1.0, step=0.01, value=0.85, key='temperature_session')
|
||||
def reset_parameter():
|
||||
st.session_state['top_p_session'] = 0.8
|
||||
st.session_state['temperature_session'] = 0.85
|
||||
reset_parameter_button = st.button('重置', on_click=reset_parameter)
|
||||
|
||||
prompt = st.chat_input("在这里输入您的命令")
|
||||
|
||||
def clear_all():
|
||||
st.session_state.messages = []
|
||||
st.session_state.ai_response = []
|
||||
|
||||
if 'messages' not in st.session_state:
|
||||
st.session_state.messages = []
|
||||
if 'ai_response' not in st.session_state:
|
||||
st.session_state.ai_response = []
|
||||
|
||||
for ai_response in st.session_state.ai_response:
|
||||
with st.chat_message(ai_response["role"], avatar=ai_response.get("avatar")):
|
||||
st.markdown(ai_response["content"])
|
||||
|
||||
prompt_placeholder = st.chat_message("user", avatar='user')
|
||||
with st.chat_message("robot", avatar="assistant"):
|
||||
message_placeholder = st.empty()
|
||||
|
||||
def response_of_doubao(prompt):
|
||||
st.session_state.messages.append({'role': 'user', 'content': prompt})
|
||||
stream = client.chat.completions.create(
|
||||
model="",
|
||||
messages = st.session_state.messages,
|
||||
stream=True,
|
||||
top_p=top_p,
|
||||
temperature=temperature,
|
||||
)
|
||||
full_content = ''
|
||||
for chunk in stream:
|
||||
if not chunk.choices:
|
||||
continue
|
||||
response = chunk.choices[0].delta.content
|
||||
full_content += response
|
||||
message_placeholder.markdown(full_content)
|
||||
if stop_button:
|
||||
break
|
||||
st.session_state.messages.append({'role': 'assistant', 'content': full_content})
|
||||
st.session_state.ai_response.append({"role": "robot", "content": full_content, "avatar": "assistant"})
|
||||
return full_content
|
||||
|
||||
if prompt:
|
||||
prompt_placeholder.markdown(prompt)
|
||||
st.session_state.ai_response.append({"role": "user", "content": prompt, "avatar": 'user'})
|
||||
stop = st.empty()
|
||||
stop_button = stop.button('停止', key='break_response')
|
||||
response_of_doubao(prompt)
|
||||
stop.empty()
|
||||
button_clear = st.button("清空", on_click=clear_all, key='clear')
|
@@ -0,0 +1,86 @@
|
||||
# Streamlit chat front-end for Baidu's ERNIE-Speed-128K via the Qianfan
# REST API, with streamed (SSE) responses.

import streamlit as st
st.set_page_config(
    page_title="Chat",
    layout='wide'
)

import requests
import json


def get_access_token():
    """Obtain an access_token from the API Key and Secret Key.

    Replace [应用API Key] and [应用Secret Key] in the URL below with your
    application's API Key and Secret Key.
    """
    url = "https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=[应用API Key]&client_secret=[应用Secret Key]"

    payload = json.dumps("")
    headers = {
        'Content-Type': 'application/json',
        'Accept': 'application/json'
    }
    response = requests.request("POST", url, headers=headers, data=payload)
    return response.json().get("access_token")


# Sidebar: sampling parameters plus a reset button.
with st.sidebar:
    with st.expander('参数', expanded=True):
        top_p = st.slider('top_p', 0.01, 1.0, step=0.01, value=0.8, key='top_p_session')
        temperature = st.slider('temperature', 0.51, 1.0, step=0.01, value=0.85, key='temperature_session')
        def reset_parameter():
            # Restore both sliders to their default values.
            st.session_state['top_p_session'] = 0.8
            st.session_state['temperature_session'] = 0.85
        reset_parameter_button = st.button('重置', on_click=reset_parameter)


prompt = st.chat_input("在这里输入您的命令")


def clear_all():
    # Forget both the API-side history and the rendered chat history.
    st.session_state.messages = []
    st.session_state.ai_response = []


# Session state: 'messages' is the conversation sent to the API;
# 'ai_response' is the display history replayed on every rerun.
if 'messages' not in st.session_state:
    st.session_state.messages = []
if 'ai_response' not in st.session_state:
    st.session_state.ai_response = []

# Replay the previous conversation.
for ai_response in st.session_state.ai_response:
    with st.chat_message(ai_response["role"], avatar=ai_response.get("avatar")):
        st.markdown(ai_response["content"])

# Placeholders for the current turn, filled once a prompt arrives.
prompt_placeholder = st.chat_message("user", avatar='user')
with st.chat_message("robot", avatar="assistant"):
    message_placeholder = st.empty()


def response_of_ernie_speed_128k(prompt):
    """POST the conversation to ernie-speed-128k, stream the SSE answer into
    message_placeholder, record it in both histories, and return it."""
    st.session_state.messages.append({'role': "user", 'content': prompt})
    url = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-speed-128k?access_token=" + get_access_token()
    payload = json.dumps({
        "messages": st.session_state.messages,
        "top_p": top_p,
        "temperature": temperature,
        "stream": True
    })
    headers = {'Content-Type': 'application/json'}
    response = requests.request("POST", url, headers=headers, data=payload, stream=True)
    full_content = ''
    for line in response.iter_lines():
        # Each SSE data line looks like b"data: {...json...}"; strip the
        # 5-character "data:" prefix and parse. Keep-alive / malformed lines
        # are skipped deliberately (best-effort streaming), but only the
        # expected parse errors are swallowed — not every exception.
        try:
            dict_data = json.loads(line.decode("UTF-8")[5:])
            full_content += dict_data['result']
            message_placeholder.markdown(full_content)
        except (UnicodeDecodeError, json.JSONDecodeError, KeyError):
            pass
        # stop_button is the module-level '停止' button created before this call.
        if stop_button:
            break
    st.session_state.messages.append({'role': "assistant",
                                      'content': full_content})
    st.session_state.ai_response.append({"role": "robot", "content": full_content, "avatar": "assistant"})
    return full_content


if prompt:
    prompt_placeholder.markdown(prompt)
    st.session_state.ai_response.append({"role": "user", "content": prompt, "avatar": 'user'})
    # Temporary stop button shown only while the answer is streaming.
    stop = st.empty()
    stop_button = stop.button('停止', key='break_response')
    response_of_ernie_speed_128k(prompt)
    stop.empty()
button_clear = st.button("清空", on_click=clear_all, key='clear')
@@ -0,0 +1,98 @@
|
||||
"""
|
||||
This code is supported by the website: https://www.guanjihuan.com
|
||||
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/38502
|
||||
"""
|
||||
|
||||
import streamlit as st
|
||||
st.set_page_config(
|
||||
page_title="Chat",
|
||||
layout='wide'
|
||||
)
|
||||
|
||||
import json
|
||||
import types
|
||||
# 安装:pip install --upgrade tencentcloud-sdk-python
|
||||
from tencentcloud.common import credential
|
||||
from tencentcloud.common.profile.client_profile import ClientProfile
|
||||
from tencentcloud.common.profile.http_profile import HttpProfile
|
||||
from tencentcloud.hunyuan.v20230901 import hunyuan_client, models
|
||||
|
||||
with st.sidebar:
|
||||
with st.expander('参数', expanded=True):
|
||||
top_p = st.slider('top_p', 0.01, 1.0, step=0.01, value=0.8, key='top_p_session')
|
||||
temperature = st.slider('temperature', 0.51, 1.0, step=0.01, value=0.85, key='temperature_session')
|
||||
def reset_parameter():
|
||||
st.session_state['top_p_session'] = 0.8
|
||||
st.session_state['temperature_session'] = 0.85
|
||||
reset_parameter_button = st.button('重置', on_click=reset_parameter)
|
||||
|
||||
prompt = st.chat_input("在这里输入您的命令")
|
||||
|
||||
def clear_all():
|
||||
st.session_state.messages = []
|
||||
st.session_state.ai_response = []
|
||||
|
||||
if 'messages' not in st.session_state:
|
||||
st.session_state.messages = []
|
||||
if 'ai_response' not in st.session_state:
|
||||
st.session_state.ai_response = []
|
||||
|
||||
for ai_response in st.session_state.ai_response:
|
||||
with st.chat_message(ai_response["role"], avatar=ai_response.get("avatar")):
|
||||
st.markdown(ai_response["content"])
|
||||
|
||||
prompt_placeholder = st.chat_message("user", avatar='user')
|
||||
with st.chat_message("robot", avatar="assistant"):
|
||||
message_placeholder_hunyuan = st.empty()
|
||||
|
||||
def response_of_hunyuan(prompt):
|
||||
st.session_state.messages.append({'Role': 'user', 'Content': prompt})
|
||||
# 实例化一个认证对象,入参需要传入腾讯云账户 SecretId 和 SecretKey,此处还需注意密钥对的保密
|
||||
# 代码泄露可能会导致 SecretId 和 SecretKey 泄露,并威胁账号下所有资源的安全性。以下代码示例仅供参考,建议采用更安全的方式来使用密钥,请参见:https://cloud.tencent.com/document/product/1278/85305
|
||||
# 密钥可前往官网控制台 https://console.cloud.tencent.com/cam/capi 进行获取
|
||||
cred = credential.Credential("SecretId", "SecretKey")
|
||||
# 实例化一个http选项,可选的,没有特殊需求可以跳过
|
||||
httpProfile = HttpProfile()
|
||||
httpProfile.endpoint = "hunyuan.tencentcloudapi.com"
|
||||
|
||||
# 实例化一个client选项,可选的,没有特殊需求可以跳过
|
||||
clientProfile = ClientProfile()
|
||||
clientProfile.httpProfile = httpProfile
|
||||
# 实例化要请求产品的client对象,clientProfile是可选的
|
||||
client = hunyuan_client.HunyuanClient(cred, "", clientProfile)
|
||||
|
||||
# 实例化一个请求对象,每个接口都会对应一个request对象
|
||||
req = models.ChatCompletionsRequest()
|
||||
params = {
|
||||
"Model": "hunyuan-lite",
|
||||
"Messages": st.session_state.messages,
|
||||
"TopP": top_p,
|
||||
"Temperature": temperature,
|
||||
"Stream": True,
|
||||
}
|
||||
req.from_json_string(json.dumps(params))
|
||||
|
||||
# 返回的resp是一个ChatCompletionsResponse的实例,与请求对象对应
|
||||
resp = client.ChatCompletions(req)
|
||||
# 输出json格式的字符串回包
|
||||
response = ''
|
||||
if isinstance(resp, types.GeneratorType): # 流式响应
|
||||
for event in resp:
|
||||
answer = json.loads(event['data'])
|
||||
response += answer["Choices"][0]['Delta']['Content']
|
||||
message_placeholder_hunyuan.markdown(response)
|
||||
if stop_button:
|
||||
break
|
||||
st.session_state.messages.append({'Role': 'assistant', 'Content': response})
|
||||
st.session_state.ai_response.append({"role": "robot", "content": response, "avatar": "assistant"})
|
||||
return response
|
||||
|
||||
|
||||
if prompt:
|
||||
prompt_placeholder.markdown(prompt)
|
||||
st.session_state.ai_response.append({"role": "user", "content": prompt, "avatar": 'user'})
|
||||
stop = st.empty()
|
||||
stop_button = stop.button('停止', key='break_response')
|
||||
response_of_hunyuan(prompt)
|
||||
stop.empty()
|
||||
button_clear = st.button("清空", on_click=clear_all, key='clear')
|
322
2024.01.27_chat.guanjihuan.com/模型API - 讯飞 - 星火大模型/星火大模型.py
Normal file
322
2024.01.27_chat.guanjihuan.com/模型API - 讯飞 - 星火大模型/星火大模型.py
Normal file
@@ -0,0 +1,322 @@
|
||||
"""
|
||||
This code is supported by the website: https://www.guanjihuan.com
|
||||
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/38502
|
||||
"""
|
||||
|
||||
import streamlit as st
|
||||
st.set_page_config(
|
||||
page_title="Chat",
|
||||
layout='wide'
|
||||
)
|
||||
|
||||
# 以下密钥信息从控制台获取
|
||||
appid = " " # 填写控制台中获取的 APPID 信息
|
||||
api_secret = " " # 填写控制台中获取的 APISecret 信息
|
||||
api_key =" " # 填写控制台中获取的 APIKey 信息
|
||||
|
||||
with st.sidebar:
|
||||
with st.expander('模型', expanded=True):
|
||||
API_model = st.radio('选择:', ('讯飞 - 星火大模型 V1.5', '讯飞 - 星火大模型 V2.0', '讯飞 - 星火大模型 V3.0', '讯飞 - 星火大模型 V3.5'), key='choose_API_model')
|
||||
if API_model == '讯飞 - 星火大模型 V1.5':
|
||||
API_model_0 = '星火大模型 V1.5'
|
||||
elif API_model == '讯飞 - 星火大模型 V2.0':
|
||||
API_model_0 = '星火大模型 V2.0'
|
||||
elif API_model == '讯飞 - 星火大模型 V3.0':
|
||||
API_model_0 = '星火大模型 V3.0'
|
||||
elif API_model == '讯飞 - 星火大模型 V3.5':
|
||||
API_model_0 = '星火大模型 V3.5'
|
||||
st.write('当前模型:'+API_model_0)
|
||||
|
||||
with st.expander('参数', expanded=True):
|
||||
top_k = st.slider('top_k', 1, 6, value=4, step=1, key='top_k_session')
|
||||
temperature = st.slider('temperature', 0.01, 1.0, value=0.5, step=0.01, key='temperature_session')
|
||||
def reset_parameter():
|
||||
st.session_state['top_k_session'] = 4
|
||||
st.session_state['temperature_session'] = 0.5
|
||||
reset_parameter_button = st.button('重置', on_click=reset_parameter)
|
||||
|
||||
# 云端环境的服务地址
|
||||
if API_model == '讯飞 - 星火大模型 V1.5':
|
||||
domain = "general" # v1.5版本
|
||||
Spark_url = "ws://spark-api.xf-yun.com/v1.1/chat" # v1.5环境的地址
|
||||
elif API_model == '讯飞 - 星火大模型 V2.0':
|
||||
domain = "generalv2" # v2.0版本
|
||||
Spark_url = "ws://spark-api.xf-yun.com/v2.1/chat" # v2.0环境的地址
|
||||
elif API_model == '讯飞 - 星火大模型 V3.0':
|
||||
domain = "generalv3" # v3.0版本
|
||||
Spark_url = "ws://spark-api.xf-yun.com/v3.1/chat" # v3.0环境的地址
|
||||
elif API_model == '讯飞 - 星火大模型 V3.5':
|
||||
domain = "generalv3.5" # v3.5版本
|
||||
Spark_url = "ws://spark-api.xf-yun.com/v3.5/chat" # v3.5环境的地址
|
||||
|
||||
import _thread as thread
|
||||
import base64
|
||||
import datetime
|
||||
import hashlib
|
||||
import hmac
|
||||
import json
|
||||
from urllib.parse import urlparse
|
||||
import ssl
|
||||
from datetime import datetime
|
||||
from time import mktime
|
||||
from urllib.parse import urlencode
|
||||
from wsgiref.handlers import format_date_time
|
||||
import websocket # 使用websocket_client
|
||||
answer = ""
|
||||
|
||||
class Ws_Param(object):
|
||||
# 初始化
|
||||
def __init__(self, APPID, APIKey, APISecret, Spark_url):
|
||||
self.APPID = APPID
|
||||
self.APIKey = APIKey
|
||||
self.APISecret = APISecret
|
||||
self.host = urlparse(Spark_url).netloc
|
||||
self.path = urlparse(Spark_url).path
|
||||
self.Spark_url = Spark_url
|
||||
|
||||
# 生成url
|
||||
def create_url(self):
|
||||
# 生成RFC1123格式的时间戳
|
||||
now = datetime.now()
|
||||
date = format_date_time(mktime(now.timetuple()))
|
||||
# 拼接字符串
|
||||
signature_origin = "host: " + self.host + "\n"
|
||||
signature_origin += "date: " + date + "\n"
|
||||
signature_origin += "GET " + self.path + " HTTP/1.1"
|
||||
# 进行hmac-sha256进行加密
|
||||
signature_sha = hmac.new(self.APISecret.encode('utf-8'), signature_origin.encode('utf-8'),
|
||||
digestmod=hashlib.sha256).digest()
|
||||
signature_sha_base64 = base64.b64encode(signature_sha).decode(encoding='utf-8')
|
||||
authorization_origin = f'api_key="{self.APIKey}", algorithm="hmac-sha256", headers="host date request-line", signature="{signature_sha_base64}"'
|
||||
authorization = base64.b64encode(authorization_origin.encode('utf-8')).decode(encoding='utf-8')
|
||||
# 将请求的鉴权参数组合为字典
|
||||
v = {
|
||||
"authorization": authorization,
|
||||
"date": date,
|
||||
"host": self.host
|
||||
}
|
||||
# 拼接鉴权参数,生成url
|
||||
url = self.Spark_url + '?' + urlencode(v)
|
||||
# 此处打印出建立连接时候的url,参考本demo的时候可取消上方打印的注释,比对相同参数时生成的url与自己代码生成的url是否一致
|
||||
return url
|
||||
|
||||
# 收到websocket错误的处理
|
||||
def on_error(ws, error):
|
||||
print("### error:", error)
|
||||
|
||||
# 收到websocket关闭的处理
|
||||
def on_close(ws,one,two):
|
||||
print(" ")
|
||||
|
||||
# 收到websocket连接建立的处理
|
||||
def on_open(ws):
|
||||
thread.start_new_thread(run, (ws,))
|
||||
|
||||
def run(ws, *args):
|
||||
data = json.dumps(gen_params(appid=ws.appid, domain= ws.domain,question=ws.question))
|
||||
ws.send(data)
|
||||
|
||||
# 收到websocket消息的处理
|
||||
def on_message(ws, message):
|
||||
# print(message)
|
||||
data = json.loads(message)
|
||||
code = data['header']['code']
|
||||
if code != 0:
|
||||
print(f'请求错误: {code}, {data}')
|
||||
ws.close()
|
||||
else:
|
||||
choices = data["payload"]["choices"]
|
||||
status = choices["status"]
|
||||
content = choices["text"][0]["content"]
|
||||
global answer
|
||||
answer += content
|
||||
message_placeholder.markdown(answer)
|
||||
if status == 2:
|
||||
ws.close()
|
||||
|
||||
def gen_params(appid, domain,question):
|
||||
"""
|
||||
通过appid和用户的提问来生成请参数
|
||||
"""
|
||||
data = {
|
||||
"header": {
|
||||
"app_id": appid,
|
||||
"uid": "1234"
|
||||
},
|
||||
"parameter": {
|
||||
"chat": {
|
||||
"domain": domain,
|
||||
"random_threshold": 0.5,
|
||||
"temperature": temperature,
|
||||
"top_k": top_k,
|
||||
"max_tokens": 4096,
|
||||
"auditing": "default"
|
||||
}
|
||||
},
|
||||
"payload": {
|
||||
"message": {
|
||||
"text": question
|
||||
}
|
||||
}
|
||||
}
|
||||
return data
|
||||
|
||||
def main_chat(appid, api_key, api_secret, Spark_url,domain, question):
|
||||
wsParam = Ws_Param(appid, api_key, api_secret, Spark_url)
|
||||
websocket.enableTrace(False)
|
||||
wsUrl = wsParam.create_url()
|
||||
ws = websocket.WebSocketApp(wsUrl, on_message=on_message, on_error=on_error, on_close=on_close, on_open=on_open)
|
||||
ws.appid = appid
|
||||
ws.question = question
|
||||
ws.domain = domain
|
||||
ws.run_forever(sslopt={"cert_reqs": ssl.CERT_NONE})
|
||||
|
||||
def getlength(text):
|
||||
length = 0
|
||||
for content in text:
|
||||
temp = content["content"]
|
||||
leng = len(temp)
|
||||
length += leng
|
||||
return length
|
||||
|
||||
def checklen(text):
|
||||
while (getlength(text) > 8000):
|
||||
del text[0]
|
||||
return text
|
||||
|
||||
def getText(role,content, text):
|
||||
jsoncon = {}
|
||||
jsoncon["role"] = role
|
||||
jsoncon["content"] = content
|
||||
text.append(jsoncon)
|
||||
return text
|
||||
|
||||
prompt_text = st.chat_input("请在这里输入您的命令")


def _run_spark_chat(text_key, messages_key, clear_key=None):
    """Render one chat page for a Spark model version.

    The four Spark branches (V1.5/V2.0/V3.0/V3.5) were byte-identical
    except for the session-state keys and the clear-button widget key,
    so they are factored into this single helper.

    Parameters
    ----------
    text_key : str
        Session-state key holding the raw message list sent to the API.
    messages_key : str
        Session-state key holding the rendered chat history.
    clear_key : str or None
        Widget key for the clear button; None keeps Streamlit's default
        (matching the original first branch, which passed no key).
    """
    # on_message() streams into the module-level `message_placeholder`,
    # so it must remain a global rather than a local of this helper.
    global message_placeholder
    if text_key not in st.session_state:
        st.session_state[text_key] = []
    if messages_key not in st.session_state:
        st.session_state[messages_key] = []

    def clear_all():
        # on_click callback: wipe both history buffers for this model.
        st.session_state[messages_key] = []
        st.session_state[text_key] = []

    if st.session_state[messages_key] == []:
        # First run: create placeholders before any history exists.
        with st.chat_message("user", avatar="user"):
            input_placeholder = st.empty()
        with st.chat_message("robot", avatar="assistant"):
            message_placeholder = st.empty()
    # Replay the stored conversation on every Streamlit rerun.
    for message in st.session_state[messages_key]:
        with st.chat_message(message["role"], avatar=message.get("avatar")):
            st.markdown(message["content"])
    if prompt_text:
        if st.session_state[messages_key] != []:
            # History was rendered above: append fresh placeholders.
            with st.chat_message("user", avatar="user"):
                input_placeholder = st.empty()
            with st.chat_message("robot", avatar="assistant"):
                message_placeholder = st.empty()
        input_placeholder.markdown(prompt_text)
        st.session_state[messages_key].append({"role": "user", "content": prompt_text, "avatar": "user"})
        st.session_state[text_key] = getText("user", prompt_text, st.session_state[text_key])
        question = checklen(st.session_state[text_key])
        main_chat(appid, api_key, api_secret, Spark_url, domain, question)
        # `answer` is accumulated by the websocket on_message() callback.
        st.session_state[text_key] = getText("assistant", answer, st.session_state[text_key])
        st.session_state[messages_key].append({"role": "robot", "content": answer, "avatar": "assistant"})
        st.rerun()
    if clear_key is None:
        st.button("清空", on_click=clear_all)
    else:
        st.button("清空", on_click=clear_all, key=clear_key)


if API_model == '讯飞 - 星火大模型 V1.5':
    _run_spark_chat("text", "messages")
elif API_model == '讯飞 - 星火大模型 V2.0':
    _run_spark_chat("text2", "messages2", clear_key='clear2')
elif API_model == '讯飞 - 星火大模型 V3.0':
    _run_spark_chat("text3", "messages3", clear_key='clear3')
elif API_model == '讯飞 - 星火大模型 V3.5':
    _run_spark_chat("text4", "messages4", clear_key='clear4')
|
@@ -0,0 +1,73 @@
|
||||
"""
|
||||
This code is supported by the website: https://www.guanjihuan.com
|
||||
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/38502
|
||||
"""
|
||||
|
||||
import streamlit as st
|
||||
st.set_page_config(
|
||||
page_title="Chat",
|
||||
layout='wide'
|
||||
)
|
||||
|
||||
from dashscope import Generation
|
||||
from dashscope.api_entities.dashscope_response import Role
|
||||
import dashscope
|
||||
dashscope.api_key=""
|
||||
|
||||
with st.sidebar:
    with st.expander('参数', expanded=True):
        top_p = st.slider('top_p', 0.01, 1.0, step=0.01, value=0.8, key='top_p_session')
        temperature = st.slider('temperature', 0.51, 1.0, step=0.01, value=0.85, key='temperature_session')
        def reset_parameter():
            # on_click callback: restore both sliders to their defaults.
            st.session_state['top_p_session'] = 0.8
            st.session_state['temperature_session'] = 0.85
        reset_parameter_button = st.button('重置', on_click=reset_parameter)


prompt = st.chat_input("在这里输入您的命令")


def clear_all():
    # on_click callback: wipe both the API history and the rendered history.
    st.session_state.ai_response = []
    st.session_state.messages = []


for state_key in ('messages', 'ai_response'):
    if state_key not in st.session_state:
        st.session_state[state_key] = []

# Replay the stored conversation on each Streamlit rerun.
for past in st.session_state.ai_response:
    with st.chat_message(past["role"], avatar=past.get("avatar")):
        st.markdown(past["content"])

prompt_placeholder = st.chat_message("user", avatar='user')
with st.chat_message("robot", avatar="assistant"):
    message_placeholder_qwen = st.empty()
|
||||
|
||||
def response_of_qwen(prompt, model="qwen-turbo"):
    """Stream a chat completion from DashScope and render it incrementally.

    Parameters
    ----------
    prompt : str
        The user's message; appended to st.session_state.messages.
    model : str
        DashScope model name. The default keeps the previously
        hard-coded "qwen-turbo"; the parameter lets callers pick
        another Qwen model without changing this function.

    Returns
    -------
    str
        The full concatenated assistant reply.
    """
    st.session_state.messages.append({'role': Role.USER, 'content': prompt})
    responses = Generation.call(model,
                                messages=st.session_state.messages,
                                result_format='message',
                                stream=True,
                                incremental_output=True,
                                top_p=top_p,
                                temperature=temperature,
                                )
    full_content = ''
    for response in responses:
        full_content += response.output.choices[0]['message']['content']
        message_placeholder_qwen.markdown(full_content)
        if stop_button:  # user pressed 停止 while streaming
            break
    st.session_state.messages.append({'role': response.output.choices[0]['message']['role'],
                                      'content': full_content})
    st.session_state.ai_response.append({"role": "robot", "content": full_content, "avatar": "assistant"})
    return full_content
|
||||
|
||||
if prompt:
    # Echo the user's message, remember it, and stream the reply while a
    # temporary "stop" button is available.
    prompt_placeholder.markdown(prompt)
    st.session_state.ai_response.append({"role": "user", "content": prompt, "avatar": 'user'})
    stop = st.empty()
    stop_button = stop.button('停止', key='break_response')
    response_of_qwen(prompt)
    stop.empty()

button_clear = st.button("清空", on_click=clear_all, key='clear')
|
@@ -0,0 +1,77 @@
|
||||
"""
|
||||
This code is supported by the website: https://www.guanjihuan.com
|
||||
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/38502
|
||||
"""
|
||||
|
||||
import streamlit as st
|
||||
st.set_page_config(
|
||||
page_title="Chat",
|
||||
layout='wide'
|
||||
)
|
||||
|
||||
import openai
|
||||
API_BASE = "https://api.lingyiwanwu.com/v1"
|
||||
API_KEY = "your key"
|
||||
|
||||
with st.sidebar:
    with st.expander('参数', expanded=True):
        top_p = st.slider('top_p', 0.01, 1.0, step=0.01, value=0.8, key='top_p_session')
        temperature = st.slider('temperature', 0.51, 1.0, step=0.01, value=0.85, key='temperature_session')
        def reset_parameter():
            # on_click callback: restore both sliders to their defaults.
            st.session_state['top_p_session'] = 0.8
            st.session_state['temperature_session'] = 0.85
        reset_parameter_button = st.button('重置', on_click=reset_parameter)


prompt = st.chat_input("在这里输入您的命令")


def clear_all():
    # on_click callback: wipe both the API history and the rendered history.
    st.session_state.ai_response = []
    st.session_state.messages = []


for state_key in ('messages', 'ai_response'):
    if state_key not in st.session_state:
        st.session_state[state_key] = []

# Replay the stored conversation on each Streamlit rerun.
for past in st.session_state.ai_response:
    with st.chat_message(past["role"], avatar=past.get("avatar")):
        st.markdown(past["content"])

prompt_placeholder = st.chat_message("user", avatar='user')
with st.chat_message("robot", avatar="assistant"):
    message_placeholder = st.empty()
|
||||
|
||||
def response_of_yi(prompt, model="yi-spark"):
    """Stream a chat completion from the Lingyiwanwu (Yi) OpenAI-compatible API.

    Parameters
    ----------
    prompt : str
        The user's message; appended to st.session_state.messages.
    model : str
        Model name. The default keeps the previously hard-coded
        "yi-spark"; the parameter lets callers pick another Yi model.

    Returns
    -------
    str
        The full concatenated assistant reply.
    """
    st.session_state.messages.append({'role': 'user', 'content': prompt})
    client = openai.OpenAI(
        api_key=API_KEY,
        base_url=API_BASE
    )
    completion = client.chat.completions.create(
        model=model,
        messages=st.session_state.messages,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    )
    full_content = ''
    for chunk in completion:
        # Delta content may be None on keep-alive chunks.
        full_content += chunk.choices[0].delta.content or ""
        message_placeholder.markdown(full_content)
        if stop_button:  # user pressed 停止 while streaming
            break
    st.session_state.messages.append({'role': 'assistant',
                                      'content': full_content})
    st.session_state.ai_response.append({"role": "robot", "content": full_content, "avatar": "assistant"})
    return full_content
|
||||
|
||||
if prompt:
    # Echo the user's message, remember it, and stream the reply while a
    # temporary "stop" button is available.
    prompt_placeholder.markdown(prompt)
    st.session_state.ai_response.append({"role": "user", "content": prompt, "avatar": 'user'})
    stop = st.empty()
    stop_button = stop.button('停止', key='break_response')
    response_of_yi(prompt)
    stop.empty()

button_clear = st.button("清空", on_click=clear_all, key='clear')
|
||||
|
@@ -0,0 +1,22 @@
|
||||
from flask import Flask, request
|
||||
|
||||
app = Flask(__name__)
|
||||
|
||||
def get_response(user_input):
    """Return a canned echo-style reply for *user_input*."""
    return f"你说了'{user_input}',我想了想。"
|
||||
|
||||
@app.route('/', methods=['POST'])
def API_server():
    """POST endpoint: read {"prompt": ...} JSON and return the reply text."""
    try:
        payload = request.get_json()
        user_input = payload.get('prompt', '')
    except Exception:
        # Malformed / non-JSON body.
        return '请求错误!请联系 API 管理员。'
    if not user_input:
        # Missing or empty prompt.
        return "请求错误!请联系 API 管理员。"
    return get_response(user_input)


if __name__ == '__main__':
    # NOTE: debug=True reloads on file changes; disable it outside testing.
    app.run(debug=True, threaded=True, port=123)
|
11
2024.02.26_flask_example/API_server_direct/user_direct.py
Normal file
11
2024.02.26_flask_example/API_server_direct/user_direct.py
Normal file
@@ -0,0 +1,11 @@
|
||||
import requests

url = "http://localhost:123"  # API address
payload = {
    "prompt": "Hello, how are you?"  # user input for the server
}
response = requests.post(url, json=payload)  # POST the JSON request
if response.status_code == 200:
    # Non-streaming: the whole reply arrives at once.
    print(response.text)
else:
    print(f"请求失败,状态码: {response.status_code}")
|
@@ -0,0 +1,24 @@
|
||||
from flask import Flask, Response, request
|
||||
|
||||
app = Flask(__name__)
|
||||
|
||||
def get_response(user_input):
    """Yield the canned reply one character at a time (SSE-style chunks)."""
    import time
    reply = f"你说了'{user_input}',我想了想。"
    for ch in reply:
        yield f"{ch}\n\n"
        time.sleep(0.2)  # simulate token-by-token streaming latency
|
||||
|
||||
@app.route('/', methods=['POST'])
def API_server():
    """POST endpoint: stream the reply back as text/event-stream chunks."""
    try:
        payload = request.get_json()
        user_input = payload.get('prompt', '')
    except Exception:
        # Malformed / non-JSON body.
        return '请求错误!请联系 API 管理员。'
    if not user_input:
        # Missing or empty prompt.
        return "请求错误!请联系 API 管理员。"
    return Response(get_response(user_input), content_type='text/event-stream')


if __name__ == '__main__':
    # NOTE: debug=True reloads on file changes; disable it outside testing.
    app.run(debug=True, threaded=True, port=123)
|
13
2024.02.26_flask_example/API_server_stream/user_stream.py
Normal file
13
2024.02.26_flask_example/API_server_stream/user_stream.py
Normal file
@@ -0,0 +1,13 @@
|
||||
import requests

url = "http://localhost:123"  # API address
payload = {
    "prompt": "Hello, how are you?"  # user input for the server
}
response = requests.post(url, json=payload, stream=True)  # streaming POST
if response.status_code == 200:
    # Print each streamed chunk as soon as it arrives.
    for line in response.iter_lines():
        if line:
            print(line.decode('utf-8'), end='', flush=True)
else:
    print(f"请求失败,状态码: {response.status_code}")
|
49
2024.11.21_kelly_formula/kelly_formula.py
Normal file
49
2024.11.21_kelly_formula/kelly_formula.py
Normal file
@@ -0,0 +1,49 @@
|
||||
"""
|
||||
This code is supported by the website: https://www.guanjihuan.com
|
||||
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/43508
|
||||
"""
|
||||
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
|
||||
investment_ratio_array = np.arange(0.1, 1.1, 0.1)
investment_times = 1000
test_times = 100

# Examples: https://www.guanjihuan.com/archives/43412

# Parameters of example (2)
p = 0.6  # win probability
b = 1    # fraction gained on a win
a = 1    # fraction lost on a loss

# # Parameters of example (3)
# p = 0.5
# b = 1
# a = 0.5

win_array = []  # betting fraction that produced the most capital per trial
for _ in range(test_times):
    capital_array = []
    for f in investment_ratio_array:
        capital = 1
        for _ in range(investment_times):
            investment = capital * f
            if investment > 0:
                # One Bernoulli bet: win with probability p.
                if np.random.uniform(0, 1) < p:
                    capital = capital + investment * b
                else:
                    capital = capital - investment * a
        capital_array.append(capital)
    win_array.append(investment_ratio_array[capital_array.index(max(capital_array))])
|
||||
|
||||
def kelly_formula(p, b, a):
    """Kelly criterion: optimal betting fraction for win probability *p*,
    gain fraction *b*, and loss fraction *a*."""
    return p / a - (1 - p) / b
|
||||
|
||||
# Compare the simulated winner distribution with the analytic Kelly fraction.
print(kelly_formula(p=p, b=b, a=a))
plt.hist(win_array, bins=100, color='skyblue')
plt.show()
|
33
2024.12.02_MNIST/download_MNIST_and_show_image.py
Normal file
33
2024.12.02_MNIST/download_MNIST_and_show_image.py
Normal file
@@ -0,0 +1,33 @@
|
||||
"""
|
||||
This code is supported by the website: https://www.guanjihuan.com
|
||||
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/43720
|
||||
"""
|
||||
|
||||
from torchvision import datasets, transforms

# Convert PIL images to tensors; no normalization needed for viewing.
transform = transforms.Compose([transforms.ToTensor()])

train_dataset = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
print(type(train_dataset))
size_of_train_dataset = len(train_dataset)
print(size_of_train_dataset)

test_dataset = datasets.MNIST(root='./data', train=False, download=True, transform=transform)
print(type(test_dataset))
size_of_test_dataset = len(test_dataset)
print(size_of_test_dataset)

import random
rand_number = random.randint(0, size_of_train_dataset - 1)
image, label = train_dataset[rand_number]  # one random (image, label) sample
print(type(image))
print(image.shape)
image = image.squeeze(0)  # (1, 28, 28) -> (28, 28) for imshow
print(type(image))
print(image.shape)

import matplotlib.pyplot as plt
# import os
# os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"  # workaround for duplicate OpenMP runtimes, if an OMP error occurs
plt.imshow(image, cmap='gray')      # show the digit
plt.title(f"Label: {label}")        # ground-truth label
plt.axis('off')                     # hide axes
plt.show()
|
100
2024.12.02_MNIST/train_and_predict_MNIST.PY
Normal file
100
2024.12.02_MNIST/train_and_predict_MNIST.PY
Normal file
@@ -0,0 +1,100 @@
|
||||
"""
|
||||
This code is supported by the website: https://www.guanjihuan.com
|
||||
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/43720
|
||||
"""
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
import torch.optim as optim
|
||||
from torch.utils.data import DataLoader, TensorDataset
|
||||
from torchvision import datasets, transforms
|
||||
|
||||
transform = transforms.Compose([transforms.ToTensor(),transforms.Normalize((0.5,), (0.5,))]) # 数据转换(将图片转换为 Tensor 并进行归一化处理,均值和标准差为 0.5)
|
||||
train_dataset = datasets.MNIST(root='./data', train=True, download=True, transform=transform) # 下载训练数据集
|
||||
test_dataset = datasets.MNIST(root='./data', train=False, download=True, transform=transform) # 下载测试数据集
|
||||
|
||||
# 训练函数
|
||||
def train(model, train_loader, criterion, optimizer, num_epochs=5):
    """Train *model* on *train_loader*, printing loss and accuracy per epoch.

    Parameters
    ----------
    model : torch.nn.Module
    train_loader : DataLoader yielding (images, labels) batches
    criterion : loss function, e.g. nn.CrossEntropyLoss()
    optimizer : optimizer bound to model.parameters()
    num_epochs : int, number of passes over the data
    """
    for epoch in range(num_epochs):
        model.train()
        running_loss = 0.0
        correct = 0
        total = 0
        for images, labels in train_loader:
            optimizer.zero_grad()              # clear previous gradients
            outputs = model(images)            # forward pass
            loss = criterion(outputs, labels)
            loss.backward()                    # backpropagation
            optimizer.step()
            running_loss += loss.item()
            _, predicted = torch.max(outputs, 1)   # predicted class per sample
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
        avg_loss = running_loss / len(train_loader)
        accuracy = 100 * correct / total
        print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {avg_loss:.4f}, Accuracy: {accuracy:.2f}%')
|
||||
|
||||
# 测试函数
|
||||
def test(model, test_loader):
|
||||
model.eval() # 设置为评估模式
|
||||
correct = 0
|
||||
total = 0
|
||||
with torch.no_grad(): # 禁用梯度计算
|
||||
for images, labels in test_loader:
|
||||
outputs = model(images)
|
||||
_, predicted = torch.max(outputs, 1)
|
||||
total += labels.size(0)
|
||||
correct += (predicted == labels).sum().item()
|
||||
accuracy = 100 * correct / total
|
||||
print(f'Test Accuracy: {accuracy:.2f}%')
|
||||
|
||||
# 训练和测试
|
||||
def train_and_test(model, train_loader, test_loader):
    """Train *model* with Adam + cross-entropy, then report test accuracy."""
    criterion = nn.CrossEntropyLoss()          # cross-entropy loss
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    train(model, train_loader, criterion, optimizer, num_epochs=10)
    test(model, test_loader)
|
||||
|
||||
# 扁平化数据,并重建 DataLoader(用于全连接神经网络输入端的数据处理)
|
||||
def flatten_data(data_loader):
    """Rebuild *data_loader* with every image flattened to a 1-D vector.

    Used to prepare image batches for fully connected networks.
    """
    flat_batches = []
    label_batches = []
    for images, labels in data_loader:
        # Keep the batch dimension; flatten all remaining dimensions.
        flat_batches.append(torch.flatten(images, start_dim=1))
        label_batches.append(labels)
    dataset_new = TensorDataset(torch.cat(flat_batches, dim=0),
                                torch.cat(label_batches, dim=0))
    return DataLoader(dataset_new, batch_size=64, shuffle=True)
|
||||
|
||||
# Data loaders over the raw, image-shaped datasets.
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=64, shuffle=False)

# Flattened variants for the fully connected models.
train_loader_new = flatten_data(train_loader)
test_loader_new = flatten_data(test_loader)

# Install with: pip install --upgrade guan
import guan

hidden_size = 64

print('---全连接神经网络模型(包含一个隐藏层)---')
model = guan.fully_connected_neural_network_with_one_hidden_layer(input_size=28*28, hidden_size=hidden_size, output_size=10, activation='relu')
train_and_test(model, train_loader_new, test_loader_new)

print('---全连接神经网络模型(包含两个隐藏层)---')
model = guan.fully_connected_neural_network_with_two_hidden_layers(input_size=28*28, hidden_size_1=hidden_size, hidden_size_2=hidden_size, output_size=10, activation_1='relu', activation_2='relu')
train_and_test(model, train_loader_new, test_loader_new)

print('---全连接神经网络模型(包含三个隐藏层)---')
model = guan.fully_connected_neural_network_with_three_hidden_layers(input_size=28*28, hidden_size_1=hidden_size, hidden_size_2=hidden_size, hidden_size_3=hidden_size, output_size=10, activation_1='relu', activation_2='relu', activation_3='relu')
train_and_test(model, train_loader_new, test_loader_new)

print('---卷积神经网络模型(包含两个卷积层和两个全连接层)---')
model = guan.convolutional_neural_network_with_two_convolutional_layers_and_two_fully_connected_layers(in_channels=1, out_channels_1=32, out_channels_2=64, kernel_size_1=3, kernel_size_2=3, stride_1=1, stride_2=1, padding_1=1, padding_2=1, pooling=1, pooling_kernel_size=2, pooling_stride=2, input_size=7*7*64, hidden_size_1=hidden_size, hidden_size_2=hidden_size, output_size=10)
train_and_test(model, train_loader, test_loader)
|
16
2024.12.11_ollama/ollama_with_python.py
Normal file
16
2024.12.11_ollama/ollama_with_python.py
Normal file
@@ -0,0 +1,16 @@
|
||||
# Blocking (non-streaming) call
import ollama
response = ollama.chat(model="llama3.2:latest", messages=[{"role": "user", "content": "你好"}], stream=False)
print(response['message']['content'])

# Streaming call
import ollama
response = ollama.chat(model="llama3.2:latest", messages=[{"role": "user", "content": "你好"}], stream=True)
for part in response:
    print(part['message']['content'], end='', flush=True)

# Streaming call with the model kept resident; stop it manually with `ollama stop`
import ollama
response = ollama.chat(model="llama3.2:latest", messages=[{"role": "user", "content": "你好"}], stream=True, keep_alive=-1)
for part in response:
    print(part['message']['content'], end='', flush=True)
|
30
2025.01.13_KMeans/KMeans_example.py
Normal file
30
2025.01.13_KMeans/KMeans_example.py
Normal file
@@ -0,0 +1,30 @@
|
||||
"""
|
||||
This code is supported by the website: https://www.guanjihuan.com
|
||||
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/44839
|
||||
"""
|
||||
|
||||
import os
os.environ["OMP_NUM_THREADS"] = "1"  # works around a known KMeans memory leak on Windows with MKL when chunks < threads

from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
import matplotlib.pyplot as plt

# Synthetic data with four true clusters.
X, y = make_blobs(n_samples=300, centers=4, random_state=42)
print(X.shape)
print(y.shape)
plt.scatter(X[:, 0], X[:, 1])  # raw data
plt.show()

plt.scatter(X[:, 0], X[:, 1], c=y, cmap='viridis')  # colored by true label
plt.show()

# Cluster into three groups (deliberately fewer than the true four).
kmeans = KMeans(n_clusters=3, random_state=42)
kmeans.fit(X)
labels = kmeans.labels_  # cluster assignment per sample
print(labels.shape)

plt.scatter(X[:, 0], X[:, 1], c=labels, cmap='viridis')  # clustering result
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s=300, c='red', marker='X')  # cluster centers
plt.title('KMeans Result')
plt.show()
|
11
2025.02.19_check_repository_to_commit/check_repository.py
Normal file
11
2025.02.19_check_repository_to_commit/check_repository.py
Normal file
@@ -0,0 +1,11 @@
|
||||
import guan

# Find every Git repository under the given directory.
git_repository_array = guan.find_git_repositories('D:/data')
guan.print_array(git_repository_array)
print('\n---\n')

# Keep only the repositories with changes not yet committed.
git_repository_array_to_commit = guan.get_git_repositories_to_commit(git_repository_array)
guan.print_array(git_repository_array_to_commit)
print('\n---\n')
|
@@ -0,0 +1,20 @@
|
||||
import guan

# Find every Git repository under the given directory.
git_repository_array = guan.find_git_repositories('D:/data')
guan.print_array(git_repository_array)
print('\n---\n')

# Keep only the repositories with changes not yet committed.
git_repository_array_to_commit = guan.get_git_repositories_to_commit(git_repository_array)
guan.print_array(git_repository_array_to_commit)
print('\n---\n')

import os

# Commit everything in each dirty repository.
for directory in git_repository_array_to_commit:
    print(directory)
    os.chdir(directory)                 # enter the repository
    os.system('git add .')              # stage all changes
    os.system('git commit -m update')   # commit staged changes
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user