commit 9a0c8a8f83
parent cd525482d3
update
@@ -38,7 +38,7 @@ elif choose_load_method == 1:
     model_chatglm3, tokenizer_chatglm3 = load_model_chatglm3()
 
 elif choose_load_method == 2:
-    # Load on the CPU (requires 25 GB of RAM; responses will be slow)
+    # Load on the CPU (requires 25 GB of RAM; responses will be slow, not recommended)
     @st.cache_resource
     def load_model_chatglm3():
        from transformers import AutoModel, AutoTokenizer
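Note: both files in this commit rely on st.cache_resource to keep the loaded model alive across Streamlit's script reruns. A minimal, self-contained sketch of that pattern (the loader name and placeholder resource below are illustrative, not from the commit):

import streamlit as st

@st.cache_resource
def load_expensive_resource():
    # Runs once; Streamlit reuses the returned object on every rerun,
    # so a large model is not reloaded on each user interaction.
    return {"model": "placeholder"}  # stand-in for a real model load

resource = load_expensive_resource()
st.write(resource)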
开源语音模型 Bark/Bark.py (new file, 62 lines)
@@ -0,0 +1,62 @@
+"""
+This code is supported by the website: https://www.guanjihuan.com
+The newest version of this code is on the web page: https://www.guanjihuan.com/archives/38502
+"""
+
+import streamlit as st
+st.set_page_config(
+    page_title="Chat",
+    layout='wide'
+)
+
+choose_load_method = 0
+
+if choose_load_method == 0:
+    # Load on the GPU (requires 5 GB of VRAM)
+    @st.cache_resource
+    def load_bark_model():
+        from transformers import AutoProcessor, AutoModel
+        processor = AutoProcessor.from_pretrained("suno/bark")
+        model = AutoModel.from_pretrained("suno/bark").to("cuda")
+        return model, processor
+    model, processor = load_bark_model()
+
+elif choose_load_method == 1:
+    # Load on the CPU (requires 9 GB of RAM; runs slowly, not recommended)
+    @st.cache_resource
+    def load_bark_model():
+        from transformers import AutoProcessor, AutoModel
+        processor = AutoProcessor.from_pretrained("suno/bark")
+        model = AutoModel.from_pretrained("suno/bark")
+        return model, processor
+    model, processor = load_bark_model()
+
+prompt = st.chat_input("Enter your command here")
+
+prompt_placeholder = st.empty()
+with prompt_placeholder.container():
+    with st.chat_message("user", avatar='user'):
+        pass
+
+if prompt:
+    with prompt_placeholder.container():
+        with st.chat_message("user", avatar='user'):
+            st.write(prompt)
+    st.write('Converting, please wait.')
+
+    inputs = processor(
+        text=[prompt],
+        return_tensors="pt",
+    )
+    if choose_load_method == 0:
+        inputs = {key: value.to("cuda") for key, value in inputs.items()}
+
+    speech_values = model.generate(**inputs, do_sample=True)
+
+    import scipy.io.wavfile
+    sampling_rate = 24_000
+    scipy.io.wavfile.write('./a.wav', rate=sampling_rate, data=speech_values.cpu().numpy().squeeze())
+
+    audio_file = open('./a.wav', 'rb')
+    audio_bytes = audio_file.read()
+    st.audio(audio_bytes, format='audio/wav')
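Note: the app can be started with the standard Streamlit CLI, e.g. streamlit run "开源语音模型 Bark/Bark.py". For reference, a minimal sketch of the same Bark text-to-speech call without the Streamlit UI (assumes transformers and scipy are installed; the example text and output filename are illustrative, not from the commit):

import scipy.io.wavfile
from transformers import AutoProcessor, AutoModel

processor = AutoProcessor.from_pretrained("suno/bark")
model = AutoModel.from_pretrained("suno/bark")  # add .to("cuda") for the GPU path

inputs = processor(text=["Hello, this is a test."], return_tensors="pt")
speech_values = model.generate(**inputs, do_sample=True)

# Bark generates audio at a 24 kHz sampling rate.
scipy.io.wavfile.write("test.wav", rate=24_000, data=speech_values.cpu().numpy().squeeze())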