"""Gradio web demo for the ChatGLM2-6B chat model.

Loads the model from the Hugging Face Hub, overrides Gradio's chat-history
postprocessing to render Markdown/LaTeX as HTML, and serves a streaming
chat UI with length / top-p / temperature controls.
"""

from transformers import AutoModel, AutoTokenizer
import gradio as gr
import mdtex2html
from utils import load_model_on_gpus

tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True)
# FIX: removed `from_tf=True`. THUDM/chatglm2-6b ships PyTorch weights only;
# `from_tf=True` tells transformers to look for a TensorFlow checkpoint that
# does not exist and breaks loading.
model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).cuda()
# Multi-GPU support: use the two lines below instead of the line above,
# setting num_gpus to your actual number of GPUs.
# from utils import load_model_on_gpus
# model = load_model_on_gpus("THUDM/chatglm2-6b", num_gpus=2)
model = model.eval()

"""Override Chatbot.postprocess"""


def postprocess(self, y):
    """Render each (message, response) pair of the chat history to HTML.

    Converts Markdown + TeX in both sides of every turn via mdtex2html,
    preserving None entries. Returns the (mutated) history list.
    """
    if y is None:
        return []
    for i, (message, response) in enumerate(y):
        y[i] = (
            None if message is None else mdtex2html.convert(message),
            None if response is None else mdtex2html.convert(response),
        )
    return y


gr.Chatbot.postprocess = postprocess


def parse_text(text):
    """copy from https://github.com/GaiZhenbiao/ChuanhuChatGPT/

    Escape model output for HTML display: turn ``` fences into
    <pre><code> blocks and HTML-escape special characters inside them.
    """
    lines = text.split("\n")
    lines = [line for line in lines if line != ""]
    count = 0  # number of ``` fences seen; odd => currently inside a code block
    for i, line in enumerate(lines):
        # FIX: restored the "```" fence test (garbled to an empty string in the
        # scraped original, which would match every line).
        if "```" in line:
            count += 1
            items = line.split('`')
            if count % 2 == 1:
                # Opening fence: anything after the backticks is the language tag.
                lines[i] = f'<pre><code class="language-{items[-1]}">'
            else:
                lines[i] = '<br></code></pre>'
        else:
            if i > 0:
                if count % 2 == 1:
                    # Inside a code block: escape characters with HTML entities.
                    # FIX: "\\`" replaces the invalid escape sequence "\`".
                    line = line.replace("`", "\\`")
                    line = line.replace("<", "&lt;")
                    line = line.replace(">", "&gt;")
                    line = line.replace(" ", "&nbsp;")
                    line = line.replace("*", "&ast;")
                    line = line.replace("_", "&lowbar;")
                    line = line.replace("-", "&#45;")
                    line = line.replace(".", "&#46;")
                    line = line.replace("!", "&#33;")
                    line = line.replace("(", "&#40;")
                    line = line.replace(")", "&#41;")
                    line = line.replace("$", "&#36;")
                lines[i] = "<br>" + line
    text = "".join(lines)
    return text


def predict(input, chatbot, max_length, top_p, temperature, history, past_key_values):
    """Stream a model reply, yielding (chatbot, history, past_key_values) per token chunk."""
    chatbot.append((parse_text(input), ""))
    for response, history, past_key_values in model.stream_chat(
            tokenizer, input, history,
            past_key_values=past_key_values,
            return_past_key_values=True,
            max_length=max_length, top_p=top_p,
            temperature=temperature):
        chatbot[-1] = (parse_text(input), parse_text(response))
        yield chatbot, history, past_key_values


def reset_user_input():
    """Clear the input textbox."""
    return gr.update(value='')


def reset_state():
    """Clear chat display, history, and cached past_key_values."""
    return [], [], None


with gr.Blocks() as demo:
    gr.HTML('<h1 align="center">ChatGLM2-6B</h1>')

    chatbot = gr.Chatbot()
    with gr.Row():
        with gr.Column(scale=4):
            with gr.Column(scale=12):
                user_input = gr.Textbox(show_label=False, placeholder="Input...", lines=10).style(
                    container=False)
            with gr.Column(min_width=32, scale=1):
                submitBtn = gr.Button("Submit", variant="primary")
        with gr.Column(scale=1):
            emptyBtn = gr.Button("Clear History")
            max_length = gr.Slider(0, 32768, value=8192, step=1.0, label="Maximum length", interactive=True)
            top_p = gr.Slider(0, 1, value=0.8, step=0.01, label="Top P", interactive=True)
            temperature = gr.Slider(0, 1, value=0.95, step=0.01, label="Temperature", interactive=True)

    history = gr.State([])
    past_key_values = gr.State(None)

    submitBtn.click(predict, [user_input, chatbot, max_length, top_p, temperature, history, past_key_values],
                    [chatbot, history, past_key_values], show_progress=True)
    submitBtn.click(reset_user_input, [], [user_input])

    emptyBtn.click(reset_state, outputs=[chatbot, history, past_key_values], show_progress=True)

demo.queue().launch(share=False, inbrowser=True)

# --- Original forum question (translated from the scraped page) ---
# Running the code above raised:
#   AttributeError: No module named
#   'transformers_modules.chatglm2-6b'
# How can this be resolved?
'transformers_modules.chatglm2-6b'

回答：这个错误通常是因为未正确安装或加载 `transformers` 库中的模型。请确保已正确安装 `transformers` 库和所需的模型文件，并确认模型名称拼写正确。

另外，可以尝试使用完整的模型名称（而不是本地文件路径）来加载模型，例如：

```python
tokenizer = AutoTokenizer.from_pretrained('THUDM/chatglm2-6b', trust_remote_code=True)
model = AutoModel.from_pretrained('THUDM/chatglm2-6b', trust_remote_code=True).cuda()
```

如果问题仍然存在，请提供更多详细信息（例如完整的错误消息和相关的代码部分），以便更好地帮助您解决问题。


原文地址: http://www.cveoy.top/t/topic/p83s 著作权归作者所有。请勿转载和采集!

免费AI点我,无需注册和登录