import gradio as gr

from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# The int4 checkpoint shares the tokenizer of the full ChatGLM-6B model.
tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)

# Load the int4-quantized checkpoint with parallel CPU kernel compilation, then run it on CPU in float32.
model = AutoModelForSeq2SeqLM.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) \
    .quantize(bits=4, compile_parallel_kernel=True, parallel_num=2).cpu().float()

def chat(query, history=None):
    # Avoid a mutable default argument; Gradio passes None for the initial state.
    history = history or []
    # model.chat returns (response, updated_history); show and store the updated history.
    _, history = model.chat(tokenizer, query, history, max_length=512)
    return history, history

description = "This is an unofficial chatbot application based on the open-source model ChatGLM-6B (https://github.com/THUDM/ChatGLM-6B), running on CPU, so max_length is limited to 512.\nTo use this chatbot in your own Space, click the 'Duplicate this Space' button next to 'Linked Models'.\n"
title = "ChatGLM-6B Chatbot"
# ChatGLM-6B is bilingual; the Chinese examples mean "Hello." and "Introduce Tsinghua University".
examples = [["Hello?"], ["δ½ ε₯½γ€‚"], ["δ»‹η»ζΈ…εŽ"]]

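# Wire the chat function into a Gradio Interface: a text box plus the hidden
# conversation state go in; the chatbot widget and the updated state come out.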
chatbot_interface = gr.Interface(
    fn=chat,
    title=title,
    description=description,
    examples=examples,
    inputs=["text", "state"],
    outputs=["chatbot", "state"]
)

chatbot_interface.launch()