import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Use the GPU when one is available, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)

# GPT-2 pretrained on the Chinese CLUECorpusSmall corpus.
tokenizer = AutoTokenizer.from_pretrained("uer/gpt2-chinese-cluecorpussmall")
model = AutoModelForCausalLM.from_pretrained("uer/gpt2-chinese-cluecorpussmall").to(device)


def generate_text(prompt, length=500):
    # add_special_tokens=False keeps [CLS]/[SEP] out of the prompt so the
    # model simply continues the input text.
    inputs = tokenizer(prompt, add_special_tokens=False, return_tensors="pt").to(device)
    output_ids = model.generate(
        inputs["input_ids"],
        max_length=length,
        num_beams=2,
        no_repeat_ngram_size=2,
        early_stopping=True,
        pad_token_id=0,  # [PAD] id for this tokenizer
    )[0]
    txt = tokenizer.decode(output_ids)

    # Strip leftover marker tokens as whole tokens. (The original removed the
    # characters '[', ']', 'S', 'E', 'P', 'U', 'N', 'K' one by one, which also
    # deletes those letters from any legitimate Latin text in the output.)
    for marker in ("[CLS]", "[SEP]", "[PAD]", "[UNK]", "[MASK]"):
        txt = txt.replace(marker, "")
    # The BERT-style tokenizer decodes with a space between every Chinese
    # character; remove those as well.
    return txt.replace(" ", "")


with gr.Blocks() as web:
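    # NOTE: the original snippet is cut off after `gr.Markdown("`. What
    # follows is a minimal sketch of the UI wiring, assuming a simple
    # title + textbox + button layout; the actual Markdown text, component
    # names, and layout are assumptions, not from the source.
    gr.Markdown("GPT-2 Chinese text generation")  # placeholder title (assumed)
    prompt_box = gr.Textbox(label="Prompt")
    length_slider = gr.Slider(50, 1000, value=500, step=10, label="Max length")
    output_box = gr.Textbox(label="Generated text")
    run_btn = gr.Button("Generate")
    # Wire the button to generate_text(prompt, length) defined above.
    run_btn.click(generate_text, inputs=[prompt_box, length_slider], outputs=output_box)

web.launch()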