from transformers import AutoModelForTokenClassification, AutoTokenizer, pipeline
import gradio as gr
import torch
# Load the Chinese NER model fine-tuned on CLUENER2020, and its tokenizer, from the Hugging Face Hub.
model = AutoModelForTokenClassification.from_pretrained('uer/roberta-base-finetuned-cluener2020-chinese')
tokenizer = AutoTokenizer.from_pretrained('uer/roberta-base-finetuned-cluener2020-chinese')
ner_pipeline = pipeline('ner', model=model, tokenizer=tokenizer)
examples = ["江苏警方通报特斯拉冲进店铺"]
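# The raw 'ner' pipeline (no aggregation) returns one dict per token with the keys
# 'entity', 'score', 'index', 'word', 'start', 'end', for example (illustrative shape,
# not actual model output for the sentence above):
#   {'entity': 'B-address', 'score': 0.99, 'index': 1, 'word': '江', 'start': 0, 'end': 1}
# ner() below strips the B-/I- prefixes and merges contiguous tokens that share a label
# into single spans that gr.HighlightedText can render.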
def ner(text):
    # Run token-level NER, then merge adjacent tokens that share the same
    # entity label into single spans for highlighting.
    output1 = ner_pipeline(text)
    if len(output1) == 0:
        return {"text": text, "entities": []}
    # Strip the "B-"/"I-" prefix from the first token's label.
    output = [output1[0]]
    if output[0]['entity'][1] == '-':
        output[0]['entity'] = output[0]['entity'][2:]
    for i in range(1, len(output1)):
        if output1[i]['entity'][1] == '-':
            output1[i]['entity'] = output1[i]['entity'][2:]
        dict1 = output1[i]
        dict0 = output[-1]
        if (dict0['end'] == dict1['start']) and (dict0['entity'] == dict1['entity']):
            # Contiguous token with the same label: extend the previous span.
            merged = {
                'entity': dict0['entity'],
                'score': min(dict0['score'], dict1['score']),
                'index': dict1['index'],
                'word': dict0['word'] + dict1['word'],
                'start': dict0['start'],
                'end': dict1['end'],
            }
            output[-1] = merged
        else:
            # New entity (or a non-contiguous token): start a fresh span.
            output.append(dict1)
    return {"text": text, "entities": output}
demo = gr.Interface(
    fn=ner,
    inputs=gr.Textbox(placeholder="Enter sentence here..."),
    outputs=gr.HighlightedText(),
    examples=examples,
)
demo.launch()