WXM2000 committed on
Commit 0562e00
1 Parent(s): d6bd11d

Update app.py

Files changed (1)
  1. app.py +3 -3
app.py CHANGED
@@ -1,13 +1,13 @@
  from transformers import AutoModelForTokenClassification,AutoTokenizer,pipeline
  import gradio as gr
  import torch
- model = AutoModelForTokenClassification.from_pretrained('uer/roberta-base-finetuned-cluener2020-chinese')#,local_files_only=True)#cache_dir="C:\2023\Huggingface_4_12\Gradio Tutorial",force_download=True)
+ model = AutoModelForTokenClassification.from_pretrained('uer/roberta-base-finetuned-cluener2020-chinese',local_files_only=True)#cache_dir="C:\2023\Huggingface_4_12\Gradio Tutorial",force_download=True)
  # model = AutoModelForTokenClassification.from_pretrained('C:\\2023\Huggingface_4_12\Gradio Tutorial\cache3\Huggingface_4_12\Gradio Tutorial\models--uer--roberta-base-finetuned-cluener2020-chinese\blobs\3d20fdef0b0f04d283e1693ef4c030b133fa7c3c')
- tokenizer = AutoTokenizer.from_pretrained('uer/roberta-base-finetuned-cluener2020-chinese')#,local_files_only=True)
+ tokenizer = AutoTokenizer.from_pretrained('uer/roberta-base-finetuned-cluener2020-chinese',local_files_only=True)


  ner_pipeline = pipeline('ner', model=model, tokenizer=tokenizer)
- examples=["江苏警方通报特斯拉冲进店铺","安卓手机变天!没想到“照相机”和“折叠机”跑分也能破140万"]
+ examples=["江苏警方通报特斯拉冲进店铺"]

  def ner(text):
      output1 = ner_pipeline(text)
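
For context, a minimal sketch of what this change does, not part of the commit: with local_files_only=True, from_pretrained resolves 'uer/roberta-base-finetuned-cluener2020-chinese' from the local Hugging Face cache only and raises an error instead of downloading, which is why the snapshot must already be cached (the commented-out cache_dir/force_download arguments are alternative knobs for that cache). The Gradio wiring below is a hypothetical completion, since the diff is truncated right after output1 = ner_pipeline(text); the real app.py may differ.

    # Sketch only, assumes the model snapshot already sits in the local HF cache,
    # because local_files_only=True forbids any network download.
    from transformers import AutoModelForTokenClassification, AutoTokenizer, pipeline
    import gradio as gr

    model_id = 'uer/roberta-base-finetuned-cluener2020-chinese'
    model = AutoModelForTokenClassification.from_pretrained(model_id, local_files_only=True)
    tokenizer = AutoTokenizer.from_pretrained(model_id, local_files_only=True)
    ner_pipeline = pipeline('ner', model=model, tokenizer=tokenizer)

    def ner(text):
        # pipeline('ner') returns a list of dicts with 'entity', 'score', 'word',
        # 'start', 'end'; gr.HighlightedText accepts them under the "entities" key.
        output1 = ner_pipeline(text)
        return {"text": text, "entities": output1}

    # Hypothetical Gradio wiring beyond the truncated diff.
    # The example sentence means roughly "Jiangsu police report on a Tesla crashing into a shop".
    demo = gr.Interface(fn=ner,
                        inputs=gr.Textbox(label="text"),
                        outputs=gr.HighlightedText(),
                        examples=["江苏警方通报特斯拉冲进店铺"])
    demo.launch()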