# gpt2test01 / app.py
# NOTE(review): the lines below are Hugging Face Spaces page chrome that was
# accidentally captured along with the source; kept here as a comment so the
# file remains valid Python:
#   arjunascagnetto's picture / Update app.py / d77f563 / raw / history blame / 1.64 kB
import gradio as gr
#from transformers import AutoModelForSequenceClassification, AutoTokenizer
#from transformers import BertTokenizer, BertLMHeadModel
# Load pre-trained model and tokenizer
#tokenizer = BertTokenizer.from_pretrained('clinicalBERT')
#model = BertLMHeadModel.from_pretrained('clinicalBERT')
#from transformers import AutoTokenizer, AutoModel
#tokenizer = AutoTokenizer.from_pretrained("medicalai/ClinicalBERT")
#model = AutoModel.from_pretrained("medicalai/ClinicalBERT")
#from transformers import AutoTokenizer, AutoModelForSequenceClassification
#tokenizer = AutoTokenizer.from_pretrained("emilyalsentzer/Bio_ClinicalBERT")
#model = AutoModelForSequenceClassification.from_pretrained("emilyalsentzer/Bio_ClinicalBERT", num_labels=2)
import gradio as gr
from transformers import pipeline

# Load the generation model once at startup (module-level side effect:
# downloads/loads weights the first time the Space boots).
# FIX(review): the original loaded "emilyalsentzer/Bio_ClinicalBERT" into the
# "text-generation" pipeline, but Bio_ClinicalBERT is a masked-LM (BERT)
# checkpoint with no causal LM head — transformers warns that BertLMHeadModel
# needs `is_decoder=True`, and generation produces garbage. Use GPT-2, a true
# causal LM, which also matches this Space's name (gpt2test01).
model = pipeline("text-generation", model="gpt2")
# Generation callback wired into the Gradio interface below.
def generate_text(prompt):
    """Return the model's continuation of *prompt* (at most 50 tokens total,
    per ``max_length``); the pipeline's first candidate is used."""
    candidates = model(prompt, max_length=50)
    best = candidates[0]
    return best['generated_text']
# Build the Gradio UI: one free-text input box, one text output box,
# backed by generate_text above.
interface = gr.Interface(fn=generate_text, inputs="text", outputs="text")
# --- Dead code below: earlier tokenizer/model experiments, kept commented out ---
# Example usage of a tokenizer-based model (never active in this revision):
#inputs = tokenizer("Esempio di testo da classificare", return_tensors="pt")
#outputs = model(**inputs)
# Define a function to generate text using the model
#def generate_text(input_text):
#    input_ids = tokenizer.encode(input_text, return_tensors='pt')
#    output = model.generate(input_ids)
#    return tokenizer.decode(output[0], skip_special_tokens=True)
#interface = gr.Interface(fn=generate_text, inputs="text", outputs="text")
# Start the app: blocks the process and serves the web UI.
interface.launch()