import gradio as gr
from transformers import pipeline

# Earlier experiments are kept below as commented-out reference code.
# from transformers import AutoModelForSequenceClassification, AutoTokenizer
# from transformers import BertTokenizer, BertLMHeadModel

# Attempt 1: load the pre-trained model and tokenizer directly. This likely
# fails: 'clinicalBERT' is not a valid Hub ID without a namespace (see
# attempt 2 below for a namespaced checkpoint).
# tokenizer = BertTokenizer.from_pretrained('clinicalBERT')
# model = BertLMHeadModel.from_pretrained('clinicalBERT')

# Attempt 2: the namespaced ClinicalBERT checkpoint. AutoModel returns raw
# hidden states with no task head, so it cannot classify or generate on its own.
# from transformers import AutoTokenizer, AutoModel
# tokenizer = AutoTokenizer.from_pretrained("medicalai/ClinicalBERT")
# model = AutoModel.from_pretrained("medicalai/ClinicalBERT")
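
# A minimal sketch (assuming attempt 2 were active) of turning the raw hidden
# states into a sentence embedding via mean pooling; the example sentence is
# hypothetical:
# inputs = tokenizer("Patient reports chest pain on exertion.", return_tensors="pt")
# embedding = model(**inputs).last_hidden_state.mean(dim=1)  # mean over tokens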


# Attempt 3: sequence classification. The num_labels=2 head is newly
# initialized here and would need fine-tuning before its outputs mean anything.
# from transformers import AutoTokenizer, AutoModelForSequenceClassification
# tokenizer = AutoTokenizer.from_pretrained("emilyalsentzer/Bio_ClinicalBERT")
# model = AutoModelForSequenceClassification.from_pretrained("emilyalsentzer/Bio_ClinicalBERT", num_labels=2)



# Load the model. Bio_ClinicalBERT is a masked (BERT-style) language model,
# so it is served through the "fill-mask" pipeline; the "text-generation"
# pipeline expects a causal LM and does not work with this checkpoint.
model = pipeline("fill-mask", model="emilyalsentzer/Bio_ClinicalBERT")

# Define the function that fills in the [MASK] token: the fill-mask pipeline
# returns candidate completions ranked by score; take the top sequence.
def fill_mask(prompt):
    if "[MASK]" not in prompt:
        return "Please include a [MASK] token in the prompt."
    return model(prompt)[0]["sequence"]
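
# Quick sanity check outside the UI (hypothetical prompt; the top-ranked
# completion depends on the model):
# print(fill_mask("The patient was treated with [MASK]."))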

# Create the Gradio interface
interface = gr.Interface(fn=fill_mask, inputs="text", outputs="text")

# Example usage of the classification model from attempt 3 above:
# inputs = tokenizer("Example text to classify", return_tensors="pt")
# outputs = model(**inputs)
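
# A minimal sketch of completing the classification example above, assuming
# the two-label head from attempt 3 (untrained, so the scores are meaningless
# until the head is fine-tuned):
# import torch
# probs = torch.softmax(outputs.logits, dim=-1)
# predicted_label = probs.argmax(dim=-1).item()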

# Earlier draft: generate text with the tokenizer/model pair directly (relies
# on the commented-out BertLMHeadModel experiment above; BERT checkpoints are
# not trained for generation, which is why this path was abandoned):
# def generate_text(input_text):
#     input_ids = tokenizer.encode(input_text, return_tensors='pt')
#     output = model.generate(input_ids)
#     return tokenizer.decode(output[0], skip_special_tokens=True)

# interface = gr.Interface(fn=generate_text, inputs="text", outputs="text")

interface.launch()
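
# By default launch() serves the app locally. For a temporary public URL
# (handy for quick demos), Gradio also supports:
# interface.launch(share=True)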