from transformers import T5Tokenizer, T5ForConditionalGeneration

# Load the fine-tuned model and tokenizer
model_name = "C:\\fine-tuned-model"
tokenizer = T5Tokenizer.from_pretrained(model_name)
model = T5ForConditionalGeneration.from_pretrained(model_name)
# Prompt
prompt = """Write a detailed medical summary in paragraph format, including patient details such as Sex and Age along with the medical details, from the data below
{
  "Sex": "M",
  "ID": 585248,
  "DateOfBirth": "08/10/1995",
  "Age": "28 years",
  "VisitDate": "09/25/2023",
  "LogNumber": 6418481,
  "Historian": "Self",
  "TriageNotes": ["fever"],
  "HistoryOfPresentIllness": {
    "Complaint": [
      "The patient presents with a chief complaint of chills.",
      "The problem is made better by exercise and rest.",
      "The patient also reports change in appetite and chest pain/pressure as abnormal symptoms related to the complaint."
    ]
  }
}"""
# Tokenize the prompt, then generate with beam search combined with sampling
input_ids = tokenizer.encode(prompt, return_tensors="pt", max_length=512, truncation=True)
generated_ids = model.generate(
    input_ids,
    max_length=200,
    num_beams=5,
    temperature=0.9,         # higher temperature -> more randomness when sampling
    no_repeat_ngram_size=2,  # block repeated bigrams
    top_k=50,
    top_p=0.95,
    early_stopping=True,     # stop once num_beams finished candidates exist
    do_sample=True,          # sample within each beam (beam-search multinomial sampling)
)
# Decode the generated token IDs and print the text
decoded_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
print(f"Generated Text: {decoded_text}")