File size: 1,486 Bytes
23975dc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
from transformers import T5Tokenizer, T5ForConditionalGeneration

# Load the fine-tuned T5 model and its tokenizer from a local checkpoint directory.
# NOTE(review): path is Windows-specific and hard-coded — confirm it matches the
# deployment environment before running elsewhere.
model_name = "C:\\fine-tuned-model"
tokenizer = T5Tokenizer.from_pretrained(model_name)
model = T5ForConditionalGeneration.from_pretrained(model_name)

# Prompt: instruction followed by the structured patient record to summarize.
prompt = """Write a medical summary in detailed way with patient details like Sex, Age and medical details in a paragraph format from the below data 
{
  
  "Sex": "M",
  "ID": 585248,
  "DateOfBirth": "08/10/1995",
  "Age": "28 years",
  "VisitDate": "09/25/2023",
  "LogNumber": 6418481,
  "Historian": "Self",
  "TriageNotes": ["fever"],
  "HistoryOfPresentIllness": {
    "Complaint": [
      "The patient presents with a chief complaint of chills.",
      "The problem is made better by exercise and rest.",
      "The patient also reports change in appetite and chest pain/pressure as abnormal symptoms related to the complaint."
    ]
  }
}"""

# Tokenize the prompt. `truncation=True` is required for `max_length` to take
# effect; without it, Transformers ignores the limit and emits a warning, and
# an over-long prompt would exceed the model's input window.
input_ids = tokenizer.encode(
    prompt,
    return_tensors="pt",
    max_length=512,
    truncation=True,
)

# Generate the summary. `do_sample=True` with `num_beams=5` performs
# beam-sample decoding; temperature/top_k/top_p shape the sampling
# distribution, and `no_repeat_ngram_size` suppresses repeated bigrams.
output_ids = model.generate(
    input_ids,
    max_length=200,
    num_beams=5,
    temperature=0.9,  # higher temperature -> more randomness in sampling
    no_repeat_ngram_size=2,
    top_k=50,
    top_p=0.95,
    early_stopping=True,  # stop beams once num_beams candidates are finished
    do_sample=True,
)

# Decode the generated token IDs back into text, dropping special tokens
# such as </s> and <pad>.
decoded_text = tokenizer.decode(output_ids[0], skip_special_tokens=True)
print(f"Generated Text: {decoded_text}")