from transformers import T5Tokenizer, T5ForConditionalGeneration
import streamlit as st
import json
# Load the fine-tuned model and tokenizer.
# NOTE: model_name = "." loads model/tokenizer files from the current working
# directory — the app must be launched from the directory containing the
# fine-tuned checkpoint (config.json, weights, tokenizer files).
model_name = "."
tokenizer = T5Tokenizer.from_pretrained(model_name)
model = T5ForConditionalGeneration.from_pretrained(model_name)
# Function to generate text based on input
def generate_text(input_text):
    """Generate text from *input_text* with the fine-tuned T5 model.

    Args:
        input_text: Raw input string to summarize/transform.

    Returns:
        The decoded model output with special tokens removed.
    """
    # Truncate long inputs to 512 tokens. Without truncation=True,
    # max_length alone does NOT truncate — it only emits a warning and
    # passes the full sequence to the model.
    input_ids = tokenizer.encode(
        input_text,
        return_tensors="pt",
        max_length=512,
        truncation=True,
    )
    # NOTE(review): num_beams=5 combined with do_sample=True mixes beam
    # search with sampling — unusual, but kept to preserve the original
    # decoding configuration.
    generated_ids = model.generate(
        input_ids,
        max_length=200,
        num_beams=5,
        temperature=0.9,  # >0.9 increases randomness of sampled tokens
        no_repeat_ngram_size=2,
        top_k=50,
        top_p=0.95,
        early_stopping=True,
        do_sample=True,
    )
    # Decode the first (best) sequence back to plain text.
    return tokenizer.decode(generated_ids[0], skip_special_tokens=True)
# Streamlit app
def main():
    """Streamlit UI: read user text, run the model, display the result."""
    # Styled page title. NOTE(review): the original string literal was
    # corrupted (its HTML tags were stripped, leaving a syntax error);
    # reconstructed as a centered heading, matching the original intent
    # implied by unsafe_allow_html=True.
    st.markdown(
        "<h1 style='text-align: center;'>Medical Summary - Text Generation</h1>",
        unsafe_allow_html=True,
    )

    # Textbox for user input.
    user_input = st.text_area("Enter Text:", "")

    # Generate only on explicit button press and non-empty input.
    if st.button("Compute"):
        if user_input:
            result = generate_text(user_input)
            # A text area gives word wrap plus a scrollable, copyable box.
            st.text_area("Generated Text:", result, key="generated_text")
# Run the Streamlit app
# Standard script entry guard: run the app only when executed directly.
if __name__ == "__main__":
main()