import streamlit as st
import os
import pickle
import pandas as pd
import torch
from PIL import Image
import numpy as np
from main import predict_caption, CLIPModel, get_text_embeddings
import openai
import base64
from docx import Document
from docx.enum.text import WD_PARAGRAPH_ALIGNMENT
from io import BytesIO

# Set up the OpenAI API key (read from the environment so the secret is not hard-coded)
openai.api_key = os.getenv("OPENAI_API_KEY")

# Custom CSS for the page
st.markdown(
    """
    <style>
    body {
        background-color: transparent;
    }
    .container {
        display: flex;
        justify-content: center;
        align-items: center;
        background-color: rgba(255, 255, 255, 0.7);
        border-radius: 15px;
        padding: 20px;
    }
    .stApp {
        background-color: transparent;
    }
    .stText, .stMarkdown, .stTextInput>label, .stButton>button>span {
        color: #1c1c1c !important; /* Dark text color for text elements */
    }
    .stButton>button>span {
        color: initial !important; /* Reset the text color for the "Generate Caption" button */
    }
    .stMarkdown h1, .stMarkdown h2 {
        color: #ff6b81 !important; /* Soft red-pink for h1 and h2 headers */
        font-weight: bold;
        border: 2px solid #ff6b81; /* Bold border around the headers */
        padding: 10px;
        border-radius: 5px;
    }
    </style>
    """,
    unsafe_allow_html=True,
)

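# Inference assets: CLIPModel (defined in main.py) is restored from weights.pt and, together
# with the precomputed caption embeddings in saved_text_embeddings.pt, is used by
# predict_caption to pick the best-matching caption from testing_df.csv. Everything runs on CPU.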
device = torch.device("cpu")
testing_df = pd.read_csv("testing_df.csv")

model = CLIPModel().to(device)
model.load_state_dict(torch.load("weights.pt", map_location=device))
model.eval()  # inference only

text_embeddings = torch.load("saved_text_embeddings.pt", map_location=device)

def show_predicted_caption(image):
    matches = predict_caption(
        image, model, text_embeddings, testing_df["caption"]
    )[0]
    return matches

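# The two helpers below call OpenAI's (legacy) Completion API with text-davinci-003:
# generate_radiology_report drafts the full report (temperature 1, up to 800 tokens), while
# chatbot_response answers follow-up questions about it (temperature 0.8, up to 500 tokens).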
def generate_radiology_report(prompt):
    response = openai.Completion.create(
        engine="text-davinci-003",
        prompt=prompt,
        max_tokens=800,
        n=1,
        stop=None,
        temperature=1,
    )
    return response.choices[0].text.strip()

def chatbot_response(prompt):
    response = openai.Completion.create(
        engine="text-davinci-003",
        prompt=prompt,
        max_tokens=500,
        n=1,
        stop=None,
        temperature=0.8,
    )
    return response.choices[0].text.strip()

def save_as_docx(text, filename):
    document = Document()
    document.add_paragraph(text)
    with BytesIO() as output:
        document.save(output)
        output.seek(0)
        return output.getvalue()

def download_link(content, filename, link_text):
    b64 = base64.b64encode(content).decode()
    href = f'<a href="data:application/octet-stream;base64,{b64}" download="{filename}">{link_text}</a>'
    return href

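# Note: newer Streamlit releases also ship st.download_button, which could replace the manual
# base64 link above. A minimal sketch (not wired into this app; docx_bytes stands for the
# bytes returned by save_as_docx) would be:
#   st.download_button(
#       "Download Report as DOCX",
#       data=docx_bytes,
#       file_name="radiology_report.docx",
#       mime="application/vnd.openxmlformats-officedocument.wordprocessingml.document",
#   )
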
st.title("RadiXGPT: An Evolution of Machine Doctors Towards Radiology")

# Collect the user's personal information
st.subheader("Personal Information")
first_name = st.text_input("First Name")
last_name = st.text_input("Last Name")
age = st.number_input("Age", min_value=0, max_value=120, value=25, step=1)
gender = st.selectbox("Gender", ["Male", "Female", "Other"])

st.write("Upload a scan to get a radiological report:")
uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "png", "jpeg"])

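# Main flow: once a scan is uploaded, "Generate Caption" retrieves the best-matching caption,
# expands it into a full report (with the patient details prepended) that can be downloaded as
# DOCX, and a 1-to-1 consultation chat about the report follows.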
if uploaded_file is not None:
    image = Image.open(uploaded_file)
    st.image(image, caption="Uploaded Image", use_column_width=True)
    st.write("")

    if st.button("Generate Caption"):
        with st.spinner("Generating caption..."):
            image_np = np.array(image)
            caption = show_predicted_caption(image_np)
        st.success(f"Caption: {caption}")

        # Generate the radiology report
        radiology_report = generate_radiology_report(f"Write Complete Radiology Report for this: {caption}")

        # Add personal information to the radiology report, and keep the result in
        # st.session_state so it survives the rerun triggered by the chat input below
        radiology_report_with_personal_info = f"Patient Name: {first_name} {last_name}\nAge: {age}\nGender: {gender}\n\n{radiology_report}"
        st.session_state["radiology_report"] = radiology_report_with_personal_info

    if "radiology_report" in st.session_state:
        container = st.container()
        with container:
            st.header("Radiology Report")
            st.write(st.session_state["radiology_report"])
            st.markdown(
                download_link(
                    save_as_docx(st.session_state["radiology_report"], "radiology_report.docx"),
                    "radiology_report.docx",
                    "Download Report as DOCX",
                ),
                unsafe_allow_html=True,
            )

    # Add the chatbot functionality (shown once a report exists; the chat history is kept
    # in st.session_state so earlier turns are not lost on rerun)
    if "radiology_report" in st.session_state:
        st.header("1-to-1 Consultation")
        st.write("Ask any questions you have about the radiology report:")
        user_input = st.text_input("Enter your question:")

        if "chat_history" not in st.session_state:
            st.session_state["chat_history"] = []
        chat_history = st.session_state["chat_history"]

        if user_input:
            chat_history.append({"user": user_input})
            if user_input.lower() == "thank you":
                st.write("Bot: You're welcome! If you have any more questions, feel free to ask.")
            else:
                # Generate the answer to the user's question, grounding the prompt in the
                # generated radiology report and the conversation so far
                prompt = f"Answer the user's question based on this radiology report:\n{st.session_state['radiology_report']}"
                for history_item in chat_history:
                    prompt += f"\nUser: {history_item['user']}"
                    if "bot" in history_item:
                        prompt += f"\nBot: {history_item['bot']}"
                answer = chatbot_response(prompt)
                chat_history[-1]["bot"] = answer
                st.write(f"Bot: {answer}")