Spaces:
Singularity666 committed
Commit baed763 · Parent(s): b8d9336
Update app.py

app.py CHANGED
@@ -50,8 +50,6 @@ st.markdown(
     unsafe_allow_html=True,
 )
 
-
-
 device = torch.device("cpu")
 
 testing_df = pd.read_csv("testing_df.csv")
@@ -59,14 +57,12 @@ model = CLIPModel().to(device)
 model.load_state_dict(torch.load("weights.pt", map_location=torch.device('cpu')))
 text_embeddings = torch.load('saved_text_embeddings.pt', map_location=device)
 
-
 def show_predicted_caption(image):
     matches = predict_caption(
         image, model, text_embeddings, testing_df["caption"]
     )[0]
     return matches
 
-
 def generate_radiology_report(prompt):
     response = openai.Completion.create(
         engine="text-davinci-003",
@@ -78,7 +74,6 @@ def generate_radiology_report(prompt):
     )
     return response.choices[0].text.strip()
 
-
 def chatbot_response(prompt):
     response = openai.Completion.create(
         engine="text-davinci-003",
@@ -90,7 +85,6 @@ def chatbot_response(prompt):
     )
     return response.choices[0].text.strip()
 
-# Add this function to your code
 def save_as_docx(text, filename):
     document = Document()
     document.add_paragraph(text)
@@ -99,16 +93,21 @@ def save_as_docx(text, filename):
     output.seek(0)
     return output.getvalue()
 
-# Add this function to your code
 def download_link(content, filename, link_text):
     b64 = base64.b64encode(content).decode()
     href = f'<a href="data:application/octet-stream;base64,{b64}" download="{filename}">{link_text}</a>'
     return href
 
-
 st.title("RadiXGPT: An Evolution of machine doctors towards Radiology")
-st.write("Upload Scan to get Radiological Report:")
 
+# Collect user's personal information
+st.subheader("Personal Information")
+first_name = st.text_input("First Name")
+last_name = st.text_input("Last Name")
+age = st.number_input("Age", min_value=0, max_value=120, value=25, step=1)
+gender = st.selectbox("Gender", ["Male", "Female", "Other"])
+
+st.write("Upload Scan to get Radiological Report:")
 uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "png", "jpeg"])
 if uploaded_file is not None:
     image = Image.open(uploaded_file)
@@ -119,17 +118,21 @@ if uploaded_file is not None:
     with st.spinner("Generating caption..."):
         image_np = np.array(image)
         caption = show_predicted_caption(image_np)
-
+
         st.success(f"Caption: {caption}")
 
-        #
+        # Generate the radiology report
         radiology_report = generate_radiology_report(f"Write Complete Radiology Report for this: {caption}")
+
+        # Add personal information to the radiology report
+        radiology_report_with_personal_info = f"Patient Name: {first_name} {last_name}\nAge: {age}\nGender: {gender}\n\n{radiology_report}"
+
         container = st.container()
         with container:
            st.header("Radiology Report")
-            st.write(
-            st.markdown(download_link(save_as_docx(
-
+            st.write(radiology_report_with_personal_info)
+            st.markdown(download_link(save_as_docx(radiology_report_with_personal_info, "radiology_report.docx"), "radiology_report.docx", "Download Report as DOCX"), unsafe_allow_html=True)
+
             # Add the chatbot functionality
             st.header("1-to-1 Consultation")
             st.write("Ask any questions you have about the radiology report:")
@@ -143,7 +146,7 @@ if uploaded_file is not None:
             if user_input.lower() == "thank you":
                 st.write("Bot: You're welcome! If you have any more questions, feel free to ask.")
             else:
-                #
+                # Generate the answer to the user's question
                 prompt = f"Answer to the user's question based on the generated radiology report: {user_input}"
                 for history_item in chat_history:
                     prompt += f"\nUser: {history_item['user']}"