Commit · bb947d6
1 Parent(s): 96a5494
Update app.py
app.py CHANGED
@@ -11,7 +11,6 @@ from docx import Document
 from docx.enum.text import WD_PARAGRAPH_ALIGNMENT
 from io import BytesIO
 
-
 # Set up OpenAI API
 openai.api_key = "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"
 
@@ -19,14 +18,10 @@ openai.api_key = "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"
 st.markdown(
     """
     <style>
-    body {
-        background-color: transparent;
-    }
     .container {
         display: flex;
         justify-content: center;
         align-items: center;
-        background-color: rgba(255, 255, 255, 0.7);
         border-radius: 15px;
         padding: 20px;
     }
@@ -42,14 +37,12 @@ model = CLIPModel().to(device)
 model.load_state_dict(torch.load("weights.pt", map_location=torch.device('cpu')))
 text_embeddings = torch.load('saved_text_embeddings.pt', map_location=device)
 
-
 def show_predicted_caption(image):
     matches = predict_caption(
         image, model, text_embeddings, testing_df["caption"]
     )[0]
     return matches
 
-
 def generate_radiology_report(prompt):
     response = openai.Completion.create(
         engine="text-davinci-003",
@@ -61,7 +54,6 @@ def generate_radiology_report(prompt):
     )
     return response.choices[0].text.strip()
 
-
 def chatbot_response(prompt):
     response = openai.Completion.create(
         engine="text-davinci-003",
@@ -73,7 +65,6 @@ def chatbot_response(prompt):
     )
     return response.choices[0].text.strip()
 
-# Add this function to your code
 def save_as_docx(text, filename):
     document = Document()
     document.add_paragraph(text)
@@ -82,13 +73,11 @@ def save_as_docx(text, filename):
     output.seek(0)
     return output.getvalue()
 
-# Add this function to your code
 def download_link(content, filename, link_text):
     b64 = base64.b64encode(content).decode()
     href = f'<a href="data:application/octet-stream;base64,{b64}" download="{filename}">{link_text}</a>'
     return href
 
-
 st.title("RadiXGPT: An Evolution of machine doctors towrads Radiology")
 st.write("Upload Scan to get Radiological Report:")
 
@@ -104,7 +93,6 @@ if uploaded_file is not None:
     caption = show_predicted_caption(image_np)
     st.success(f"Caption: {caption}")
 
-    # Add the OpenAI API call here and generate the radiology report
     radiology_report = generate_radiology_report(f"Write Complete Radiology Report for this: {caption}")
     container = st.container()
     with container:
@@ -117,10 +105,13 @@ if uploaded_file is not None:
     st.write("Ask any questions you have about the radiology report:")
 
     user_input = st.text_input("Enter your question:")
-
-    if user_input.lower() == "thank you":
-        st.write("Bot: You're welcome! If you have any more questions, feel free to ask.")
-    else:
-        # Add the OpenAI API call here and generate the answer to the user's question
-        answer = chatbot_response(f"Answer to the user's question based on the generated radiology report: {user_input}")
-        st.write(f"Bot: {answer}")
+    while user_input.lower() != "thank you":
+        if user_input:
+            answer = chatbot_response(f"Answer to the user's question based on the generated radiology report: {user_input}")
+            st.write(f"Bot: {answer}")
+            user_input = st.text_input("Enter your question:")
+        else:
+            break
+
+    if user_input.lower() == "thank you":
+        st.write("Bot: You're welcome! If you have any more questions, feel free to ask.")