ns committed
Commit 9c1e2f3
1 Parent(s): e55aa10

initial commit

Files changed (2)
  1. app.py +45 -0
  2. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,45 @@
+import streamlit as st
+from PIL import Image
+from transformers import BlipForConditionalGeneration, BlipProcessor
+
+# Load the BLIP processor and model fine-tuned for chest X-ray report generation
+processor = BlipProcessor.from_pretrained("nathansutton/generate-cxr")
+model = BlipForConditionalGeneration.from_pretrained("nathansutton/generate-cxr")
+
+def humanize_report(report: str) -> str:
+    """Reformat the model's lowercase run-on output into titled report sections."""
+    # Promote the inline lowercase section markers to uppercase headers
+    report = (
+        report.replace("impression :", "IMPRESSION:\n")
+        .replace("findings :", "FINDINGS:\n")
+        .replace("indication :", "INDICATION:\n")
+    )
+    # Split into sentences, putting each header on its own item
+    sentences = [x.split("\n") for x in report.split(".") if x]
+    sentences = [item for sublist in sentences for item in sublist]
+    # Capitalize each sentence, leaving the section headers untouched
+    sentences = [x.strip().capitalize() if ":" not in x else x for x in sentences]
+    return (
+        ". ".join(sentences)
+        .replace(":.", ":")
+        .replace("IMPRESSION:", "\n\nIMPRESSION:\n\n")
+        .replace("FINDINGS:", "\n\nFINDINGS:\n\n")
+        .replace("INDICATION:", "INDICATION:\n\n")
+    )
+
+indication = st.text_input("What is the indication for this study?")
+img_file_buffer = st.file_uploader("Upload a single view from a chest X-ray (JPG preferred)")
+# st.text_input returns an empty string (never None) until the user types,
+# so test truthiness rather than `is not None`
+if img_file_buffer is not None and indication:
+    image = Image.open(img_file_buffer)
+    st.image(image, use_column_width=True)
+    # Condition the generated report on the user-supplied clinical indication
+    inputs = processor(
+        images=image,
+        text="indication:" + indication,
+        return_tensors="pt",
+    )
+    output = model.generate(**inputs, max_length=512)
+    report = processor.decode(output[0], skip_special_tokens=True)
+    st.write(humanize_report(report))
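A note on humanize_report: the model decodes the report as one lowercase run-on string with inline section markers, and the helper lifts those markers into titled sections. As an illustration (the input below is an invented sample, not actual model output), a decode such as

    indication : cough. findings : the lungs are clear. impression : no acute cardiopulmonary process.

comes back from humanize_report roughly as

    INDICATION:

    Cough.

    FINDINGS:

    The lungs are clear.

    IMPRESSION:

    No acute cardiopulmonary process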
requirements.txt ADDED
@@ -0,0 +1,3 @@
+streamlit>=1.17.0,<1.18
+transformers[torch]>=4.26.0,<4.27
+pillow>=9.4.0,<9.5
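To try the app locally under these pins (standard pip and Streamlit commands; nothing beyond the two files in this commit is assumed):

    pip install -r requirements.txt
    streamlit run app.py

Streamlit serves the app at http://localhost:8501 by default, and the first run downloads the nathansutton/generate-cxr weights from the Hugging Face Hub. Note that the streamlit pin predates 1.18, so caching the model load across reruns would use @st.experimental_singleton rather than the newer st.cache_resource.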