Spaces:
Runtime error
Runtime error
Commit
·
8f2697f
1
Parent(s):
5f57f2b
teste
Browse files- app.py +45 -28
- app2.py → app_old.py +22 -13
app.py
CHANGED
@@ -1,12 +1,12 @@
|
|
1 |
import io
|
2 |
-
|
3 |
import requests
|
4 |
import torch
|
5 |
from PIL import Image
|
6 |
-
|
7 |
from transformers import AutoModelForCausalLM, AutoProcessor, GenerationConfig
|
8 |
|
9 |
|
|
|
10 |
def download_image(url):
|
11 |
headers = {
|
12 |
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
|
@@ -16,6 +16,7 @@ def download_image(url):
|
|
16 |
return Image.open(io.BytesIO(resp.content)).convert("RGB")
|
17 |
|
18 |
|
|
|
19 |
def generate(images, prompt, processor, model, device, dtype, generation_config):
|
20 |
inputs = processor(
|
21 |
images=images[:2], text=f" USER: <s>{prompt} ASSISTANT: <s>", return_tensors="pt"
|
@@ -25,33 +26,49 @@ def generate(images, prompt, processor, model, device, dtype, generation_config)
|
|
25 |
return response
|
26 |
|
27 |
|
|
|
28 |
def main():
|
29 |
-
#
|
30 |
device = "cuda:0" if torch.cuda.is_available() else "cpu"
|
31 |
dtype = torch.float16
|
32 |
|
33 |
-
#
|
34 |
-
processor = AutoProcessor.from_pretrained("StanfordAIMI/CheXagent-8b", trust_remote_code=True)
|
35 |
-
generation_config = GenerationConfig.from_pretrained("StanfordAIMI/CheXagent-8b")
|
36 |
-
model = AutoModelForCausalLM.from_pretrained(
|
37 |
-
|
38 |
-
).to(device)
|
39 |
-
|
40 |
-
#
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
import io
|
|
|
2 |
import requests
|
3 |
import torch
|
4 |
from PIL import Image
|
5 |
+
import streamlit as st
|
6 |
from transformers import AutoModelForCausalLM, AutoProcessor, GenerationConfig
|
7 |
|
8 |
|
9 |
+
# Função para baixar e processar a imagem
|
10 |
def download_image(url):
|
11 |
headers = {
|
12 |
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
|
|
|
16 |
return Image.open(io.BytesIO(resp.content)).convert("RGB")
|
17 |
|
18 |
|
19 |
+
# Função para gerar a descrição com base na imagem e no prompt
|
20 |
def generate(images, prompt, processor, model, device, dtype, generation_config):
|
21 |
inputs = processor(
|
22 |
images=images[:2], text=f" USER: <s>{prompt} ASSISTANT: <s>", return_tensors="pt"
|
|
|
26 |
return response
|
27 |
|
28 |
|
29 |
+
# Função principal da interface Streamlit
|
30 |
@st.cache_resource
def _load_chexagent(device, dtype):
    """Load the CheXagent processor, generation config, and model exactly once.

    Streamlit reruns the whole script on every widget interaction; without
    caching, the ~8B-parameter model would be re-initialised on each rerun.
    `st.cache_resource` keeps a single shared instance across reruns.

    Args:
        device: Target device string ("cuda:0" or "cpu").
        dtype: Torch dtype used to load the model weights.

    Returns:
        Tuple of (processor, generation_config, model), with the model moved
        to `device`.
    """
    processor = AutoProcessor.from_pretrained("StanfordAIMI/CheXagent-8b", trust_remote_code=True)
    generation_config = GenerationConfig.from_pretrained("StanfordAIMI/CheXagent-8b")
    model = AutoModelForCausalLM.from_pretrained(
        "StanfordAIMI/CheXagent-8b", torch_dtype=dtype, trust_remote_code=True
    ).to(device)
    return processor, generation_config, model


def main():
    """Streamlit entry point for the CheXagent medical-image analysis demo.

    Renders inputs for an image URL and a free-text prompt; on button press,
    downloads the image, runs CheXagent on it, and displays the generated
    findings. Any failure (bad URL, HTTP error, model error) is surfaced via
    `st.error` instead of crashing the app.
    """
    # Pick the execution device (GPU if available, otherwise CPU).
    # NOTE(review): float16 on CPU may be slow or unsupported for some ops —
    # confirm on the target deployment.
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    dtype = torch.float16

    # Application title
    st.title("Análise de Imagem Médica com CheXagent")

    # Image URL input
    image_url = st.text_input("Insira o URL da imagem:", "https://upload.wikimedia.org/wikipedia/commons/3/3b/Pleural_effusion-Metastatic_breast_carcinoma_Case_166_%285477628658%29.jpg")

    # Prompt input
    prompt = st.text_input("Insira o prompt para a análise:", "Descreva o \"Airway\"")

    # Button that triggers the analysis
    if st.button("Gerar Análise"):
        try:
            # Step 1: download the image
            st.write("Baixando a imagem...")
            images = [download_image(image_url)]

            # Show the image in the interface
            st.image(images[0], caption="Imagem fornecida", use_column_width=True)

            # Step 2: generate the response.
            # Fix: model loading and the generate() call were commented out,
            # so the app rendered an empty "Análise Gerada:" section with no
            # content. The model is now loaded via the cached helper and the
            # generation result is actually displayed.
            st.write(f"Gerando a análise para: {prompt}...")
            processor, generation_config, model = _load_chexagent(device, dtype)
            response = generate(images, prompt, processor, model, device, dtype, generation_config)

            # Display the generated analysis
            st.subheader("Análise Gerada:")
            st.write(response)
        except Exception as e:
            st.error(f"Ocorreu um erro: {e}")


if __name__ == "__main__":
    main()
app2.py → app_old.py
RENAMED
@@ -1,15 +1,21 @@
|
|
1 |
import io
|
|
|
2 |
import requests
|
3 |
import torch
|
4 |
from PIL import Image
|
5 |
-
|
6 |
from transformers import AutoModelForCausalLM, AutoProcessor, GenerationConfig
|
7 |
|
|
|
8 |
def download_image(url):
|
9 |
-
|
|
|
|
|
|
|
10 |
resp.raise_for_status()
|
11 |
return Image.open(io.BytesIO(resp.content)).convert("RGB")
|
12 |
|
|
|
13 |
def generate(images, prompt, processor, model, device, dtype, generation_config):
|
14 |
inputs = processor(
|
15 |
images=images[:2], text=f" USER: <s>{prompt} ASSISTANT: <s>", return_tensors="pt"
|
@@ -18,31 +24,34 @@ def generate(images, prompt, processor, model, device, dtype, generation_config)
|
|
18 |
response = processor.tokenizer.decode(output, skip_special_tokens=True)
|
19 |
return response
|
20 |
|
|
|
21 |
def main():
|
22 |
-
|
23 |
-
|
24 |
-
device = "cuda"
|
25 |
dtype = torch.float16
|
26 |
|
|
|
27 |
processor = AutoProcessor.from_pretrained("StanfordAIMI/CheXagent-8b", trust_remote_code=True)
|
28 |
generation_config = GenerationConfig.from_pretrained("StanfordAIMI/CheXagent-8b")
|
29 |
model = AutoModelForCausalLM.from_pretrained(
|
30 |
"StanfordAIMI/CheXagent-8b", torch_dtype=dtype, trust_remote_code=True
|
31 |
).to(device)
|
32 |
|
|
|
33 |
image_path = "https://upload.wikimedia.org/wikipedia/commons/3/3b/Pleural_effusion-Metastatic_breast_carcinoma_Case_166_%285477628658%29.jpg"
|
34 |
images = [download_image(image_path)]
|
35 |
|
36 |
-
|
37 |
-
"Airway", "Breathing", "Cardiac", "Diaphragm",
|
38 |
-
"Everything else (e.g., mediastinal contours, bones, soft tissues, tubes, valves, and pacemakers)"
|
39 |
-
]
|
40 |
-
|
41 |
for anatomy in anatomies:
|
42 |
prompt = f'Describe "{anatomy}"'
|
43 |
response = generate(images, prompt, processor, model, device, dtype, generation_config)
|
44 |
-
|
45 |
-
|
|
|
46 |
|
47 |
if __name__ == '__main__':
|
48 |
-
|
|
|
|
|
|
|
|
|
|
1 |
import io
|
2 |
+
|
3 |
import requests
|
4 |
import torch
|
5 |
from PIL import Image
|
6 |
+
from rich import print
|
7 |
from transformers import AutoModelForCausalLM, AutoProcessor, GenerationConfig
|
8 |
|
9 |
+
|
10 |
def download_image(url):
|
11 |
+
headers = {
|
12 |
+
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
|
13 |
+
}
|
14 |
+
resp = requests.get(url, headers=headers)
|
15 |
resp.raise_for_status()
|
16 |
return Image.open(io.BytesIO(resp.content)).convert("RGB")
|
17 |
|
18 |
+
|
19 |
def generate(images, prompt, processor, model, device, dtype, generation_config):
|
20 |
inputs = processor(
|
21 |
images=images[:2], text=f" USER: <s>{prompt} ASSISTANT: <s>", return_tensors="pt"
|
|
|
24 |
response = processor.tokenizer.decode(output, skip_special_tokens=True)
|
25 |
return response
|
26 |
|
27 |
+
|
28 |
def main(anatomies=None):
    """Run the CheXagent findings demo end to end.

    Downloads a sample chest X-ray, loads the CheXagent-8b model, and prints
    a generated description for each anatomical region.

    Args:
        anatomies: Optional list of anatomy names to describe. Defaults to the
            standard five-region reading order used by the original script.
            (Backward-compatible addition: calling `main()` with no argument
            behaves exactly as before.)
    """
    # Fix: `anatomies` used to be a free variable created only inside the
    # `if __name__ == '__main__':` block, so importing this module and calling
    # main() raised NameError. It is now a defaulted parameter with the same
    # contents.
    if anatomies is None:
        anatomies = [
            "Airway", "Breathing", "Cardiac", "Diaphragm",
            "Everything else (e.g., mediastinal contours, bones, soft tissues, tubes, valves, and pacemakers)",
        ]

    # step 1: Setup constants (GPU if available, otherwise CPU).
    # NOTE(review): float16 on CPU may be slow or unsupported — confirm.
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    dtype = torch.float16

    # step 2: Load Processor and Model
    processor = AutoProcessor.from_pretrained("StanfordAIMI/CheXagent-8b", trust_remote_code=True)
    generation_config = GenerationConfig.from_pretrained("StanfordAIMI/CheXagent-8b")
    model = AutoModelForCausalLM.from_pretrained(
        "StanfordAIMI/CheXagent-8b", torch_dtype=dtype, trust_remote_code=True
    ).to(device)

    # step 3: Fetch the images
    image_path = "https://upload.wikimedia.org/wikipedia/commons/3/3b/Pleural_effusion-Metastatic_breast_carcinoma_Case_166_%285477628658%29.jpg"
    images = [download_image(image_path)]

    # step 4: Generate the Findings section, one region at a time
    for anatomy in anatomies:
        prompt = f'Describe "{anatomy}"'
        response = generate(images, prompt, processor, model, device, dtype, generation_config)
        print(f"Generating the Findings for [{anatomy}]:")
        print(response)


if __name__ == '__main__':
    main()