import io

import requests
import torch
from PIL import Image
from rich import print
from transformers import AutoModelForCausalLM, AutoProcessor, GenerationConfig


def download_image(url):
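    """Fetch an image over HTTP using a browser-like User-Agent (some hosts reject the default requests UA) and return it as an RGB PIL image."""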
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
    }
    resp = requests.get(url, headers=headers)
    resp.raise_for_status()
    return Image.open(io.BytesIO(resp.content)).convert("RGB")


def generate(images, prompt, processor, model, device, dtype, generation_config):
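    """Build the chat-style " USER: ... ASSISTANT: " prompt, run generation on at most the first two images, and decode the model output to text."""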
    inputs = processor(
        images=images[:2], text=f" USER: <s>{prompt} ASSISTANT: <s>", return_tensors="pt"
    ).to(device=device, dtype=dtype)
    output = model.generate(**inputs, generation_config=generation_config)[0]
    response = processor.tokenizer.decode(output, skip_special_tokens=True)
    return response


def main(anatomies):
    # step 1: Set up constants
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    dtype = torch.float16
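    # Note: float16 is intended for GPU inference; on CPU, torch.float32 is a safer choice.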

    # step 2: Load the processor, generation config, and model (trust_remote_code allows the repository's custom model code to run)
    processor = AutoProcessor.from_pretrained("StanfordAIMI/CheXagent-8b", trust_remote_code=True)
    generation_config = GenerationConfig.from_pretrained("StanfordAIMI/CheXagent-8b")
    model = AutoModelForCausalLM.from_pretrained(
        "StanfordAIMI/CheXagent-8b", torch_dtype=dtype, trust_remote_code=True
    ).to(device)

    # step 3: Fetch the image
    print("Downloading image...")
    image_url = "https://upload.wikimedia.org/wikipedia/commons/3/3b/Pleural_effusion-Metastatic_breast_carcinoma_Case_166_%285477628658%29.jpg"
    images = [download_image(image_url)]

    # step 4: Generate the Findings section, one anatomical region at a time
    print("Analyzing image...")
    for anatomy in anatomies:
        prompt = f'Describe "{anatomy}"'
        response = generate(images, prompt, processor, model, device, dtype, generation_config)
        print(f"Findings for [{anatomy}]:")
        print(response)

    print("Done!")


if __name__ == '__main__':
    print("Starting Findings generation...")
    # Anatomical regions to describe; extend this list to cover additional regions.
    anatomies = [
        "Airway",
    ]
    main(anatomies)