thlinhares committed
Commit 9176d4d · 1 Parent(s): fb815d1

adjust main

Files changed (1)
  1. app.py +44 -15
app.py CHANGED
@@ -3,23 +3,52 @@ import io
 import requests
 import torch
 from PIL import Image
+from rich import print
 from transformers import AutoModelForCausalLM, AutoProcessor, GenerationConfig

-# step 1: Setup constant
-device = "cuda"
-dtype = torch.float16
+
+def download_image(url):
+    resp = requests.get(url)
+    resp.raise_for_status()
+    return Image.open(io.BytesIO(resp.content)).convert("RGB")
+

-# step 2: Load Processor and Model
-processor = AutoProcessor.from_pretrained("StanfordAIMI/CheXagent-8b", trust_remote_code=True)
-generation_config = GenerationConfig.from_pretrained("StanfordAIMI/CheXagent-8b")
-model = AutoModelForCausalLM.from_pretrained("StanfordAIMI/CheXagent-8b", torch_dtype=dtype, trust_remote_code=True)
+def generate(images, prompt, processor, model, device, dtype, generation_config):
+    inputs = processor(
+        images=images[:2], text=f" USER: <s>{prompt} ASSISTANT: <s>", return_tensors="pt"
+    ).to(device=device, dtype=dtype)
+    output = model.generate(**inputs, generation_config=generation_config)[0]
+    response = processor.tokenizer.decode(output, skip_special_tokens=True)
+    return response
+
+
+def main():
+    # step 1: Setup constant
+    device = "cuda"
+    dtype = torch.float16

-# step 3: Fetch the images
-image_path = "https://upload.wikimedia.org/wikipedia/commons/3/3b/Pleural_effusion-Metastatic_breast_carcinoma_Case_166_%285477628658%29.jpg"
-images = [Image.open(io.BytesIO(requests.get(image_path).content)).convert("RGB")]
+    # step 2: Load Processor and Model
+    processor = AutoProcessor.from_pretrained("StanfordAIMI/CheXagent-8b", trust_remote_code=True)
+    generation_config = GenerationConfig.from_pretrained("StanfordAIMI/CheXagent-8b")
+    model = AutoModelForCausalLM.from_pretrained(
+        "StanfordAIMI/CheXagent-8b", torch_dtype=dtype, trust_remote_code=True
+    ).to(device)

-# step 4: Generate the Findings section
-prompt = f'Describe "Airway"'
-inputs = processor(images=images, text=f" USER: <s>{prompt} ASSISTANT: <s>", return_tensors="pt").to(device=device, dtype=dtype)
-output = model.generate(**inputs, generation_config=generation_config)[0]
-response = processor.tokenizer.decode(output, skip_special_tokens=True)
+    # step 3: Fetch the images
+    image_path = "https://upload.wikimedia.org/wikipedia/commons/3/3b/Pleural_effusion-Metastatic_breast_carcinoma_Case_166_%285477628658%29.jpg"
+    images = [download_image(image_path)]
+
+    # step 4: Generate the Findings section
+    for anatomy in anatomies:
+        prompt = f'Describe "{anatomy}"'
+        response = generate(images, prompt, processor, model, device, dtype, generation_config)
+        print(f"Generating the Findings for [{anatomy}]:")
+        print(response)
+
+
+if __name__ == '__main__':
+    anatomies = [
+        "Airway", "Breathing", "Cardiac", "Diaphragm",
+        "Everything else (e.g., mediastinal contours, bones, soft tissues, tubes, valves, and pacemakers)"
+    ]
+    main()
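
For quick local testing outside the Space UI, a minimal usage sketch of the helpers this commit introduces, run for a single anatomy rather than the full loop. Assumptions: app.py is importable from the working directory, a CUDA GPU is available, and requests, torch, Pillow, rich, and transformers are installed; the model name, prompt format, and image URL are taken from the diff above.

# Minimal sketch: reuse download_image() and generate() from app.py for one prompt.
# Importing app only pulls in the helpers; the model is loaded here because
# main() keeps processor/model local, guarded behind if __name__ == '__main__'.
import torch
from transformers import AutoModelForCausalLM, AutoProcessor, GenerationConfig

from app import download_image, generate

device, dtype = "cuda", torch.float16

# Same model/processor setup as main() in app.py.
processor = AutoProcessor.from_pretrained("StanfordAIMI/CheXagent-8b", trust_remote_code=True)
generation_config = GenerationConfig.from_pretrained("StanfordAIMI/CheXagent-8b")
model = AutoModelForCausalLM.from_pretrained(
    "StanfordAIMI/CheXagent-8b", torch_dtype=dtype, trust_remote_code=True
).to(device)

images = [download_image(
    "https://upload.wikimedia.org/wikipedia/commons/3/3b/"
    "Pleural_effusion-Metastatic_breast_carcinoma_Case_166_%285477628658%29.jpg"
)]
print(generate(images, 'Describe "Cardiac"', processor, model, device, dtype, generation_config))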