guidel committed
Commit e66cce0
1 Parent(s): 254ab54

Update app.py

Files changed (1):
  1. app.py +22 -11
app.py CHANGED
@@ -1,5 +1,5 @@
  import streamlit as st
- from transformers import CLIPModel, CLIPProcessor
+ from transformers import CLIPModel, CLIPProcessor, pipeline
  import torch
  from PIL import Image

@@ -35,20 +35,31 @@ def inference_clip(options, image, processor, model):
  #################################
  #### LAYOUT

- CLIP_large = load_clip(model_size='large')
+ #CLIP_large = load_clip(model_size='large')
+ model_name = "openai/clip-vit-large-patch14-336"
+ classifier = pipeline("zero-shot-image-classification", model = model_name)

+ #### Loading picture
  picture_file = st.file_uploader("Picture :", type=["jpg", "jpeg", "png"])
  if picture_file is not None:
      image = Image.open(picture_file)
      st.image(image, caption='Please upload an image of the damage', use_column_width=True)

+ col_l, col_r = st.columns(2)
+
  #image
- default_options = ['black', 'white', 'gray', 'red', 'blue', 'silver', 'red', 'brown', 'green', 'orange', 'beige', 'pruple', 'gold', 'yellow']
- options = st.text_input(label="Please enter the classes", value=default_options)
- #options = list(options)
-
- # button to launch compute
- if st.button("Compute"):
-     clip_processor, clip_model = load_clip(model_size='large')
-     result = inference_clip(options = options, image = image, processor=clip_processor, model=clip_model)
-     st.write(result)
+ with col_l:
+     default_options = ['black', 'white', 'gray', 'red', 'blue', 'silver', 'red', 'brown', 'green', 'orange', 'beige', 'pruple', 'gold', 'yellow']
+     options = st.text_input(label="Please enter the classes", value=default_options)
+     #options = list(options)
+
+     # button to launch compute
+     if st.button("Compute"):
+         #clip_processor, clip_model = load_clip(model_size='large')
+         #result = inference_clip(options = options, image = image, processor=clip_processor, model=clip_model)
+         scores = classifier(image,
+                             candidate_labels = options)
+
+ with col_r:
+     #st.write(result)
+     st.dataframe(scores)
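
Note on the new inference path: a zero-shot-image-classification pipeline returns a list of dicts with "label" and "score" keys, which is what st.dataframe(scores) ends up rendering, and candidate_labels expects a list of strings while st.text_input returns a single string. Below is a minimal standalone sketch of the same call, assuming the checkpoint used in the diff; the test image path, the example label string, and the splitting step are illustrative assumptions, not part of this commit.

# Standalone sketch of the pipeline call introduced above (assumptions noted inline).
from transformers import pipeline
from PIL import Image

classifier = pipeline("zero-shot-image-classification",
                      model="openai/clip-vit-large-patch14-336")

image = Image.open("damage.jpg")                 # hypothetical local test image
raw = "black, white, gray, red, blue"            # example of what st.text_input would return
labels = [s.strip() for s in raw.split(",")]     # candidate_labels needs a list of strings

scores = classifier(image, candidate_labels=labels)
# scores is a list of {'score': float, 'label': str} dicts, highest score first, e.g.
# [{'score': 0.83, 'label': 'red'}, {'score': 0.07, 'label': 'black'}, ...]
print(scores[0]["label"], scores[0]["score"])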