autonomous019 committed on
Commit
c7d234c
1 Parent(s): 8028287

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +20 -15
app.py CHANGED
@@ -41,20 +41,7 @@ def query(payload):
41
  #model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
42
 
43
 
44
- config = ViTConfig(num_hidden_layers=12, hidden_size=768)
45
- model = ViTForImageClassification(config)
46
 
47
- #print(config)
48
-
49
- feature_extractor = ViTFeatureExtractor()
50
- # or, to load one that corresponds to a checkpoint on the hub:
51
- #feature_extractor = ViTFeatureExtractor.from_pretrained("google/vit-base-patch16-224")
52
-
53
- #the following gets called by classify_image()
54
- feature_extractor = PerceiverFeatureExtractor.from_pretrained("deepmind/vision-perceiver-conv")
55
- model = PerceiverForImageClassificationConvProcessing.from_pretrained("deepmind/vision-perceiver-conv")
56
- #google/vit-base-patch16-224, deepmind/vision-perceiver-conv
57
- image_pipe = ImageClassificationPipeline(model=model, feature_extractor=feature_extractor)
58
 
59
 
60
 
@@ -168,12 +155,30 @@ def self_caption(image):
168
  print(pred_dictionary)
169
  #return(pred_dictionary)
170
  preds = ' '.join(preds)
171
- story = create_story(preds)
172
- story = ' '.join(story)
 
 
173
  return story
174
 
175
 
176
  def classify_image(image):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
177
  results = image_pipe(image)
178
 
179
  print("RESULTS")
 
41
  #model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
42
 
43
 
 
 
44
 
 
 
 
 
 
 
 
 
 
 
 
45
 
46
 
47
 
 
155
  print(pred_dictionary)
156
  #return(pred_dictionary)
157
  preds = ' '.join(preds)
158
+ #inference(input_sentence, max_length, sample_or_greedy, seed=42)
159
+ story = inference(preds, 32, "Sample", 42)
160
+ #story = create_story(preds)
161
+ #story = ' '.join(story)
162
  return story
163
 
164
 
165
  def classify_image(image):
166
+ config = ViTConfig(num_hidden_layers=12, hidden_size=768)
167
+ model = ViTForImageClassification(config)
168
+
169
+ #print(config)
170
+
171
+ feature_extractor = ViTFeatureExtractor()
172
+ # or, to load one that corresponds to a checkpoint on the hub:
173
+ #feature_extractor = ViTFeatureExtractor.from_pretrained("google/vit-base-patch16-224")
174
+
175
+ #the following gets called by classify_image()
176
+ feature_extractor = PerceiverFeatureExtractor.from_pretrained("deepmind/vision-perceiver-conv")
177
+ model = PerceiverForImageClassificationConvProcessing.from_pretrained("deepmind/vision-perceiver-conv")
178
+ #google/vit-base-patch16-224, deepmind/vision-perceiver-conv
179
+ image_pipe = ImageClassificationPipeline(model=model, feature_extractor=feature_extractor)
180
+
181
+
182
  results = image_pipe(image)
183
 
184
  print("RESULTS")