from transformers import ViTConfig, ViTForImageClassification
from transformers import ViTFeatureExtractor
from PIL import Image
import requests
import matplotlib.pyplot as plt
import gradio as gr
from gradio.mix import Parallel  # gradio.mix is only available in older Gradio releases (pre-4.0)
from transformers import ImageClassificationPipeline, PerceiverForImageClassificationConvProcessing, PerceiverFeatureExtractor
from transformers import VisionEncoderDecoderModel
from transformers import AutoTokenizer
import torch
from transformers import (
    AutoModelForCausalLM,
    LogitsProcessorList,
    MinLengthLogitsProcessor,
    StoppingCriteriaList,
    MaxLengthCriteria,
)
import json
import os
from screenshot import (
    before_prompt,
    prompt_to_generation,
    after_generation,
    js_save,
    js_load_script,
)
from spaces_info import description, examples, initial_prompt_value

# hosted inference API endpoint and access token, read from environment variables
API_URL = os.getenv("API_URL")
HF_API_TOKEN = os.getenv("HF_API_TOKEN")


def query(payload):
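    # POST the payload to the hosted inference endpoint (API_URL) and return the parsed JSON response.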
    print(payload)
    response = requests.request("POST", API_URL, json=payload, headers={"Authorization": f"Bearer {HF_API_TOKEN}"})
    print(response)
    return json.loads(response.content.decode("utf-8"))

# https://github.com/NielsRogge/Transformers-Tutorials/blob/master/HuggingFace_vision_ecosystem_overview_(June_2022).ipynb
# option 1: load with randomly initialized weights (train from scratch)

#tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
#model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")


config = ViTConfig(num_hidden_layers=12, hidden_size=768)
model = ViTForImageClassification(config)

#print(config)

feature_extractor = ViTFeatureExtractor()
# or, to load one that corresponds to a checkpoint on the hub:
#feature_extractor = ViTFeatureExtractor.from_pretrained("google/vit-base-patch16-224")

# the Perceiver feature extractor, model, and pipeline below are what classify_image() uses;
# they replace the randomly initialized ViT defined above
feature_extractor = PerceiverFeatureExtractor.from_pretrained("deepmind/vision-perceiver-conv")
model = PerceiverForImageClassificationConvProcessing.from_pretrained("deepmind/vision-perceiver-conv")
# alternative checkpoints: google/vit-base-patch16-224, deepmind/vision-perceiver-conv
image_pipe = ImageClassificationPipeline(model=model, feature_extractor=feature_extractor)




def inference(input_sentence, max_length, sample_or_greedy, seed=42):
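    # Build generation parameters for either sampling or greedy decoding, send the prompt
    # to the hosted text-generation endpoint via query(), and format the result for display.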
    if sample_or_greedy == "Sample":
        parameters = {
            "max_new_tokens": max_length,
            "top_p": 0.9,
            "do_sample": True,
            "seed": seed,
            "early_stopping": False,
            "length_penalty": 0.0,
            "eos_token_id": None,
        }
    else:
        parameters = {
            "max_new_tokens": max_length,
            "do_sample": False,
            "seed": seed,
            "early_stopping": False,
            "length_penalty": 0.0,
            "eos_token_id": None,
        }

    payload = {"inputs": input_sentence, "parameters": parameters, "options": {"use_cache": False}}

    data = query(payload)

    if "error" in data:
        return (None, None, f"<span style='color:red'>ERROR: {data['error']} </span>")

    generation = data[0]["generated_text"].split(input_sentence, 1)[1]
    return (
        before_prompt
        + input_sentence
        + prompt_to_generation
        + generation
        + after_generation,
        data[0]["generated_text"],
        "",
    )





def create_story(text_seed):
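  # Greedily generate a short continuation of text_seed with GPT-Neo 125M,
  # capping the output at 100 tokens via MaxLengthCriteria.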
  #tokenizer = AutoTokenizer.from_pretrained("gpt2")
  #model = AutoModelForCausalLM.from_pretrained("gpt2")
  
  # EleutherAI GPT-Neo (GPT-3-style architecture)
  tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-125M")
  model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-125M")

  # set pad_token_id to eos_token_id because GPT-Neo, like GPT-2, has no padding token
  model.config.pad_token_id = model.config.eos_token_id

  #input_prompt = "It might be possible to"
  input_prompt = text_seed
  input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids

  # instantiate logits processors
  logits_processor = LogitsProcessorList(
    [
        MinLengthLogitsProcessor(10, eos_token_id=model.config.eos_token_id),
    ]
  )
  stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=100)])

  outputs = model.greedy_search(
    input_ids, logits_processor=logits_processor, stopping_criteria=stopping_criteria
  )

  result_text = tokenizer.batch_decode(outputs, skip_special_tokens=True)
  return result_text






def self_caption(image):
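  # Caption the input image with the ViT-GPT2 encoder-decoder checkpoint,
  # then hand the caption to create_story() to expand it into a short story.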
  repo_name = "ydshieh/vit-gpt2-coco-en"
  #test_image = "cats.jpg"
  test_image = image
  #url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
  #test_image = Image.open(requests.get(url, stream=True).raw)
  #test_image.save("cats.png")
  
  feature_extractor2 = ViTFeatureExtractor.from_pretrained(repo_name)
  tokenizer = AutoTokenizer.from_pretrained(repo_name)
  model2 = VisionEncoderDecoderModel.from_pretrained(repo_name)
  pixel_values = feature_extractor2(test_image, return_tensors="pt").pixel_values
  print("Pixel Values")
  print(pixel_values)
  # autoregressively generate text (using beam search or other decoding strategy)
  generated_ids = model2.generate(pixel_values, max_length=16, num_beams=4, return_dict_in_generate=True)
  
  # decode into text
  preds = tokenizer.batch_decode(generated_ids.sequences, skip_special_tokens=True)
  preds = [pred.strip() for pred in preds]
  print("Predictions")
  print(preds)
  print("The preds type is : ",type(preds))
  pred_keys = ["Prediction"]
  pred_value = preds

  pred_dictionary = dict(zip(pred_keys, pred_value))
  print("Pred dictionary")
  print(pred_dictionary)
  #return(pred_dictionary)
  preds = ' '.join(preds)
  story = create_story(preds)
  story = ' '.join(story)
  return story


def classify_image(image):
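  # Run the Perceiver image-classification pipeline and convert its list of
  # {label, score} predictions into the dict format Gradio's Label output expects.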
  results = image_pipe(image)
  
  print("RESULTS")
  print(results)
  # convert to format Gradio expects
  output = {}
  for prediction in results:
    predicted_label = prediction['label']
    score = prediction['score']
    output[predicted_label] = score
  print("OUTPUT")
  print(output)
  return output


image = gr.inputs.Image(type="pil")
label = gr.outputs.Label(num_top_classes=5)
examples = [["cats.jpg"], ["batter.jpg"], ["drinkers.jpg"]]
title = "Generate a Story from an Image"
description = "Demo for classifying images with Perceiver IO. To use it, simply upload an image and click 'submit', a story is autogenerated as well"
article = "<p style='text-align: center'></p>"

img_info1 = gr.Interface(
    fn=classify_image,
    inputs=image,
    outputs=label,
)

img_info2 = gr.Interface(
    fn=self_caption,
    inputs=image,
    #outputs=label,
    outputs=[
        gr.outputs.Textbox(label="Story")
    ],
)

Parallel(img_info1,img_info2, inputs=image, title=title, description=description, examples=examples, enable_queue=True).launch(debug=True)
#Parallel(img_info1,img_info2, inputs=image, outputs=label, title=title, description=description, examples=examples, enable_queue=True).launch(debug=True)