import json
import os

import requests
import gradio as gr
from gradio.mix import Parallel  # gradio.mix is only available in older gradio releases (2.x/3.x)
from transformers import (
    ViTFeatureExtractor,
    ImageClassificationPipeline,
    PerceiverForImageClassificationConvProcessing,
    PerceiverFeatureExtractor,
    VisionEncoderDecoderModel,
    AutoTokenizer,
)

# Constants: the BLOOM Inference API endpoint and the access token.
# API_URL can be overridden via the environment and falls back to the public endpoint;
# HF_API_TOKEN is read from the environment (set as a repo secret on a Space).
API_URL = os.getenv("API_URL", "https://api-inference.huggingface.co/models/bigscience/bloom")
HF_API_TOKEN = os.getenv("HF_API_TOKEN")
headers = {"Authorization": f"Bearer {HF_API_TOKEN}"}

print(API_URL)  # do not print HF_API_TOKEN: it is a secret


def query(payload):
    """POST a generation request to the BLOOM Inference API and return the parsed JSON."""
    print(payload)
    response = requests.post(API_URL, json=payload, headers=headers)
    print(response)
    return response.json()
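# Example (assumed) round-trip, matching the Inference API text-generation
# response shape that inference() relies on below:
#   query({"inputs": "Once upon a time", "parameters": {"max_new_tokens": 8}})
#   -> [{"generated_text": "Once upon a time there was ..."}]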



def inference(input_sentence, max_length, sample_or_greedy, seed=42):
    if sample_or_greedy == "Sample":
        parameters = {
            "max_new_tokens": max_length,
            "top_p": 0.9,
            "do_sample": True,
            "seed": seed,
            "early_stopping": False,
            "length_penalty": 0.0,
            "eos_token_id": None,
        }
    else:
        parameters = {
            "max_new_tokens": max_length,
            "do_sample": False,
            "seed": seed,
            "early_stopping": False,
            "length_penalty": 0.0,
            "eos_token_id": None,
        }

    payload = {"inputs": input_sentence, "parameters": parameters,"options" : {"use_cache": False} }

    data = query(payload)

    if "error" in data:
        return (None, None, f"<span style='color:red'>ERROR: {data['error']} </span>")

    # The API echoes the prompt at the start of generated_text; slice it off
    # rather than split on it, which would raise if the prompt were not found.
    generated_text = data[0]["generated_text"]
    generation = generated_text[len(input_sentence):]
    print(generation)
    return input_sentence + generation
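# Example usage (sampling, 64 new tokens), mirroring the call in self_caption() below:
#   story = inference("a cat is sitting on a mat", 64, "Sample")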





def self_caption(image):
    """Caption the image with ViT-GPT2, then expand the caption into a story with BLOOM."""
    # Note: the models are reloaded on every call; for a long-running app these
    # loads could be hoisted to module level.
    repo_name = "ydshieh/vit-gpt2-coco-en"
    feature_extractor2 = ViTFeatureExtractor.from_pretrained(repo_name)
    tokenizer = AutoTokenizer.from_pretrained(repo_name)
    model2 = VisionEncoderDecoderModel.from_pretrained(repo_name)

    pixel_values = feature_extractor2(image, return_tensors="pt").pixel_values
    # Autoregressively generate the caption with beam search.
    output = model2.generate(pixel_values, max_length=16, num_beams=4, return_dict_in_generate=True)

    # Decode token ids into text; output.sequences holds the generated ids.
    preds = tokenizer.batch_decode(output.sequences, skip_special_tokens=True)
    preds = [pred.strip() for pred in preds]
    print("Predictions:", preds)

    # Use the caption as the BLOOM prompt and sample a 64-token continuation.
    caption = " ".join(preds)
    story = inference(caption, 64, "Sample", 42)
    return story
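# Example usage with one of the bundled example images:
#   from PIL import Image
#   print(self_caption(Image.open("cats.jpg")))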


def classify_image(image):
    """Classify the image with Perceiver IO and return a {label: score} mapping for Gradio."""
    # An alternative checkpoint would be "google/vit-base-patch16-224" with the ViT classes.
    feature_extractor = PerceiverFeatureExtractor.from_pretrained("deepmind/vision-perceiver-conv")
    model = PerceiverForImageClassificationConvProcessing.from_pretrained("deepmind/vision-perceiver-conv")
    image_pipe = ImageClassificationPipeline(model=model, feature_extractor=feature_extractor)

    results = image_pipe(image)
    print("RESULTS:", results)

    # Convert the pipeline's [{"label": ..., "score": ...}, ...] list into the
    # {label: score} dict that gr.outputs.Label expects.
    output = {prediction["label"]: prediction["score"] for prediction in results}
    print("OUTPUT:", output)
    return output
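# Example usage with one of the bundled example images:
#   from PIL import Image
#   scores = classify_image(Image.open("cats.jpg"))
#   # scores maps predicted labels to confidence values, suitable for gr.outputs.Label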


image = gr.inputs.Image(type="pil")
label = gr.outputs.Label(num_top_classes=5)
examples = [["cats.jpg"], ["batter.jpg"], ["drinkers.jpg"]]
title = "Generate a Story from an Image using BLOOM"
description = "Demo for classifying images with Perceiver IO. Upload an image and click 'submit': the image is classified, and a caption-based story is generated with BigScience BLOOM."
article = "<p style='text-align: center'></p>"

img_info1 = gr.Interface(
    fn=classify_image,
    inputs=image,
    outputs=label,
)

img_info2 = gr.Interface(
    fn=self_caption,
    inputs=image,
    outputs=[gr.outputs.Textbox(label="Story")],
)
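
# Parallel runs both interfaces on the same image input and shows the
# classification and the generated story side by side.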

Parallel(img_info1, img_info2, inputs=image, title=title, description=description, examples=examples, enable_queue=True).launch(debug=True)