#!/usr/bin/env python3
import torch
import numpy as np
import gradio as gr
from torch.nn.functional import softmax
from transformers import GPT2Tokenizer, GPT2LMHeadModel
from sentence_transformers import SentenceTransformer, util

# Sentence embedding model used to measure caption / visual-context similarity.
model_sts = SentenceTransformer('stsb-distilbert-base')

# Load the pre-trained GPT-2 language model and tokenizer used for caption scoring.
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2LMHeadModel.from_pretrained('gpt2')
model.eval()  # inference only


def sentence_prob_mean(text):
    # Tokenize the input text (GPT-2's tokenizer adds no special tokens by default)
    input_ids = tokenizer.encode(text, return_tensors='pt')

    # Obtain model logits (no gradient needed for scoring)
    with torch.no_grad():
        outputs = model(input_ids)
        logits = outputs.logits  # raw next-token scores before softmax

    # Shift logits and labels so that tokens are aligned:
    shift_logits = logits[..., :-1, :].contiguous()
    shift_labels = input_ids[..., 1:].contiguous()

    # Calculate the softmax probabilities
    probs = softmax(shift_logits, dim=-1)

    # Gather the probabilities of the actual token IDs
    gathered_probs = torch.gather(probs, 2, shift_labels.unsqueeze(-1)).squeeze(-1)

    # Compute the mean probability across the tokens
    mean_prob = torch.mean(gathered_probs).item()

    return mean_prob
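
# Illustrative usage (comment sketch; the exact value depends on the model weights):
#   sentence_prob_mean("a city street filled with traffic at night")
# returns the mean per-token probability of the sentence under GPT-2, a float in
# (0, 1); fluent captions score higher than ungrammatical ones.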




def cos_sim(a, b):
    # Cosine similarity for NumPy vectors; kept as a reference helper
    # (the app itself uses sentence_transformers.util.pytorch_cos_sim).
    return np.inner(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
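
# Example (hypothetical vectors, for illustration only):
#   cos_sim(np.array([1.0, 0.0]), np.array([1.0, 1.0]))  # ~0.7071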


  
def Visual_re_ranker(caption, visual_context_label, visual_context_prob):
    # Embed the caption and the visual context label with the sentence encoder.
    caption_emb = model_sts.encode(caption, convert_to_tensor=True)
    visual_context_label_emb = model_sts.encode(visual_context_label, convert_to_tensor=True)

    # Cosine similarity between the caption and the visual context label.
    sim = util.pytorch_cos_sim(caption_emb, visual_context_label_emb).item()

    # Initial hypothesis: mean token probability of the caption under GPT-2.
    LM = sentence_prob_mean(caption)

    # Belief revision: revise the language-model prior by the visual evidence,
    # score = LM ** (((1 - sim) / (1 + sim)) ** (1 - visual_context_prob)).
    score = pow(float(LM), pow((1 - float(sim)) / (1 + float(sim)), 1 - float(visual_context_prob)))

    return {"init hypothesis": float(LM), "Visual Belief Revision": float(score)}
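
# Illustrative call (output values depend on the loaded models):
#   Visual_re_ranker("a city street filled with traffic at night", "traffic", "0.7458009")
# returns a dict with the initial LM hypothesis and the visually revised score.
# As sim approaches 1 (caption matches the visual context), the revision
# exponent approaches 0 and the score is pushed toward 1.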



demo = gr.Interface(
    fn=Visual_re_ranker,
    description="Demo for Belief Revision based Caption Re-ranker with Visual Semantic Information",
    inputs=[
        gr.Textbox(value="a city street filled with traffic at night", label="Caption"),
        gr.Textbox(value="traffic", label="Visual context label"),
        gr.Textbox(value="0.7458009", label="Visual context probability"),
    ],
    outputs="label",
)
demo.launch()