Update app.py
app.py CHANGED
@@ -36,9 +36,9 @@ import os
 openai.api_key= os.getenv('openai_appkey')
 def gpt3_short(question,vqa_answer,caption):
     vqa_answer,vqa_score=vqa_answer
-    prompt="
-    " B: "+vqa_answer[1]+" score:"+str(vqa_score[1])+" C: "+vqa_answer[2]+" score:"+str(vqa_score[2])+\
-    " D: "+vqa_answer[3]+'score:'+str(vqa_score[3])+\
+    prompt="This is the caption of a picture: "+caption+". Question: "+question+" VQA model predicts:"+"A: "+vqa_answer[0]+", socre:"+str(vqa_score[0])+\
+    "; B: "+vqa_answer[1]+", score:"+str(vqa_score[1])+"; C: "+vqa_answer[2]+", score:"+str(vqa_score[2])+\
+    "; D: "+vqa_answer[3]+', score:'+str(vqa_score[3])+\
     ". Choose A if it is not in conflict with the description of the picture and A's score is bigger than 0.8; otherwise choose the B, C or D based on the description. Answer with A or B or C or D."
 
     # prompt=caption+"\n"+question+"\n"+vqa_answer+"\n Tell me the right answer."
@@ -72,14 +72,14 @@ def gpt3_long(question,vqa_answer,caption):
     # " B: "+vqa_answer[1]+" score:"+str(vqa_score[1])+" C: "+vqa_answer[2]+" score:"+str(vqa_score[2])+\
     # " D: "+vqa_answer[3]+'score:'+str(vqa_score[3])+\
     # "Tell me the right answer with a long sentence."
+    prompt="This is the caption of a picture: "+caption+". Question: "+question+" VQA model predicts:"+" "+vqa_answer[0]+", socre:"+str(vqa_score[0])+\
+    "; "+vqa_answer[1]+", score:"+str(vqa_score[1])+"; "+vqa_answer[2]+", score:"+str(vqa_score[2])+\
+    "; "+vqa_answer[3]+', score:'+str(vqa_score[3])+\
+    ". Question: "+question+" Tell me the right answer with a sentence."
     # prompt="prompt: This is the caption of a picture: "+caption+". Question: "+question+" VQA model predicts:"+" "+vqa_answer[0]+" socre:"+str(vqa_score[0])+\
     # " "+vqa_answer[1]+" score:"+str(vqa_score[1])+" "+vqa_answer[2]+" score:"+str(vqa_score[2])+\
     # " "+vqa_answer[3]+'score:'+str(vqa_score[3])+\
-    # "Tell me the right answer with a sentence."
-    prompt="prompt: This is the caption of a picture: "+caption+". Question: "+question+" VQA model predicts:"+" "+vqa_answer[0]+" socre:"+str(vqa_score[0])+\
-    " "+vqa_answer[1]+" score:"+str(vqa_score[1])+" "+vqa_answer[2]+" score:"+str(vqa_score[2])+\
-    " "+vqa_answer[3]+'score:'+str(vqa_score[3])+\
-    "Tell me the right answer with a long sentence."
+    # "Tell me the right answer with a long sentence."
     # prompt=caption+"\n"+question+"\n"+vqa_answer+"\n Tell me the right answer."
     response = openai.Completion.create(
         engine="text-davinci-003",
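For a quick sanity check of the new prompt layout, here is a minimal standalone sketch (not part of the Space's code): the caption, question, answer candidates, and scores are made-up example values, and the string concatenation simply mirrors the added lines 39-42, including the committed "socre" spelling.

# Minimal sketch of the updated gpt3_short prompt, with hypothetical inputs.
caption = "a man riding a skateboard down a ramp"                # example value, not from the Space
question = "What is the man doing?"                              # example value
vqa_answer = ["skateboarding", "surfing", "skiing", "standing"]  # example VQA candidates
vqa_score = [0.91, 0.05, 0.03, 0.01]                             # example VQA confidences

prompt = "This is the caption of a picture: "+caption+". Question: "+question+" VQA model predicts:"+"A: "+vqa_answer[0]+", socre:"+str(vqa_score[0])+\
    "; B: "+vqa_answer[1]+", score:"+str(vqa_score[1])+"; C: "+vqa_answer[2]+", score:"+str(vqa_score[2])+\
    "; D: "+vqa_answer[3]+', score:'+str(vqa_score[3])+\
    ". Choose A if it is not in conflict with the description of the picture and A's score is bigger than 0.8; otherwise choose the B, C or D based on the description. Answer with A or B or C or D."

print(prompt)

The gpt3_long version follows the same pattern but drops the A/B/C/D labels and asks for a sentence-long answer; both strings are then passed to openai.Completion.create(engine="text-davinci-003", ...) as shown at lines 84-85.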