import openai
import os
import argparse
import json
import ast
from multiprocessing.pool import Pool


def parse_args():
    parser = argparse.ArgumentParser(description="question-answer-generation-using-gpt-3")
    parser.add_argument("--pred_path", required=True, help="The path to the file containing predictions.")
    parser.add_argument("--output_dir", required=True, help="The path to save per-sample annotation json files.")
    parser.add_argument("--output_json", required=True, help="The path to save the final combined annotation json file.")
    parser.add_argument("--api_key", required=True, help="OpenAI API key.")
    parser.add_argument("--api_base", default="", type=str, help="OpenAI API base URL (optional).")
    parser.add_argument("--num_tasks", required=True, type=int, help="Number of splits.")
    args = parser.parse_args()
    return args
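
# Example invocation (hypothetical script and file names, for illustration only):
#   python evaluate_consistency.py \
#       --pred_path preds.json \
#       --output_dir consistency_annotations \
#       --output_json consistency_combined.json \
#       --api_key "$OPENAI_API_KEY" \
#       --num_tasks 8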


def annotate(prediction_set, caption_files, output_dir, args):
    """
    Evaluates question-answer pairs using GPT-3.5 and
    returns a score for consistency.
    """
    # Set the OpenAI API key (and the optional base URL).
    openai.api_key = args.api_key
    if args.api_base:  # The default is "", which must not override the API base.
        openai.api_base = args.api_base
    for file in caption_files:
        key = file[:-5]  # Strip the ".json" file extension.
        qa_set = prediction_set[key]
        question1 = qa_set['q1']
        question2 = qa_set['q2']
        answer = qa_set['a']
        pred1 = qa_set['pred1']
        pred2 = qa_set['pred2']
        try:
            # Compute the consistency score.
            completion = openai.ChatCompletion.create(
                model="gpt-3.5-turbo",
                messages=[
                    {
                        "role": "system",
                        "content":
                            "You are an intelligent chatbot designed for evaluating the consistency of generative outputs for similar video-based question-answer pairs. "
                            "You will be given two very similar questions, a common answer to both questions, and predicted answers for the two questions. "
                            "Your task is to compare the predicted answers for the two very similar questions against the common correct answer and determine if they are consistent. Here's how you can accomplish the task:"
                            "------"
                            "##INSTRUCTIONS: "
                            "- Focus on the consistency between the two predicted answers and the correct answer. Both predicted answers should correspond to the correct answer and to each other, and should not contain any contradictions or significant differences in the conveyed information.\n"
                            "- Both predicted answers must be consistent with each other and the correct answer, in terms of the information they provide about the video content.\n"
                            "- Consider synonyms or paraphrases as valid matches, but only if they maintain the consistency in the conveyed information.\n"
                            "- Evaluate the consistency of the two predicted answers compared to the correct answer."
                    },
                    {
                        "role": "user",
                        "content":
                            "Please evaluate the following video-based question-answer pair:\n\n"
                            f"Question 1: {question1}\n"
                            f"Question 2: {question2}\n"
                            f"Correct Answer: {answer}\n"
                            f"Predicted Answer to Question 1: {pred1}\n"
                            f"Predicted Answer to Question 2: {pred2}\n\n"
                            "Provide your evaluation only as a consistency score, where the consistency score is an integer value between 0 and 5, with 5 indicating the highest level of consistency. "
                            "Please generate the response in the form of a Python dictionary string with the key 'score', where its value is the consistency score as an INTEGER, not a STRING. "
                            "DO NOT PROVIDE ANY OTHER OUTPUT TEXT OR EXPLANATION. Only provide the Python dictionary string. "
                            "For example, your response should look like this: {'score': 4}."
                    }
                ]
            )
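            # A well-formed reply is a Python dict literal such as "{'score': 4}"
            # (illustrative value); ast.literal_eval below parses it into a dict.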
            # Convert the response to a Python dictionary.
            response_message = completion["choices"][0]["message"]["content"]
            response_dict = ast.literal_eval(response_message)
            result_qa_pair = [response_dict, qa_set]
            # Save the question-answer pair to a json file.
            with open(f"{output_dir}/{key}.json", "w") as f:
                json.dump(result_qa_pair, f)
        except Exception as e:
            print(f"Error processing file '{key}': {e}")


def main():
    """
    Main function to control the flow of the program.
    """
    # Parse arguments.
    args = parse_args()
    with open(args.pred_path) as f:
        pred_contents = json.load(f)
    # Dictionary to store the count of occurrences for each video_id.
    video_id_counts = {}
    new_pred_contents = []
    # Iterate through each sample in pred_contents, disambiguating repeated video ids.
    for sample in pred_contents:
        video_id = sample['video_name']
        if video_id in video_id_counts:
            video_id_counts[video_id] += 1
        else:
            video_id_counts[video_id] = 0
        # Create a new sample with the modified key.
        new_sample = sample
        new_sample['video_name'] = f"{video_id}_{video_id_counts[video_id]}"
        new_pred_contents.append(new_sample)
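    # An input record in pred_path is expected to look like this
    # (illustrative values, inferred from the keys read below):
    #   {"video_name": "v_abc123", "Q1": "...", "Q2": "...",
    #    "A": "...", "pred1": "...", "pred2": "..."}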
    # Generate the list of ids and corresponding files.
    id_list = [x['video_name'] for x in new_pred_contents]
    caption_files = [f"{id}.json" for id in id_list]
    output_dir = args.output_dir
    # Create the output directory if it does not exist.
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # Prepare the dictionary of question-answer sets.
    prediction_set = {}
    for sample in new_pred_contents:
        id = sample['video_name']
        question1 = sample['Q1']
        question2 = sample['Q2']
        answer = sample['A']
        pred1 = sample['pred1']
        pred2 = sample['pred2']
        qa_set = {"q1": question1, "q2": question2, "a": answer, "pred1": pred1, "pred2": pred2}
        prediction_set[id] = qa_set
    # Set the OpenAI API key.
    openai.api_key = args.api_key
    num_tasks = args.num_tasks
    # While loop to ensure that all captions are processed.
    while True:
        try:
            # Files that have already been processed.
            completed_files = os.listdir(output_dir)
            print(f"completed_files: {len(completed_files)}")
            # Files that have not been processed yet.
            incomplete_files = [f for f in caption_files if f not in completed_files]
            print(f"incomplete_files: {len(incomplete_files)}")
            # Break the loop when there are no incomplete files.
            if len(incomplete_files) == 0:
                break
            if len(incomplete_files) <= num_tasks:
                num_tasks = 1
            # Split the remaining tasks into parts.
            part_len = len(incomplete_files) // num_tasks
            all_parts = [incomplete_files[i:i + part_len] for i in range(0, len(incomplete_files), part_len)]
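            # Chunking example (illustrative): with 10 incomplete files and
            # num_tasks = 3, part_len = 3, so all_parts holds chunks of sizes
            # 3, 3, 3 and 1; the leftover chunk becomes one extra worker task.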
            task_args = [(prediction_set, part, args.output_dir, args) for part in all_parts]
            # Use a pool of workers to process the files in parallel.
            with Pool() as pool:
                pool.starmap(annotate, task_args)
        except Exception as e:
            print(f"Error: {e}")
    # Combine all the processed files into one.
    combined_contents = {}
    json_path = args.output_json
    # Iterate through the json files.
    for file_name in os.listdir(output_dir):
        if file_name.endswith(".json"):
            file_path = os.path.join(output_dir, file_name)
            with open(file_path, "r") as json_file:
                content = json.load(json_file)
                combined_contents[file_name[:-5]] = content
    # Write the combined content to a json file.
    with open(json_path, "w") as json_file:
        json.dump(combined_contents, json_file)
    print("All evaluation completed!")
    # Calculate the average consistency score.
    score_sum = 0
    count = 0
    for key, result in combined_contents.items():
        count += 1
        score = int(result[0]['score'])
        score_sum += score
    average_score = score_sum / count
    print("Average score for consistency:", average_score)


if __name__ == "__main__":
    main()