poemsforaphrodite committed on
Commit
a2a4c11
1 Parent(s): af2b19d

Create runner.py

Browse files
Files changed (1) hide show
  1. runner.py +62 -0
runner.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
from openai import OpenAI
from dotenv import load_dotenv

# Load environment variables from a local .env file so the API key does
# not have to be exported in the shell before running this module.
load_dotenv()

# Module-level OpenAI client shared by all runner functions below.
# Reads OPENAI_API_KEY from the environment (populated by load_dotenv above);
# if the variable is missing, api_key is None and API calls will fail at
# request time rather than at import time.
openai_client = OpenAI(api_key=os.getenv('OPENAI_API_KEY'))
11
def run_gpt4o_mini(question):
    """Ask the gpt-4o-mini model a question and return its answer text.

    Args:
        question: Either a plain value (converted with str()) or a dict
            carrying the actual prompt under the 'prompt' key.

    Returns:
        The model's reply as a string, or None when the API call fails
        (the error is printed rather than raised).
    """
    try:
        # Normalise the input: dicts supply the text via 'prompt',
        # anything else is stringified as-is.
        prompt = question['prompt'] if isinstance(question, dict) and 'prompt' in question else str(question)

        conversation = [
            {"role": "system", "content": "You are a helpful assistant. Answer the question to the best of your ability."},
            {"role": "user", "content": prompt},
        ]
        completion = openai_client.chat.completions.create(
            model="gpt-4o-mini",
            messages=conversation,
        )
        return completion.choices[0].message.content
    except Exception as exc:
        # Best-effort API call: report the failure and signal it with None.
        print(f"Error running GPT-4o-mini: {str(exc)}")
        return None
30
+
31
def run_gpt4o(question):
    """Ask the gpt-4o model a question and return its answer text.

    Args:
        question: Either a plain value (converted with str()) or a dict
            carrying the actual prompt under the 'prompt' key.

    Returns:
        The model's reply as a string, or None when the API call fails
        (the error is printed rather than raised).
    """
    try:
        # Normalise the input: dicts supply the text via 'prompt',
        # anything else is stringified as-is.
        if isinstance(question, dict) and 'prompt' in question:
            question_text = question['prompt']
        else:
            question_text = str(question)

        response = openai_client.chat.completions.create(
            # BUG FIX: this function previously requested "gpt-4o-mini"
            # (copy-paste from run_gpt4o_mini); it must use "gpt-4o".
            model="gpt-4o",
            messages=[
                {"role": "system", "content": "You are a helpful assistant. Answer the question to the best of your ability."},
                {"role": "user", "content": question_text}
            ]
        )
        return response.choices[0].message.content
    except Exception as e:
        # Best-effort API call: report the failure (with the correct model
        # name in the message) and signal it with None.
        print(f"Error running GPT-4o: {str(e)}")
        return None
50
+
51
def run_custom_model(model_name, question):
    """Stub runner for non-OpenAI models.

    Real custom-model integration is not implemented yet; this simply
    returns a canned placeholder string mentioning the model name.
    The *question* argument is accepted for interface symmetry with the
    other runners but is not used.
    """
    placeholder = f"Custom model {model_name} response: This is a placeholder answer for the question provided."
    return placeholder
55
+
56
def run_model(model_name, question):
    """Route *question* to the runner registered for *model_name*.

    Known OpenAI models dispatch to their dedicated runners; any other
    name falls back to the custom-model placeholder runner.
    """
    # Table-driven dispatch for the known OpenAI runners.
    runners = {
        "gpt-4o-mini": run_gpt4o_mini,
        "gpt-4o": run_gpt4o,
    }
    runner = runners.get(model_name)
    if runner is not None:
        return runner(question)
    return run_custom_model(model_name, question)