eagle0504 committed on
Commit
f669fa2
·
verified ·
1 Parent(s): 9446adc

Update helper/utils.py

Browse files
Files changed (1) hide show
  1. helper/utils.py +7 -7
helper/utils.py CHANGED
@@ -100,7 +100,7 @@ def read_and_textify(
100
  return text_list, sources_list
101
 
102
 
103
- client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
104
 
105
 
106
  def list_to_nums(sentences: List[str]) -> List[List[float]]:
@@ -121,7 +121,7 @@ def list_to_nums(sentences: List[str]) -> List[List[float]]:
121
  for sentence in sentences:
122
  # Use the OpenAI API to get embeddings for the sentence
123
 
124
- response = client.embeddings.create(
125
  input=sentence, model="text-embedding-3-small"
126
  )
127
 
@@ -146,12 +146,12 @@ def call_gpt(prompt: str, content: str) -> str:
146
  Returns:
147
  str: The generated response from the GPT model based on the given prompts and content.
148
 
149
- Note: 'client' is assumed to be an already created and authenticated instance of the OpenAI
150
- client, which should be set up prior to calling this function.
151
  """
152
 
153
  # Generates a response from the model based on the interactive messages provided
154
- response = client.chat.completions.create(
155
  model="gpt-3.5-turbo", # The AI model being queried for a response
156
  messages=[
157
  # System message defining the assistant's role
@@ -169,7 +169,7 @@ def call_gpt(prompt: str, content: str) -> str:
169
  return response.choices[0].message.content
170
 
171
 
172
- client = Together(api_key=os.environ["TOGETHER_API_KEY"])
173
 
174
  def call_llama(prompt: str) -> str:
175
  """
@@ -181,7 +181,7 @@ def call_llama(prompt: str) -> str:
181
  """
182
 
183
  # Create a completion request with the prompt
184
- response = client.chat.completions.create(
185
 
186
  # Use the Llama-3-8b-chat-hf model
187
  model="meta-llama/Llama-3-8b-chat-hf",
 
100
  return text_list, sources_list
101
 
102
 
103
+ openai_client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
104
 
105
 
106
  def list_to_nums(sentences: List[str]) -> List[List[float]]:
 
121
  for sentence in sentences:
122
  # Use the OpenAI API to get embeddings for the sentence
123
 
124
+ response = openai_client.embeddings.create(
125
  input=sentence, model="text-embedding-3-small"
126
  )
127
 
 
146
  Returns:
147
  str: The generated response from the GPT model based on the given prompts and content.
148
 
149
+ Note: 'openai_client' is assumed to be an already created and authenticated instance of the OpenAI
150
+ openai_client, which should be set up prior to calling this function.
151
  """
152
 
153
  # Generates a response from the model based on the interactive messages provided
154
+ response = openai_client.chat.completions.create(
155
  model="gpt-3.5-turbo", # The AI model being queried for a response
156
  messages=[
157
  # System message defining the assistant's role
 
169
  return response.choices[0].message.content
170
 
171
 
172
+ together_client = Together(api_key=os.environ["TOGETHER_API_KEY"])
173
 
174
  def call_llama(prompt: str) -> str:
175
  """
 
181
  """
182
 
183
  # Create a completion request with the prompt
184
+ response = together_client.chat.completions.create(
185
 
186
  # Use the Llama-3-8b-chat-hf model
187
  model="meta-llama/Llama-3-8b-chat-hf",