Update app.py
app.py
CHANGED
@@ -227,11 +227,9 @@ def chatbot(policy_name_dd,contract_text,progress=gr.Progress()):
     with open('analysis_input.txt', 'w', encoding='utf-8') as out_file:
         out_file.write(trimmed_input)
     """
-
+    gpt_model = "gpt-4-1106-preview"
     response = openai.ChatCompletion.create(
-
-        model="gpt-4-1106-preview",
-        # model="gpt-3.5-turbo",
+        model=gpt_model,
         temperature=0,
         messages=[
             {
@@ -248,10 +246,15 @@ def chatbot(policy_name_dd,contract_text,progress=gr.Progress()):
     gpt_response = response.choices[0].message["content"]
 
     tokens_used = response.usage
-
-
-
-
+    if gpt_model == "gpt-4":
+        question_cost = (tokens_used.get('total_tokens', 0) / 1000) * .03
+        prompt_tokens = tokens_used.get('prompt_tokens', 0)
+        completion_tokens = tokens_used.get('completion_tokens', 0)
+
+    else:
+        prompt_tokens = tokens_used.get('prompt_tokens', 0)
+        completion_tokens = tokens_used.get('completion_tokens', 0)
+        question_cost = ((prompt_tokens / 1000) * .01) + ((completion_tokens / 1000) * .03)
 
     """
     with open('response.txt', 'w', encoding='utf-8') as out_file: