import os

import openai
from openai import OpenAI

openai.api_key = os.environ.get("OPENAI_API_KEY")
openai.organization = os.environ.get("OPENAI_ORG_ID")

# System prompts for the different "personalities" used below.
models = {
    "assistant": "You are a helpful assistant.",
    "binary": "You are a machine that converts questions or prompts to binary outputs. "
              "You can only answer 'yes' or 'no'. If uncertain, default to 'no'."
}


def gpt4_new(prompt_text):
    # Uses the current (>= 1.0) OpenAI Python SDK client interface.
    client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
    response = client.chat.completions.create(
        model="gpt-4",
        messages=[
            {"role": "system",
             "content": "You are a machine that classifies documents."},
            {"role": "user", "content": prompt_text},
        ],
    )
    return response.choices[0].message.content


def gpt4(prompt, model=models["assistant"]):
    # Legacy (< 1.0) SDK call: chat models go through ChatCompletion, not Completion.
    response = openai.ChatCompletion.create(
        model="gpt-4",
        messages=[
            {"role": "system", "content": model},
            {"role": "user", "content": prompt},
        ],
    )
    return response.choices[0].message["content"]


def gpt_bool(prompt):
    """
    :param prompt: the text prompt
    :return: True or False
    """
    true_values = {"yes", "yes.", "y"}
    # Normalize the answer so capitalization and surrounding whitespace do not matter.
    return gpt4(prompt, model=models["binary"]).strip().lower() in true_values


def vectorize_data(data_input):
    # Legacy (< 1.0) SDK embedding call; returns a placeholder vector on bad input.
    try:
        response = openai.Embedding.create(input=data_input, model="text-embedding-ada-002")
    except openai.error.InvalidRequestError as err:
        print(err)
        return [0, 0, 0]
    return response["data"][0]["embedding"]
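

# A minimal sketch of the same embedding lookup with the >= 1.0 client interface
# used in gpt4_new; the function name vectorize_data_v1 is illustrative only.
def vectorize_data_v1(data_input):
    client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
    response = client.embeddings.create(input=data_input, model="text-embedding-ada-002")
    return response.data[0].embedding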


if __name__ == "__main__":
    print("Here are all functions that directly call openai.")
    print("Hi, I'm ChatGPT. How can I help?")
    while True:
        print(gpt_bool(input()))