import os

import torch
from openai import OpenAI
from transformers import pipeline

client = OpenAI(
    api_key=os.environ.get("openai_key"),
)

# Load the local Hugging Face models once at import time so repeated calls reuse them.
pipes = {
    'GPT-Neo': pipeline("text-generation", model="EleutherAI/gpt-neo-2.7B"),
    'Llama 3': pipeline("text-generation", model="meta-llama/Meta-Llama-3-8B"),
}

# Map the user-facing model names to the corresponding OpenAI API model identifiers.
openai_models = {
    'OpenAI GPT 3.5': "gpt-3.5-turbo",
    'OpenAI GPT 4': "gpt-4-turbo",
    'OpenAI GPT 4o': "gpt-4o",
}


def generate(text, model):
    # Local models: run the text-generation pipeline and return the generated text.
    if model in pipes:
        response = pipes[model](text)
        return response[0]["generated_text"]
    # Hosted models: call the OpenAI chat completions endpoint.
    elif model in openai_models:
        messages = [{"role": "user", "content": text}]
        response = client.chat.completions.create(
            model=openai_models[model],
            messages=messages,
            temperature=0.2,
            max_tokens=800,
            frequency_penalty=0.0,
        )
        return response.choices[0].message.content
    raise ValueError(f"Unknown model: {model!r}")
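

# A minimal usage sketch. The prompt and the choice of models below are
# illustrative; the OpenAI branches assume a valid API key is available in the
# "openai_key" environment variable, and the local branches assume the
# Hugging Face weights have been downloaded (Llama 3 requires gated access).
if __name__ == "__main__":
    prompt = "Explain the difference between a list and a tuple in Python."
    for name in ("GPT-Neo", "OpenAI GPT 4o"):
        print(f"--- {name} ---")
        print(generate(prompt, name))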