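"""Periodically ping each HelpingAI model with a test prompt.

Every five minutes, sends a simple chat completion request to every model
in MODELS, retrying failed requests a few times before moving on.
"""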
import os
import time
import openai
from dotenv import load_dotenv
import requests
import schedule
from rich import print
load_dotenv()
# Load environment variables
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
OPENAI_BASE_URL = os.getenv("OPENAI_BASE_URL", "https://api.helpingai.co/v1")
if not OPENAI_API_KEY:
    raise ValueError("OPENAI_API_KEY must be set in .env file")
# Initialize OpenAI client
client = openai.OpenAI(
    api_key=OPENAI_API_KEY,
    base_url=OPENAI_BASE_URL
)
# Hardcoded list of models
MODELS = [
    "HelpingAI2.5-10B",
    "HelpingAI2.5-2B",
    "HelpingAI2.5-5B",
    "HelpingAI-flash",
    "HelpingAI2-9B",
    "HelpingAI2-6B",
    "HelpingAI-15B",
    "HELVETE",
    "HELVETE-X",
    "Priya-3B",
    "HelpingAI2.5-10B-1M",
    "Cipher-20B",
    "HelpingAI2-3B"
]
# Function to make a request to a model with retry mechanism
def make_request(model_name):
    """Send a test prompt to the given model, retrying on failure."""
    max_retries = 3
    retry_delay = 10  # seconds
    for attempt in range(max_retries):
        try:
            print(f"Requesting model: {model_name}")
            response = client.chat.completions.create(
                model=model_name,
                messages=[{"role": "user", "content": "Hello, how are you?"}]
            )
            print(f"Response from {model_name}: {response.choices[0].message.content}\n")
            break  # Break if successful
        except Exception as e:
            print(f"Error with model {model_name}: {e}")
            if attempt < max_retries - 1:
                print(f"Retrying in {retry_delay} seconds...")
                time.sleep(retry_delay)
            else:
                print("Max retries reached. Could not get a response.")
def job():
    for model in MODELS:
        make_request(model)
if __name__ == "__main__":
    # Schedule the job every 5 minutes and keep the scheduler loop running
    schedule.every(5).minutes.do(job)
    while True:
        schedule.run_pending()
        time.sleep(1)