Spaces:
Sleeping
Sleeping
File size: 3,178 Bytes
e0f406c 8145c86 e0f406c 18b8295 e0f406c 8145c86 e0f406c |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 |
from .base_model import BaseModel
import openai
from openai import AsyncOpenAI, OpenAI
from tqdm import tqdm
import asyncio
import os
import traceback
class GPT4Model(BaseModel):
    """Thin wrapper around the OpenAI API for chat completions and embeddings.

    Credentials and endpoint are read from the ``OPENAI_API_KEY`` and
    ``OPENAI_API_BASE`` environment variables on every call, so they may be
    rotated without recreating the model object.  Each generation request is
    retried once before giving up and returning a fallback answer.
    """

    def __init__(self,
                 generation_model="gpt-4-vision-preview",
                 embedding_model="text-embedding-ada-002",
                 temperature=0,
                 ) -> None:
        # Model identifiers and sampling temperature are fixed per instance;
        # temperature=0 keeps generations as deterministic as the API allows.
        self.generation_model = generation_model
        self.embedding_model = embedding_model
        self.temperature = temperature

    async def respond_async(self, messages: list[dict]) -> str:
        """Asynchronously get a chat completion for ``messages``.

        Args:
            messages: OpenAI-format chat messages (``role``/``content`` dicts).

        Returns:
            The assistant's reply text, or ``"No answer provided."`` if both
            the request and its single retry fail.
        """
        client = AsyncOpenAI(
            api_key=os.environ["OPENAI_API_KEY"],
            base_url=os.environ["OPENAI_API_BASE"],
        )
        # One initial attempt plus one retry; transient API errors (rate
        # limits, timeouts) often succeed on the second try.
        for attempt in range(2):
            try:
                output = await client.chat.completions.create(
                    messages=messages,
                    model=self.generation_model,
                    temperature=self.temperature,
                    max_tokens=1000,
                )
                return output.choices[0].message.content
            except Exception:
                # Log only on the final failure to avoid noisy duplicate logs.
                if attempt == 1:
                    print(traceback.format_exc())
        return "No answer provided."

    def respond(self, messages: list[dict]) -> str:
        """Synchronously get a chat completion for ``messages``.

        Mirrors :meth:`respond_async`: one retry, then the fallback string
        ``"No answer provided."`` with the traceback printed for debugging.
        """
        client = OpenAI(
            api_key=os.environ["OPENAI_API_KEY"],
            base_url=os.environ["OPENAI_API_BASE"],
        )
        for attempt in range(2):
            try:
                return client.chat.completions.create(
                    messages=messages,
                    model=self.generation_model,
                    temperature=self.temperature,
                    max_tokens=1000,
                ).choices[0].message.content
            except Exception:
                if attempt == 1:
                    print(traceback.format_exc())
        return "No answer provided."

    def embedding(self, texts: list[str]) -> list[list[float]]:
        """Embed each string in ``texts`` with the configured embedding model.

        Requests are issued in batches of 2048 inputs, the maximum the OpenAI
        embeddings endpoint accepts per call.

        Args:
            texts: Strings to embed; an empty list yields an empty result.

        Returns:
            One embedding vector (list of floats) per input text, in order.
        """
        client = OpenAI(
            api_key=os.environ["OPENAI_API_KEY"],
            base_url=os.environ["OPENAI_API_BASE"],
        )
        batch_size = 2048  # API-imposed per-request input limit
        data = []
        for lower in range(0, len(texts), batch_size):
            batch = texts[lower:lower + batch_size]
            data += client.embeddings.create(
                input=batch,
                model=self.embedding_model,
            ).data
        return [d.embedding for d in data]