from fastapi import FastAPI, status
from fastapi.responses import HTMLResponse, JSONResponse, StreamingResponse
from pydantic import BaseModel
import requests
import json
import os
import time
import openai
from langchain.embeddings.openai import OpenAIEmbeddings


class Text(BaseModel):
    content: str = ""


app = FastAPI()

# Read the OpenAI API key from the environment rather than hard-coding it in the source.
key = os.environ.get("OPENAI_API_KEY", "")
openai.api_key = key

headers = {
    'Content-Type': 'application/json',
    'Authorization': 'Bearer ' + key
}
@app.get("/")
def home():
html_content = open('index.html').read()
return HTMLResponse(content=html_content, status_code=200)
@app.post("/qa_maker")
def sentiment_analysis_ep(content: Text = None):
url = 'https://api.openai.com/v1/chat/completions'
prompt = 'According to the article below, generate "question and answer" QA pairs, greater than 5, in a json format per line({“question”:"xxx","answer":"xxx"})generate:\n'
messages = [{"role": "user", "content": prompt + content.content}]
data = {
"model": "gpt-3.5-turbo",
"messages": messages
}
print("messages = \n", messages)
result = requests.post(url=url,
data=json.dumps(data),
headers=headers
)
res = str(result.json()['choices'][0]['message']['content']).strip()
print('res:', res)
res = {'content': res}
return JSONResponse(content=res)
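
# Illustrative client call for /qa_maker (the localhost URL/port is an assumption;
# the request body shape follows the Text model above):
#
#   import requests
#   r = requests.post("http://localhost:8000/qa_maker",
#                     json={"content": "<article text>"})
#   print(r.json()["content"])  # newline-separated {"question": ..., "answer": ...} objects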
@app.post("/chatpdf")
def chat_pdf_ep(content: Text = None):
url = 'https://api.openai.com/v1/chat/completions'
messages = [
{
"role": "system",
"content": "You are a useful assistant to answer questions accurately using the content of the article."
}
]
obj = json.loads(content.content)
messages.append({"role": "system", "content": "Article content:\n" + obj['doc']})
history = obj['history']
for his in history:
messages.append({"role": "user", "content": his[0]})
messages.append({"role": "assistant", "content": his[1]})
messages.append({"role": "user", "content": obj['question']})
data = {
"model": "gpt-3.5-turbo",
"messages": messages
}
print("messages = \n", messages)
result = requests.post(url=url,
data=json.dumps(data),
headers=headers
)
res = str(result.json()['choices'][0]['message']['content']).strip()
content = {'content': res}
print('content:', content)
return JSONResponse(content=content)
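
# Illustrative client call for /chatpdf (URL/port is an assumption; 'doc', 'history'
# and 'question' are the fields parsed above, with history as [user, assistant] pairs):
#
#   import requests, json
#   inner = {"doc": "<article text>",
#            "history": [["Hi", "Hello, how can I help?"]],
#            "question": "What is the article about?"}
#   r = requests.post("http://localhost:8000/chatpdf",
#                     json={"content": json.dumps(inner)})
#   print(r.json()["content"])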
@app.post("/sale")
def sale_ep(content: Text = None):
url = 'https://api.openai.com/v1/chat/completions'
messages = [
{
"role": "system",
"content": "You are a useful assistant to answer questions accurately using the content of the article"
}
]
obj = json.loads(content.content)
messages.append({"role": "system", "content": "Article content:\n" + obj['doc']})
history = obj['history']
for his in history:
messages.append({"role": "user", "content": his[0]})
messages.append({"role": "assistant", "content": his[1]})
messages.append({"role": "user", "content": obj['question']})
data = {
"model": "gpt-3.5-turbo",
"messages": messages
}
print("messages = \n", messages)
result = requests.post(url=url,
data=json.dumps(data),
headers=headers
)
res = str(result.json()['choices'][0]['message']['content']).strip()
content = {'content': res}
print('content:', content)
return JSONResponse(content=content)
@app.post("/chatgpt")
def chat_gpt_ep(content: Text = None):
url = 'https://api.openai.com/v1/chat/completions'
obj = json.loads(content.content)
data = {
"model": "gpt-3.5-turbo",
"messages": obj['messages']
}
print("data = \n", data)
result = requests.post(url=url,
data=json.dumps(data),
headers=headers
)
res = str(result.json()['choices'][0]['message']['content']).strip()
content = {'content': res}
print('content:', content)
return JSONResponse(content=content)
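
# Illustrative client call for /chatgpt (URL/port is an assumption; the inner JSON
# must carry a 'messages' list in the Chat Completions format used above):
#
#   import requests, json
#   inner = {"messages": [{"role": "user", "content": "Say hello in French."}]}
#   r = requests.post("http://localhost:8000/chatgpt",
#                     json={"content": json.dumps(inner)})
#   print(r.json()["content"])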


async def chat_gpt_stream_fun(content: Text = None):
    start_time = time.time()
    obj = json.loads(content.content)
    response = openai.ChatCompletion.create(
        model='gpt-3.5-turbo',
        messages=obj['messages'],
        stream=True,  # this time, we set stream=True
    )
    # create variables to collect the stream of chunks
    collected_chunks = []
    collected_messages = []
    # iterate through the stream of events
    for chunk in response:
        chunk_time = time.time() - start_time  # calculate the time delay of the chunk
        collected_chunks.append(chunk)  # save the event response
        chunk_message = chunk['choices'][0]['delta']  # extract the message
        collected_messages.append(chunk_message)  # save the message
        print(f"Message received {chunk_time:.2f} seconds after request: {chunk_message}")  # print the delay and text
    full_reply_content = ''.join([m.get('content', '') for m in collected_messages])
    print(f"Full conversation received: {full_reply_content}")
    content = {'content': full_reply_content}
    print('content:', content)
    yield json.dumps(content) + '\n'
@app.post("/chatgptstream", status_code=status.HTTP_200_OK)
async def get_random_numbers(content: Text = None):
return StreamingResponse(chat_gpt_stream_fun(content), media_type='application/json')
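
# Illustrative client call for /chatgptstream (URL/port is an assumption). The
# generator above yields a single JSON line once the OpenAI stream has been fully
# collected, so the client can simply iterate over the response lines:
#
#   import requests, json
#   inner = {"messages": [{"role": "user", "content": "Tell me a short joke."}]}
#   with requests.post("http://localhost:8000/chatgptstream",
#                      json={"content": json.dumps(inner)}, stream=True) as r:
#       for line in r.iter_lines():
#           if line:
#               print(json.loads(line)["content"])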
@app.post("/embeddings")
def embeddings_ep(content: Text = None):
url = 'https://api.openai.com/v1/embeddings'
data = {
"model": "text-embedding-ada-002",
"input": content.content
}
result = requests.post(url=url,
data=json.dumps(data),
headers=headers
)
return JSONResponse(content=result.json())
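
# Illustrative client call for /embeddings (URL/port is an assumption; the response
# is OpenAI's raw embeddings payload forwarded as-is):
#
#   import requests
#   r = requests.post("http://localhost:8000/embeddings",
#                     json={"content": "hello world"})
#   print(r.json()["data"][0]["embedding"][:5])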
@app.post("/embedd")
def embed(content: Text = None):
url = 'https://api.openai.com/v1/embeddings'
data = {
"model": "text-embedding-ada-002",
"input": content.content
}
result = requests.post(url=url,
data=json.dumps(data),
headers=headers
)
embeddings = OpenAIEmbeddings(openai_api_key= key)
return key
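
# Illustrative client call for /embedd (URL/port is an assumption):
#
#   import requests
#   r = requests.post("http://localhost:8000/embedd",
#                     json={"content": "hello world"})
#   print(len(r.json()["embedding"]))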
@app.post("/create_image")
def create_image_ep(content: Text = None):
url = 'https://api.openai.com/v1/images/generations'
obj = json.loads(content.content)
data = {
"prompt": obj["prompt"],
"n": obj["n"],
"size": obj["size"]
}
print("data = \n", data)
result = requests.post(url=url,
data=json.dumps(data),
headers=headers
)
return JSONResponse(content=result.json())
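
# Illustrative client call for /create_image (URL/port is an assumption; 'prompt',
# 'n' and 'size' are the fields read from the inner JSON above):
#
#   import requests, json
#   inner = {"prompt": "a watercolor fox", "n": 1, "size": "256x256"}
#   r = requests.post("http://localhost:8000/create_image",
#                     json={"content": json.dumps(inner)})
#   print(r.json())


# Minimal local entry point, assuming uvicorn is installed; the host/port values
# are illustrative defaults, not part of the original deployment.
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)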