import os
import json
import gradio as gr
import pandas as pd
from openai import OpenAI
from langchain_openai import OpenAIEmbeddings
from langchain_community.vectorstores import FAISS
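
# The OpenAI API key and the Gradio login password are read from environment
# variables; MODEL names the chat model used to generate recommendations.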
API_KEY = os.getenv("YOUR_KEY")
PASSWORD = os.getenv("PASSWORD")
MODEL = "gpt-4o"
client = OpenAI(api_key = API_KEY)
embeddings = OpenAIEmbeddings(model = "text-embedding-3-large", api_key = API_KEY)
yt_chunks = FAISS.load_local("vector-large", embeddings, allow_dangerous_deserialization = True)
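
# Table of YouTube metadata and transcripts; each chunk in the FAISS index
# references its source row in this frame via doc.metadata['row'].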
df = pd.read_csv("data/ko-youtube-trans-U10k.csv")
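
# Retrieve the 5 transcript chunks most similar to the user's message and return
# the matching CSV rows (url, videoChannelName, videoName, transcription, ...) as dicts.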
def find_docs(message):
    finding_docs = yt_chunks.similarity_search(message, k = 5)
    indices = [doc.metadata['row'] for doc in finding_docs]
    retrievers = [json.loads(df.loc[idx].to_json(force_ascii = False)) for idx in indices]
    return retrievers
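
# Chat handler for gr.ChatInterface: rebuild the conversation, attach a system prompt
# carrying the retrieved video records, and stream the model's recommendation back.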
def predict(message, history):
    openai_input = list()
    retriever = find_docs(message)
    system_prompt = """- You are an AI chat bot that recommends YouTube content to users as an assistant.\n- You were created and powered by 'bigster (빅스터)', an AI & bigdata expert company.\n- Recommend YouTube content to users based on what's in 'retriever'.\n- If the user's question is not related to content recommendations, please display a message declining to answer.\n- You must recommend at least 3 YouTube content items to the user based on the information in the 'retriever'. Be sure to explicitly include 'url' & 'videoChannelName' & 'videoName' information in your response. Also, for each featured piece of content, summarize what's in the 'transcription' and present it to the user. Use the following Markdown format to create hyperlinks: '[videoName](url)'\n\n retriever:\n{retriever}"""
    # Replay the prior turns so the model keeps conversational context.
    for human, assistant in history:
        openai_input.append({"role": "user", "content": human})
        openai_input.append({"role": "assistant", "content": assistant})
    openai_input = [item for item in openai_input if item['role'] != "system"]
    openai_input.append({"role": "system", "content": system_prompt.format(retriever = retriever)})
    openai_input.append({"role": "user", "content": message})
    # Stream the completion, yielding the partial text so the chat UI updates incrementally.
    response = client.chat.completions.create(
        model = MODEL,
        messages = openai_input,
        temperature = 1.0,
        stream = True
    )
    partial_message = ""
    for chunk in response:
        if chunk.choices[0].delta.content is not None:
            partial_message = partial_message + chunk.choices[0].delta.content
            yield partial_message
    print(openai_input)
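
# Chat UI; launch() is gated behind basic authentication using the PASSWORD env var.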
gr.ChatInterface(
    predict,
    title = "YOUTUBE REC",
    theme = gr.themes.Soft(primary_hue = "purple"),
    examples = [
        "Recommend videos for learning the Python programming language.",
    ]
).launch(share = True, auth = ("user", PASSWORD))