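# Gradio chat app that recommends YouTube videos: user queries are matched against a
# FAISS index of video-transcript chunks, and GPT-4o composes the recommendations.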
import json
import gradio as gr
import pandas as pd
from openai import OpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import FAISS
MODEL = "gpt-4o"
API_KEY = "sk-proj-FV9lzQDevcA7M7yllkL7T3BlbkFJgjk8JBewp08UwSFJwaXD"
# BASE_URL = "https://youtu.be/"
client = OpenAI(api_key = API_KEY)
embeddings = OpenAIEmbeddings(model = "text-embedding-3-large", api_key = API_KEY)
yt_chunks = FAISS.load_local("vector-large", embeddings, allow_dangerous_deserialization = True)
df = pd.read_csv("data/ko-youtube-trans-U10k.csv")
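# Look up the k most similar transcript chunks in the FAISS index and return the
# corresponding rows of the source CSV as dictionaries.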
def find_docs(message):
    finding_docs = yt_chunks.similarity_search(message, k = 5)
    indices = [doc.metadata['row'] for doc in finding_docs]
    retrievers = [json.loads(df.loc[idx].to_json(force_ascii = False)) for idx in indices]
    return retrievers
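# Chat handler for gr.ChatInterface: retrieves context for the latest message,
# rebuilds the conversation for the Chat Completions API, and streams the reply.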
def predict(message, history):
    openai_input = list()
    retriever = find_docs(message)
    system_prompt = """- You are an AI chat bot that recommends YouTube content to users as an assistant.\n- You were created and powered by 'bigster (빅스터)', an AI & bigdata expert company.\n- Recommend YouTube content to users based on what's in 'retriever'.\n- If the user's question is not related to content recommendations, please display a message declining to answer.\n- You must recommend at least 3 YouTube content items to the user based on the information in the 'retriever'. Be sure to explicitly include 'url' & 'videoChannelName' & 'videoName' information in your response. Also, for each featured piece of content, summarize what's in the 'transcription' and present it to the user. Use the following Markdown format to create hyperlinks: '[videoName](url)'\n\n retriever:\n{retriever}"""
    for human, assistant in history:
        openai_input.append({"role": "user", "content": human})
        openai_input.append({"role": "assistant", "content": assistant})
    openai_input = [item for item in openai_input if item['role'] != "system"]
    openai_input.append({"role": "system", "content": system_prompt.format(retriever = retriever)})
    openai_input.append({"role": "user", "content": message})
    response = client.chat.completions.create(
        model = MODEL,
        messages = openai_input,
        temperature = 1.0,
        stream = True
    )
    partial_message = ""
    for chunk in response:
        if chunk.choices[0].delta.content is not None:
            partial_message = partial_message + chunk.choices[0].delta.content
            yield partial_message
    print(openai_input)
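# Build the chat UI and expose it with a public share link behind basic auth.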
gr.ChatInterface(
    predict,
    title = "YOUTUBE REC",
    theme = gr.themes.Soft(primary_hue = "purple"),
    examples = [
        "What's your name?",
        "Recommend a video to help me learn the Python programming language.",
        "Recommend a video for me; I'm feeling a deep sense of loss in my personal relationships.",
        "Recommend a deep learning lecture that is simple and easy for beginners to understand.",
        "I'd like to study French history. Please recommend documentaries or lecture videos about French history.",
        "I'm writing essays these days. Please recommend videos that show how to write more effectively."
    ]
).launch(share = True, auth = ("user", "bigster123"))