Spaces:
Sleeping
Sleeping
Upload 2 files
Browse files
app.py
ADDED
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# app.py — Streamlit front end for the YouTube Q&A demo.
#
# Flow: the user supplies a YouTube URL and a question; main.app() builds a
# FAISS index over the video transcript and returns an LLM answer, which is
# rendered in the chat area.
#
# FIXES vs. original:
#   * `from main import *` replaced with an explicit import of the one name
#     actually used (`app`).
#   * Removed the duplicated `import textwrap` and the unused langchain
#     imports that were copy-pasted from main.py (they slowed startup and
#     did nothing here).
#   * Guarded against an empty URL: previously, submitting a query before
#     entering a URL crashed inside app() on a blank YouTube URL.
#   * Typo fix in the progress message ("asnwer" -> "answer").
import time

import streamlit as st
from dotenv import load_dotenv

from main import app

load_dotenv()

st.header("Youtube Q&A")

querry_input = st.chat_input()
url_input = st.text_input("Your Youtube url")

bot = st.chat_message("ai")
bot.write("Hello!,\n enter the url in the allocated area and type the query.")

if querry_input:
    user = st.chat_message("user")
    user.write(querry_input)
    bot = st.chat_message("ai")
    if not url_input:
        # app() cannot work without a video URL — tell the user instead of
        # crashing inside the transcript loader.
        bot.write("Please enter a Youtube url first, then resend your query.")
    else:
        with bot.status("Patience and thee shalt knoweth thy answ'r") as status:
            st.write("Creating Faiss database...")
            time.sleep(1)
            st.write("Generating answer for your querry....")

            response, docs = app(url=url_input, querry=querry_input)
            status.update(label="Done!", state="complete")
        bot = st.chat_message("ai")
        bot.write(response)
|
main.py
ADDED
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Prerequisite libraries
|
2 |
+
|
3 |
+
from dotenv import load_dotenv
|
4 |
+
from langchain.embeddings import HuggingFaceEmbeddings
|
5 |
+
from langchain.llms import HuggingFaceHub
|
6 |
+
from langchain.document_loaders import YoutubeLoader
|
7 |
+
from langchain.chains import LLMChain
|
8 |
+
from langchain.prompts import (SystemMessagePromptTemplate,HumanMessagePromptTemplate,
|
9 |
+
ChatPromptTemplate)
|
10 |
+
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
11 |
+
from langchain.vectorstores import FAISS
|
12 |
+
import textwrap
|
13 |
+
from langchain_google_genai import GoogleGenerativeAI
|
14 |
+
|
15 |
+
|
16 |
+
load_dotenv()
|
17 |
+
|
18 |
+
def app(url, querry):
    """Answer *querry* about the YouTube video at *url*.

    Pipeline: load the video transcript, split it into overlapping chunks,
    index the chunks in an in-memory FAISS store, retrieve the 3 chunks most
    similar to the question, and ask the LLM to answer using only that
    retrieved context.

    Args:
        url: Full YouTube video URL.
        querry: The user's question about the video (spelling kept for
            backward compatibility with existing callers).

    Returns:
        tuple: (response, docs) — the model's answer string and the list of
        retrieved transcript Documents it was grounded on.
    """
    loader = YoutubeLoader.from_youtube_url(youtube_url=url)
    transcript = loader.load()

    embeddings = HuggingFaceEmbeddings()
    rcts = RecursiveCharacterTextSplitter(chunk_size=400, chunk_overlap=20)
    docs = rcts.split_documents(transcript)

    db = FAISS.from_documents(docs, embeddings)

    # Retrieve the top-3 most relevant chunks and merge them into one
    # context string that fills the {source} slot of the system prompt.
    docs = db.similarity_search(querry, k=3)
    docs_page_content = " ".join(d.page_content for d in docs)

    # FIX: corrected the misspellings in the instruction text
    # ("imformation" -> "information", "dont" -> "don't") so the model
    # receives a clean prompt; the template variables are unchanged.
    template = """
    hey you are a very helpful Ai assistant who is able to answer question about youtube videos based on the video's
    transcript: {source}
    Only use the factual information gathered from the transcript to answer the question also answer it in a very detailed manner more than 30 words.
    If you feel that you don't have enough information to answer the question say "I don't have enough information in order to answer this question".
    """

    llm = GoogleGenerativeAI(model="models/text-bison-001")

    system_msg_template = SystemMessagePromptTemplate.from_template(template)
    human_template = "Answer the following question: {question}"
    human_msg_template = HumanMessagePromptTemplate.from_template(human_template)

    chat_prompt = ChatPromptTemplate.from_messages(
        [system_msg_template, human_msg_template],
    )

    chain = LLMChain(llm=llm, prompt=chat_prompt)

    response = chain.run(question=querry, source=docs_page_content)

    return response, docs
|