Boltuzamaki committed on
Commit
b7b243c
1 Parent(s): 525026c
Files changed (5) hide show
  1. .gitignore +162 -0
  2. app.py +87 -0
  3. src/__init__.py +0 -0
  4. src/qna.py +118 -0
  5. src/youtube_audio_loader.py +39 -0
.gitignore ADDED
@@ -0,0 +1,162 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ share/python-wheels/
24
+ *.egg-info/
25
+ .installed.cfg
26
+ *.egg
27
+ MANIFEST
28
+
29
+ # PyInstaller
30
+ # Usually these files are written by a python script from a template
31
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
32
+ *.manifest
33
+ *.spec
34
+
35
+ # Installer logs
36
+ pip-log.txt
37
+ pip-delete-this-directory.txt
38
+
39
+ # Unit test / coverage reports
40
+ htmlcov/
41
+ .tox/
42
+ .nox/
43
+ .coverage
44
+ .coverage.*
45
+ .cache
46
+ nosetests.xml
47
+ coverage.xml
48
+ *.cover
49
+ *.py,cover
50
+ .hypothesis/
51
+ .pytest_cache/
52
+ cover/
53
+
54
+ # Translations
55
+ *.mo
56
+ *.pot
57
+
58
+ # Django stuff:
59
+ *.log
60
+ local_settings.py
61
+ db.sqlite3
62
+ db.sqlite3-journal
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ .pybuilder/
76
+ target/
77
+
78
+ # Jupyter Notebook
79
+ .ipynb_checkpoints
80
+
81
+ # IPython
82
+ profile_default/
83
+ ipython_config.py
84
+
85
+ # pyenv
86
+ # For a library or package, you might want to ignore these files since the code is
87
+ # intended to run in multiple environments; otherwise, check them in:
88
+ # .python-version
89
+
90
+ # pipenv
91
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
93
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
94
+ # install all needed dependencies.
95
+ #Pipfile.lock
96
+
97
+ # poetry
98
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
100
+ # commonly ignored for libraries.
101
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102
+ #poetry.lock
103
+
104
+ # pdm
105
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106
+ #pdm.lock
107
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108
+ # in version control.
109
+ # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
110
+ .pdm.toml
111
+ .pdm-python
112
+ .pdm-build/
113
+
114
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
115
+ __pypackages__/
116
+
117
+ # Celery stuff
118
+ celerybeat-schedule
119
+ celerybeat.pid
120
+
121
+ # SageMath parsed files
122
+ *.sage.py
123
+
124
+ # Environments
125
+ .env
126
+ .venv
127
+ env/
128
+ venv/
129
+ ENV/
130
+ env.bak/
131
+ venv.bak/
132
+
133
+ # Spyder project settings
134
+ .spyderproject
135
+ .spyproject
136
+
137
+ # Rope project settings
138
+ .ropeproject
139
+
140
+ # mkdocs documentation
141
+ /site
142
+
143
+ # mypy
144
+ .mypy_cache/
145
+ .dmypy.json
146
+ dmypy.json
147
+
148
+ # Pyre type checker
149
+ .pyre/
150
+
151
+ # pytype static type analyzer
152
+ .pytype/
153
+
154
+ # Cython debug symbols
155
+ cython_debug/
156
+
157
+ # PyCharm
158
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
159
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
160
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
161
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
162
+ #.idea/
app.py ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from datetime import datetime
3
+
4
+ import streamlit as st
5
+ from src.qna import ConversationalQA
6
+ from src.youtube_audio_loader import youtube_transcriber
7
+
8
# ---- Session-state bootstrap ------------------------------------------------
# `messages` is the chat transcript: a list of dicts with keys
# "role", "content", "timestamp". It was previously initialised as a dict
# ({}), which is inconsistent with the list operations (`.append`, iteration)
# used below and with the `[]` reset after transcription — fixed to a list.
if "store" not in st.session_state:
    st.session_state.store = {}
if "docs" not in st.session_state:
    st.session_state.docs = None
if "messages" not in st.session_state:
    st.session_state.messages = []

st.set_page_config(page_title="YouTube Transcriber & Chatbot")
st.sidebar.title("Configuration")
openai_api_key = st.sidebar.text_input("OpenAI API Key", type="password")
os.environ["OPENAI_API_KEY"] = openai_api_key

# NOTE(review): the selected model is never forwarded to ConversationalQA,
# which builds a default ChatOpenAI() — confirm whether this is intended.
model = st.sidebar.selectbox(
    "Model", options=["gpt-4o", "gpt-4o-mini"], index=0
)

use_whisper_api = st.sidebar.checkbox(
    "Use Whisper API for Transcription", value=False
)

if use_whisper_api:
    st.sidebar.warning("Using OpenAI Whisper API may incur costs.")
# Local whisper transcription is used unless the (paid) API is selected.
local = not use_whisper_api

st.title("YouTube Video Transcriber & Chatbot")

youtube_link = st.text_input("Enter YouTube Video Link")

if youtube_link:
    st.video(youtube_link)

    # Transcription: download + transcribe the audio, then reset the chat.
    if st.button("Transcribe"):
        if openai_api_key:
            st.session_state.docs = youtube_transcriber(youtube_link, local=local)
            st.session_state.messages = []
            st.success("Transcription completed!")
        else:
            st.error("Please enter your OpenAI API key.")

if st.session_state.docs:
    qa_system = ConversationalQA(docs=st.session_state.docs)

    st.write("### Ask me anything!")

    def display_message(role, content, timestamp):
        """Render a single chat bubble with its role, text, and timestamp."""
        with st.chat_message(role):
            st.markdown(f"**{role.capitalize()}:** {content}")
            st.markdown(
                f"<small><i>{timestamp}</i></small>", unsafe_allow_html=True
            )

    # Replay the stored transcript so the chat survives Streamlit reruns.
    if st.session_state.messages:
        for message in st.session_state.messages:
            display_message(
                message["role"], message["content"], message["timestamp"]
            )

    if prompt := st.chat_input("Your question here..."):
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        display_message("user", prompt, timestamp)

        st.session_state.messages.append(
            {"role": "user", "content": prompt, "timestamp": timestamp}
        )

        with st.spinner("Thinking..."):
            bot_response = qa_system.invoke_chain(
                session_id="1", user_input=prompt
            )

        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        display_message("bot", bot_response, timestamp)

        st.session_state.messages.append(
            {"role": "bot", "content": bot_response, "timestamp": timestamp}
        )
src/__init__.py ADDED
File without changes
src/qna.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain.chains.combine_documents import create_stuff_documents_chain
2
+ from langchain.chains.history_aware_retriever import (
3
+ create_history_aware_retriever,
4
+ )
5
+ from langchain.chains.retrieval import create_retrieval_chain
6
+ from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
7
+ from langchain_chroma import Chroma
8
+ from langchain_community.chat_message_histories import ChatMessageHistory
9
+ from langchain_core.chat_history import BaseChatMessageHistory
10
+ from langchain_core.runnables.history import RunnableWithMessageHistory
11
+ from langchain_openai import ChatOpenAI, OpenAIEmbeddings
12
+ from langchain_text_splitters import RecursiveCharacterTextSplitter
13
+
14
+
15
class ConversationalQA:
    """
    Conversational question-answering over a set of documents using a
    retrieval-augmented generation (RAG) pipeline with per-session chat
    history and Chroma-backed document retrieval.
    """

    def __init__(
        self,
        docs: list,
        chunk_size: int = 1000,
        chunk_overlap: int = 200,
        model=None,
    ):
        """
        Build the retrieval and answering chains from the given documents.

        :param docs: List of documents to be used for retrieval and answering
        :param chunk_size: Maximum size of each text chunk for processing
        :param chunk_overlap: Number of characters to overlap between chunks
        :param model: Optional OpenAI chat model name; when None the
            ChatOpenAI default is used (backward compatible)
        """
        self.text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=chunk_size, chunk_overlap=chunk_overlap
        )
        self.splits = self.text_splitter.split_documents(docs)
        # Default ChatOpenAI() preserves the original behavior when no
        # model name is supplied.
        self.llm = ChatOpenAI(model=model) if model else ChatOpenAI()
        self.vectorstore = Chroma.from_documents(
            documents=self.splits,
            embedding=OpenAIEmbeddings(),
            collection_name="youtube",
        )
        self.retriever = self.vectorstore.as_retriever()

        self.qa_system_prompt = """You are an assistant for question-answering
        tasks. Use the following pieces of retrieved context to answer the
        question. If you don't know the answer, just say that you don't know.
        Use three sentences maximum and keep the answer
        concise.\n\n{context}"""

        self.qa_prompt = ChatPromptTemplate.from_messages(
            [
                ("system", self.qa_system_prompt),
                MessagesPlaceholder("chat_history"),
                ("human", "{input}"),
            ]
        )

        self.contextualize_q_system_prompt = """Given a chat history and the
        latest user question which might reference context in the chat
        history, formulate a standalone question which can be understood
        without the chat history. Do NOT answer the question, just
        reformulate it if needed and otherwise return it as is."""

        self.contextualize_q_prompt = ChatPromptTemplate.from_messages(
            [
                ("system", self.contextualize_q_system_prompt),
                MessagesPlaceholder("chat_history"),
                ("human", "{input}"),
            ]
        )

        # Stuff the retrieved documents into the QA prompt's {context} slot.
        self.question_answer_chain = create_stuff_documents_chain(
            self.llm, self.qa_prompt
        )
        # Rewrite follow-up questions into standalone ones before retrieval.
        self.history_aware_chain = create_history_aware_retriever(
            self.llm, self.retriever, self.contextualize_q_prompt
        )
        self.rag_chain = create_retrieval_chain(
            self.history_aware_chain, self.question_answer_chain
        )
        # Per-session chat histories, keyed by session_id.
        self.store = {}

    def get_session_history(self, session_id: str) -> BaseChatMessageHistory:
        """
        Retrieve or create a chat history for a given session ID.

        :param session_id: Unique session identifier
        :return: ChatMessageHistory object for the session
        """
        if session_id not in self.store:
            self.store[session_id] = ChatMessageHistory()
        return self.store[session_id]

    def invoke_chain(self, session_id: str, user_input: str) -> str:
        """
        Invoke the conversational question-answering chain with user input
        and session history.

        :param session_id: Unique session identifier
        :param user_input: User's question input
        :return: Answer generated by the system
        """
        conversational_rag_chain = RunnableWithMessageHistory(
            self.rag_chain,
            self.get_session_history,
            input_messages_key="input",
            history_messages_key="chat_history",
            output_messages_key="answer",
        )
        return conversational_rag_chain.invoke(
            {"input": user_input},
            config={"configurable": {"session_id": session_id}},
        )["answer"]
src/youtube_audio_loader.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+
3
+ from langchain.document_loaders.parsers.audio import (
4
+ OpenAIWhisperParser,
5
+ OpenAIWhisperParserLocal,
6
+ )
7
+ from langchain_community.document_loaders.blob_loaders.youtube_audio import (
8
+ YoutubeAudioLoader,
9
+ )
10
+ from langchain_community.document_loaders.generic import GenericLoader
11
+
12
+
13
def youtube_transcriber(youtube_video_link, local=True):
    """Download a YouTube video's audio and transcribe it with Whisper.

    :param youtube_video_link: URL of the YouTube video to transcribe
    :param local: if True, use the local Whisper model; otherwise call the
        (paid) OpenAI Whisper API
    :return: list of transcribed Document objects produced by the loader
    """
    urls = [youtube_video_link]

    save_dir = os.path.expanduser("~/Downloads/YouTube")
    # exist_ok avoids the check-then-create race of exists() + makedirs().
    os.makedirs(save_dir, exist_ok=True)

    parser = OpenAIWhisperParserLocal() if local else OpenAIWhisperParser()
    loader = GenericLoader(YoutubeAudioLoader(urls, save_dir), parser)

    try:
        docs = loader.load()
    finally:
        # Clean up the downloaded audio even when transcription fails,
        # so failed runs don't accumulate files on disk.
        # NOTE(review): this removes *every* file in ~/Downloads/YouTube,
        # including any that predate this call — confirm that is acceptable.
        for file_name in os.listdir(save_dir):
            file_path = os.path.join(save_dir, file_name)
            if os.path.isfile(file_path):
                os.remove(file_path)

        if not os.listdir(save_dir):
            os.rmdir(save_dir)

    return docs