RabotiahovDmytro commited on
Commit
4c65e87
1 Parent(s): 172d9b6

Upload 14 files

Browse files
.env ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ GROQ_API_KEY=YOUR_GROQ_API_KEY_HERE
2
+ DENSE_RETRIEVER_MODEL_NAME = all-MiniLM-L6-v2
3
+ CROSS_ENCODER_MODEL_NAME = cross-encoder/ms-marco-MiniLM-L-12-v2
4
+ LLM_CORE_MODEL_NAME = groq/llama3-8b-8192
.gitignore ADDED
@@ -0,0 +1,162 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ share/python-wheels/
24
+ *.egg-info/
25
+ .installed.cfg
26
+ *.egg
27
+ MANIFEST
28
+
29
+ # PyInstaller
30
+ # Usually these files are written by a python script from a template
31
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
32
+ *.manifest
33
+ *.spec
34
+
35
+ # Installer logs
36
+ pip-log.txt
37
+ pip-delete-this-directory.txt
38
+
39
+ # Unit test / coverage reports
40
+ htmlcov/
41
+ .tox/
42
+ .nox/
43
+ .coverage
44
+ .coverage.*
45
+ .cache
46
+ nosetests.xml
47
+ coverage.xml
48
+ *.cover
49
+ *.py,cover
50
+ .hypothesis/
51
+ .pytest_cache/
52
+ cover/
53
+
54
+ # Translations
55
+ *.mo
56
+ *.pot
57
+
58
+ # Django stuff:
59
+ *.log
60
+ local_settings.py
61
+ db.sqlite3
62
+ db.sqlite3-journal
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ .pybuilder/
76
+ target/
77
+
78
+ # Jupyter Notebook
79
+ .ipynb_checkpoints
80
+
81
+ # IPython
82
+ profile_default/
83
+ ipython_config.py
84
+
85
+ # pyenv
86
+ # For a library or package, you might want to ignore these files since the code is
87
+ # intended to run in multiple environments; otherwise, check them in:
88
+ .python-version
89
+
90
+ # pipenv
91
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
93
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
94
+ # install all needed dependencies.
95
+ #Pipfile.lock
96
+
97
+ # poetry
98
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
100
+ # commonly ignored for libraries.
101
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102
+ #poetry.lock
103
+
104
+ # pdm
105
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106
+ #pdm.lock
107
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108
+ # in version control.
109
+ # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
110
+ .pdm.toml
111
+ .pdm-python
112
+ .pdm-build/
113
+
114
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
115
+ __pypackages__/
116
+
117
+ # Celery stuff
118
+ celerybeat-schedule
119
+ celerybeat.pid
120
+
121
+ # SageMath parsed files
122
+ *.sage.py
123
+
124
+ # Environments
125
+ .env
126
+ .venv
127
+ env/
128
+ venv/
129
+ ENV/
130
+ env.bak/
131
+ venv.bak/
132
+
133
+ # Spyder project settings
134
+ .spyderproject
135
+ .spyproject
136
+
137
+ # Rope project settings
138
+ .ropeproject
139
+
140
+ # mkdocs documentation
141
+ /site
142
+
143
+ # mypy
144
+ .mypy_cache/
145
+ .dmypy.json
146
+ dmypy.json
147
+
148
+ # Pyre type checker
149
+ .pyre/
150
+
151
+ # pytype static type analyzer
152
+ .pytype/
153
+
154
+ # Cython debug symbols
155
+ cython_debug/
156
+
157
+ # PyCharm
158
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
159
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
160
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
161
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
162
+ #.idea/
.python-version ADDED
@@ -0,0 +1 @@
 
 
1
+ 3.10.2
LICENSE ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright [yyyy] [name of copyright owner]
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
RAG_project_plan.txt ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ RAG project planning:
2
+ 1. Data source: book (full English version of The Count of Monte Cristo).
3
+ 2. Chunking done by size.
4
+ 3. Retriever: BM25 + bi-encoder.
5
+ 4. Reranker (Sentence-BERT cross-encoder).
6
+ 5. LiteLLM library (llama3-8b-8192 model).
7
+ 6. Metadata filtering wasn't used.
8
+ 7. Citation: user will be provided with the text citation along with Chapter this text belongs to.
9
+ 8. Vector database wasn't used.
10
+ 9. Web UI done with gradio
11
+
README.md CHANGED
@@ -1,12 +1 @@
1
- ---
2
- title: RAGSystem
3
- emoji: 🐠
4
- colorFrom: indigo
5
- colorTo: green
6
- sdk: docker
7
- pinned: false
8
- license: apache-2.0
9
- short_description: RAG system based on the book 'The Count of Monte Cristo'
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
+ # Neural_Networks_RAG
 
 
 
 
 
 
 
 
 
 
 
chatbot/__init__.py ADDED
File without changes
chatbot/bot.py ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from chatbot.retriever import HybridRetrieverReranker
2
+ from litellm import completion
3
+ import os
4
+ import ast
5
+
6
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
DENSE_RETRIEVER_MODEL_NAME = "all-MiniLM-L6-v2"
CROSS_ENCODER_MODEL_NAME = "cross-encoder/ms-marco-MiniLM-L-12-v2"
LLM_CORE_MODEL_NAME = "groq/llama3-8b-8192"


class QuestionAnsweringBot:
    """Question-answering bot over a chunked book corpus.

    Retrieves context chunks with a BM25 + dense + cross-encoder pipeline
    (``HybridRetrieverReranker``) and asks the Groq-hosted LLM (via LiteLLM)
    to answer strictly from that context.
    """

    def __init__(self, docs, enable_bm25=True, enable_dense=True, enable_rerank=True,
                 top_k_bm25=60, top_n_dense=30, top_n_rerank=2) -> None:
        """Build the retrieval pipeline over *docs*.

        Args:
            docs: Corpus accepted by ``HybridRetrieverReranker`` — a DataFrame
                with 'cleaned_text', 'raw_text', 'chunk_embedding' and
                'chapter_name' columns.
            enable_bm25: Run the lexical BM25 stage.
            enable_dense: Run the dense bi-encoder stage.
            enable_rerank: Run the cross-encoder reranking stage.
            top_k_bm25: Number of BM25 candidates to keep.
            top_n_dense: Number of dense-retrieval candidates to keep.
            top_n_rerank: Number of documents kept after reranking.
        """
        self.retriever = HybridRetrieverReranker(docs)
        self.enable_bm25 = enable_bm25
        self.enable_dense = enable_dense
        self.enable_rerank = enable_rerank
        self.top_k_bm25 = top_k_bm25
        self.top_n_dense = top_n_dense
        self.top_n_rerank = top_n_rerank

    def _get_answer(self, question: str):
        """Retrieve context for *question* and query the LLM.

        Returns:
            tuple: (LiteLLM completion response, list of retrieved documents).
        """
        PROMPT = """\
        You are an intelligent assistant designed to provide accurate and relevant answers based on the provided context.

        Rules:
        - Always analyze the provided context thoroughly before answering.
        - Respond with factual and concise information.
        - If context is ambiguous or insufficient or you can't find answer, say 'I don't know.'
        - Do not speculate or fabricate information beyond the provided context.
        - Follow user instructions on the response style(default style is detailed response if user didn't provide any specifications):
        - If the user asks for a detailed response, provide comprehensive explanations.
        - If the user requests brevity, give concise and to-the-point answers.
        - When applicable, summarize and synthesize information from the context to answer effectively.
        - Avoid using information outside the given context.
        """
        context = self.retriever.hybrid_retrieve(question,
                                                 enable_bm25=self.enable_bm25,
                                                 enable_dense=self.enable_dense,
                                                 enable_rerank=self.enable_rerank,
                                                 top_k_bm25=self.top_k_bm25,
                                                 top_n_dense=self.top_n_dense,
                                                 top_n_rerank=self.top_n_rerank
                                                 )

        context_text = [doc['raw_text'] for doc in context]

        # NOTE: the context reaches the model as the repr of a Python list of
        # strings inside the user message.
        response = completion(
            model=LLM_CORE_MODEL_NAME,
            temperature=0.0,  # deterministic, context-grounded answers
            messages=[
                {"role": "system", "content": PROMPT},
                {"role": "user", "content": f"Context: {context_text}\nQuestion: {question}"}
            ],
            api_key=GROQ_API_KEY
        )
        return response, context

    # Backward-compatible alias: the method was originally (mis)named in the
    # reserved __dunder__ style; keep the old name working for any caller.
    __get_answer__ = _get_answer

    def form_response(self, question):
        """Answer *question* and append the source chapter citations.

        Returns:
            str: Markdown-bold answer followed by a "Resources" chapter list.
        """
        llm_response, context = self._get_answer(question)

        # 'chapter_name' survives the CSV round-trip as the repr of a list,
        # e.g. "['Chapter 1. ...']"; parse it back only when it is a string so
        # in-memory (already-list) datasets also work.
        metadata_raw = [doc['chapter_name'] for doc in context]
        metadata_cleaned = [
            ast.literal_eval(item) if isinstance(item, str) else item
            for item in metadata_raw
        ]

        print('User:', question)
        print('System:', llm_response.choices[0].message.content)

        return f"**{llm_response.choices[0].message.content}**\n\nResources: {[chapter for doc in metadata_cleaned for chapter in doc]}"
chatbot/retriever.py ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from rank_bm25 import BM25Okapi
2
+ import numpy as np
3
+ import torch
4
+ import re
5
+ import string
6
+ from sentence_transformers import SentenceTransformer, util, CrossEncoder
7
+
8
+
9
+ DENSE_RETRIEVER_MODEL_NAME = "all-MiniLM-L6-v2"
10
+ CROSS_ENCODER_MODEL_NAME = 'cross-encoder/ms-marco-MiniLM-L-12-v2'
11
+ LLM_CORE_MODEL_NAME = "groq/llama3-8b-8192"
12
+
13
+
14
+ def clean_text(text):
15
+ text = text.translate(str.maketrans('', '', string.punctuation))
16
+ text = text.lower()
17
+ text = re.sub(r'[^a-zA-Z0-9\s]', '', text)
18
+ text = re.sub(r'\s+', ' ', text)
19
+
20
+ return text.strip()
21
+
22
+
23
+ class HybridRetrieverReranker:
24
+ def __init__(self, dataset, dense_model_name=DENSE_RETRIEVER_MODEL_NAME, cross_encoder_model=CROSS_ENCODER_MODEL_NAME):
25
+ if 'cleaned_text' not in dataset.columns:
26
+ raise ValueError("Dataset must contain a 'cleaned_text' column.")
27
+
28
+ self.dataset = dataset
29
+ self.bm25_corpus = dataset['cleaned_text'].tolist()
30
+ self.tokenized_corpus = [chunk.split() for chunk in self.bm25_corpus]
31
+ self.bm25 = BM25Okapi(self.tokenized_corpus)
32
+
33
+ self.dense_model = SentenceTransformer(dense_model_name)
34
+ self.cross_encoder = CrossEncoder(cross_encoder_model)
35
+
36
+
37
+ def bm25_retrieve(self, query, top_k=70):
38
+ """
39
+ Retrieve top K documents using BM25.
40
+
41
+ Args:
42
+ query (str): Query text.
43
+ top_k (int): Number of top BM25 documents to retrieve.
44
+
45
+ Returns:
46
+ list of dict: Top K BM25 results.
47
+ """
48
+ cleaned_query = clean_text(query)
49
+ query_tokens = cleaned_query.split()
50
+ bm25_scores = self.bm25.get_scores(query_tokens)
51
+ top_k_indices = np.argsort(bm25_scores)[::-1][:top_k]
52
+ return self.dataset.iloc[top_k_indices].to_dict(orient='records')
53
+
54
+
55
+ def dense_retrieve(self, query, candidates=None, top_n=35):
56
+ """
57
+ Retrieve top N documents using dense retrieval with LaBSE.
58
+
59
+ Args:
60
+ query (str): Query text.
61
+ candidates (list of dict): Candidate documents from BM25.
62
+ top_n (int): Number of top dense results to retrieve.
63
+
64
+ Returns:
65
+ list of dict: Top N dense results.
66
+ """
67
+ if candidates is None:
68
+ candidates = self.dataset.to_dict(orient='records')
69
+ query_embedding = self.dense_model.encode(query, convert_to_tensor=True)
70
+
71
+ candidate_embeddings = torch.stack([
72
+ eval(doc['chunk_embedding'].replace('tensor', 'torch.tensor')).clone().detach()
73
+ for doc in candidates
74
+ ])
75
+
76
+ similarities = util.pytorch_cos_sim(query_embedding, candidate_embeddings).squeeze(0)
77
+ top_n_indices = torch.topk(similarities, top_n).indices
78
+ return [candidates[idx] for idx in top_n_indices]
79
+
80
+
81
+ def rerank(self, query, candidates=None, top_n=3):
82
+ """
83
+ Rerank top documents using a CrossEncoder.
84
+
85
+ Args:
86
+ query (str): Query text.
87
+ candidates (list of dict): Candidate documents from dense retriever.
88
+ top_n (int): Number of top reranked results to return.
89
+
90
+ Returns:
91
+ list of dict: Top N reranked documents.
92
+ """
93
+ if candidates is None:
94
+ candidates = self.dataset.to_dict(orient='records')
95
+ query_document_pairs = [(query, doc['raw_text']) for doc in candidates]
96
+ scores = self.cross_encoder.predict(query_document_pairs)
97
+ top_n_indices = np.argsort(scores)[::-1][:top_n]
98
+ return [candidates[idx] for idx in top_n_indices]
99
+
100
+
101
+
102
+ def hybrid_retrieve(self, query, enable_bm25=True, enable_dense=True, enable_rerank=True, top_k_bm25=60, top_n_dense=30, top_n_rerank=2):
103
+ """
104
+ Perform hybrid retrieval: BM25 followed by dense retrieval and optional reranking.
105
+
106
+ Args:
107
+ query (str): Query text.
108
+ top_k_bm25 (int): Number of top BM25 documents to retrieve.
109
+ top_n_dense (int): Number of top dense results to retrieve.
110
+ enable_dense (bool): Whether dense retrieval should be enabled.
111
+ enable_rerank (bool): Whether reranking should be enabled.
112
+ top_n_rerank (int): Number of top reranked documents to return.
113
+
114
+ Returns:
115
+ list of dict: Final top results after hybrid retrieval and reranking.
116
+ """
117
+ if enable_bm25:
118
+ bm25_results = self.bm25_retrieve(query, top_k=top_k_bm25)
119
+ else:
120
+ bm25_results = None
121
+
122
+ if enable_dense:
123
+ dense_results = self.dense_retrieve(query, bm25_results, top_n=top_n_dense)
124
+ else:
125
+ dense_results = bm25_results
126
+
127
+ if enable_rerank:
128
+ final_results = self.rerank(query, dense_results, top_n=top_n_rerank)
129
+ else:
130
+ final_results = dense_results
131
+
132
+ return final_results
data/CountMonteCristoFull.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/chunked_data_corpus.csv ADDED
The diff for this file is too large to render. See raw diff
 
data/prepare_data.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import re
import string
from sentence_transformers import SentenceTransformer
from langchain_text_splitters import CharacterTextSplitter
import pandas as pd

# Paths are resolved relative to this script so it works from any CWD;
# the output name must match what main.py reads (data/chunked_data_corpus.csv).
DATA_FILE_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "CountMonteCristoFull.txt")
OUTPUT_FILE_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "chunked_data_corpus.csv")
DENSE_RETRIEVER_MODEL_NAME = "all-MiniLM-L6-v2"
CROSS_ENCODER_MODEL_NAME = 'cross-encoder/ms-marco-MiniLM-L-12-v2'
LLM_CORE_MODEL_NAME = "groq/llama3-8b-8192"

with open(DATA_FILE_PATH, "r", encoding="utf-8") as f:
    data_corpus = f.read()

# Chunk by size with overlap so text near chapter boundaries appears in both
# neighbouring chunks.
splitter = CharacterTextSplitter(separator="\n\n", chunk_size=10_000, chunk_overlap=1_000)
text_chunks = splitter.create_documents([data_corpus])

# Tag every chunk with the chapter(s) its text belongs to, carrying the last
# seen chapter name across chunks that contain no heading of their own.
prev_chapter_name = ''
for chunk in text_chunks:
    chunk.metadata['belongs_to'] = set()
    curr_chapter_name = ''
    index_start_chapter_name = chunk.page_content.find('Chapter')

    if index_start_chapter_name == -1:
        # No heading here: the chunk continues the previous chapter.
        curr_chapter_name = prev_chapter_name
    else:
        # If prev_chapter_name is not empty and the next chapter starts further
        # than the first 40% of the chunk, the name of the previous chapter is
        # not in this chunk but relevant text from it is — credit it as well.
        if prev_chapter_name != '' and index_start_chapter_name > int(len(chunk.page_content) * 0.4):
            chunk.metadata['belongs_to'].add(prev_chapter_name)

        # A chapter heading runs from 'Chapter' to the next blank line.
        index_end_chapter_name = chunk.page_content.find('\n\n', index_start_chapter_name)
        curr_chapter_name = chunk.page_content[index_start_chapter_name:index_end_chapter_name]
        prev_chapter_name = curr_chapter_name
    chunk.metadata['belongs_to'].add(curr_chapter_name)

    # Sets are not CSV-friendly; store as a list.
    chunk.metadata['belongs_to'] = list(chunk.metadata['belongs_to'])


def clean_text(text):
    """Normalize text for BM25 indexing.

    Must stay in sync with chatbot.retriever.clean_text, which cleans
    queries the same way at search time.
    """
    text = text.translate(str.maketrans('', '', string.punctuation))
    text = text.lower()
    text = re.sub(r'[^a-zA-Z0-9\s]', '', text)
    text = re.sub(r'\s+', ' ', text)
    return text.strip()


dense_model = SentenceTransformer(DENSE_RETRIEVER_MODEL_NAME)


def calculate_embeddings(text):
    """Encode *text* with the dense retriever; returns a torch tensor."""
    return dense_model.encode(text, convert_to_tensor=True)


# Build one corpus row per chunk. NOTE: to_csv stringifies the embedding
# tensor; chatbot.retriever reconstructs it with eval() at query time.
chunked_data_corpus = [
    {
        'raw_text': chunk.page_content,
        'cleaned_text': clean_text(chunk.page_content),
        'chunk_embedding': calculate_embeddings(chunk.page_content),
        'chapter_name': chunk.metadata['belongs_to'],
    }
    for chunk in text_chunks
]

chunked_data_corpus_df = pd.DataFrame(chunked_data_corpus)

# Write next to this script (data/): the original wrote to the current working
# directory, which only matched main.py's expected path when run from data/.
chunked_data_corpus_df.to_csv(OUTPUT_FILE_PATH, index=False)
main.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
import pandas as pd
from chatbot.bot import QuestionAnsweringBot
import os

# Build the corpus path with os.path.join: the original hard-coded a Windows
# "\\" separator ("data\\chunked_data_corpus.csv"), which breaks on Linux —
# including the Docker container this Space runs in.
chunked_data_corpus_df = pd.read_csv(
    os.path.join(os.path.dirname(os.path.realpath(__file__)), "data", "chunked_data_corpus.csv")
)

bot = QuestionAnsweringBot(chunked_data_corpus_df)


def message_respond(message, history):
    """Gradio chat callback: answer *message*.

    *history* is required by the ChatInterface signature but unused — the
    bot answers each question statelessly from the retrieved context.
    """
    answer = bot.form_response(message)
    return answer


gr.ChatInterface(
    fn=message_respond,
    type="messages",
    title="RAG System for 'The Count of Monte Cristo' book",
    description="Here you can ask any questions in the context of the book 'The Count of Monte Cristo'. API key is already provided.",
    theme=gr.themes.Monochrome(font='Lora', text_size='lg', radius_size='sm'),
    examples=["Who is Monte Cristo?",
              "What is the title of Chapter 93",
              "Why Edmond Dantes was in prison?",
              "How many years does Edmon Dantes spent in prison?",
              "Who said this sentence to whom 'Wait and hope_ (Fac et spera)'?",
              "What is the title of Chapter 64?",
              "Who is the president of the USA?",
              "Who is the author of the book The Count of Monte Cristo?",
              "Tell me about all the main identites in Monte Cristo?"
              ],
    cache_examples=False,
).launch()
requirements.txt ADDED
Binary file (4.63 kB). View file