Fixed README conflict

Files changed:
- .chainlit/config.toml +84 -0
- .gitignore +160 -0
- Dockerfile +11 -0
- LICENSE +21 -0
- aimakerspace/__init__.py +0 -0
- aimakerspace/openai_utils/__init__.py +0 -0
- aimakerspace/openai_utils/chatmodel.py +38 -0
- aimakerspace/openai_utils/embedding.py +59 -0
- aimakerspace/openai_utils/prompts.py +75 -0
- aimakerspace/text_utils.py +136 -0
- aimakerspace/vectordatabase.py +81 -0
- app.py +150 -0
- chainlit.md +3 -0
- requirements.txt +11 -0
.chainlit/config.toml
ADDED
```toml
[project]
# Whether to enable telemetry (default: true). No personal data is collected.
enable_telemetry = true

# List of environment variables to be provided by each user to use the app.
user_env = []

# Duration (in seconds) during which the session is saved when the connection is lost
session_timeout = 3600

# Enable third parties caching (e.g LangChain cache)
cache = false

# Follow symlink for asset mount (see https://github.com/Chainlit/chainlit/issues/317)
# follow_symlink = false

[features]
# Show the prompt playground
prompt_playground = true

# Process and display HTML in messages. This can be a security risk (see https://stackoverflow.com/questions/19603097/why-is-it-dangerous-to-render-user-generated-html-or-javascript)
unsafe_allow_html = false

# Process and display mathematical expressions. This can clash with "$" characters in messages.
latex = false

# Authorize users to upload files with messages
multi_modal = true

# Allows user to use speech to text
[features.speech_to_text]
enabled = false
# See all languages here https://github.com/JamesBrill/react-speech-recognition/blob/HEAD/docs/API.md#language-string
# language = "en-US"

[UI]
# Name of the app and chatbot.
name = "Chatbot"

# Show the readme while the conversation is empty.
show_readme_as_default = true

# Description of the app and chatbot. This is used for HTML tags.
# description = ""

# Large size content are by default collapsed for a cleaner ui
default_collapse_content = true

# The default value for the expand messages settings.
default_expand_messages = false

# Hide the chain of thought details from the user in the UI.
hide_cot = false

# Link to your github repo. This will add a github button in the UI's header.
# github = ""

# Specify a CSS file that can be used to customize the user interface.
# The CSS file can be served from the public directory or via an external link.
# custom_css = "/public/test.css"

# Override default MUI light theme. (Check theme.ts)
[UI.theme.light]
#background = "#FAFAFA"
#paper = "#FFFFFF"

[UI.theme.light.primary]
#main = "#F80061"
#dark = "#980039"
#light = "#FFE7EB"

# Override default MUI dark theme. (Check theme.ts)
[UI.theme.dark]
#background = "#FAFAFA"
#paper = "#FFFFFF"

[UI.theme.dark.primary]
#main = "#F80061"
#dark = "#980039"
#light = "#FFE7EB"


[meta]
generated_by = "0.7.700"
```
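Two values here tie into the rest of the commit: `generated_by = "0.7.700"` matches the `chainlit==0.7.700` pin in `requirements.txt` below, and `prompt_playground = true` is presumably what exposes the `Prompt` object that `app.py` attaches to each message (`msg.prompt = prompt`) in Chainlit's prompt playground.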
.gitignore
ADDED
```text
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
```
Dockerfile
ADDED
```dockerfile
FROM python:3.9
RUN useradd -m -u 1000 user
USER user
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH
WORKDIR $HOME/app
COPY --chown=user . $HOME/app
COPY ./requirements.txt ~/app/requirements.txt
RUN pip install -r requirements.txt
COPY . .
CMD ["chainlit", "run", "app.py", "--port", "7860"]
```
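A note on this Dockerfile: Docker's `COPY` does not expand `~`, so `COPY ./requirements.txt ~/app/requirements.txt` writes into a literal `~/app/` directory under the WORKDIR rather than into `$HOME/app`. The build still works because the earlier `COPY --chown=user . $HOME/app` already placed `requirements.txt` in the WORKDIR where `pip install -r requirements.txt` runs. The final `COPY . .` is therefore redundant and, because it runs without `--chown` after `USER user`, it re-copies the files as root-owned.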
LICENSE
ADDED
```text
MIT License

Copyright (c) 2023 Ruben Alvarez

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
```
aimakerspace/__init__.py
ADDED
Empty file.
aimakerspace/openai_utils/__init__.py
ADDED
Empty file.
aimakerspace/openai_utils/chatmodel.py
ADDED
```python
from openai import OpenAI
from dotenv import load_dotenv
from prompts import UserRolePrompt, SystemRolePrompt
import os

load_dotenv()


class ChatOpenAI:
    def __init__(self, model_name: str = "gpt-3.5-turbo"):
        self.model_name = model_name
        self.openai_api_key = os.getenv("OPENAI_API_KEY")
        if self.openai_api_key is None:
            raise ValueError("OPENAI_API_KEY is not set")

    def run(self, messages, text_only: bool = True):
        if not isinstance(messages, list):
            raise ValueError("messages must be a list")

        client = OpenAI()
        response = client.chat.completions.create(
            model=self.model_name, messages=messages
        )

        if text_only:
            return response.choices[0].message.content

        return response


if __name__ == "__main__":
    chat = ChatOpenAI()
    prompt = UserRolePrompt("Hello, I am a human.")
    prompt = prompt.create_message()
    print(prompt)

    response = chat.run([prompt])
    print(response)
```
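A caveat on the import at the top: `from prompts import ...` resolves only when the script is run from inside `aimakerspace/openai_utils/` (as the `__main__` block does); importing the module through the `aimakerspace` package fails unless that import is made package-qualified. A minimal usage sketch under that assumption (the prompts and model name below are illustrative, not from the commit):

```python
# Hypothetical usage, assuming chatmodel.py's prompt import is changed to the
# package-qualified form so the module can be imported from the project root.
from aimakerspace.openai_utils.chatmodel import ChatOpenAI
from aimakerspace.openai_utils.prompts import SystemRolePrompt, UserRolePrompt

chat = ChatOpenAI(model_name="gpt-3.5-turbo")
messages = [
    SystemRolePrompt("You answer in one sentence.").create_message(),
    UserRolePrompt("What does {term} stand for?").create_message(term="RAG"),
]
print(chat.run(messages))  # text_only=True returns just the completion string
```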
aimakerspace/openai_utils/embedding.py
ADDED
```python
from dotenv import load_dotenv
from openai import AsyncOpenAI, OpenAI
import openai
from typing import List
import os
import asyncio


class EmbeddingModel:
    def __init__(self, embeddings_model_name: str = "text-embedding-ada-002"):
        load_dotenv()
        self.openai_api_key = os.getenv("OPENAI_API_KEY")
        self.async_client = AsyncOpenAI()
        self.client = OpenAI()

        if self.openai_api_key is None:
            raise ValueError(
                "OPENAI_API_KEY environment variable is not set. Please set it to your OpenAI API key."
            )
        openai.api_key = self.openai_api_key
        self.embeddings_model_name = embeddings_model_name

    async def async_get_embeddings(self, list_of_text: List[str]) -> List[List[float]]:
        embedding_response = await self.async_client.embeddings.create(
            input=list_of_text, model=self.embeddings_model_name
        )

        return [embeddings.embedding for embeddings in embedding_response.data]

    async def async_get_embedding(self, text: str) -> List[float]:
        embedding = await self.async_client.embeddings.create(
            input=text, model=self.embeddings_model_name
        )

        return embedding.data[0].embedding

    def get_embeddings(self, list_of_text: List[str]) -> List[List[float]]:
        embedding_response = self.client.embeddings.create(
            input=list_of_text, model=self.embeddings_model_name
        )

        return [embeddings.embedding for embeddings in embedding_response.data]

    def get_embedding(self, text: str) -> List[float]:
        embedding = self.client.embeddings.create(
            input=text, model=self.embeddings_model_name
        )

        return embedding.data[0].embedding


if __name__ == "__main__":
    embedding_model = EmbeddingModel()
    print(asyncio.run(embedding_model.async_get_embedding("Hello, world!")))
    print(
        asyncio.run(
            embedding_model.async_get_embeddings(["Hello, world!", "Goodbye, world!"])
        )
    )
```
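Note that `AsyncOpenAI()` and `OpenAI()` are constructed before the `OPENAI_API_KEY` check. In the pinned `openai` 1.x client, constructing a client without a key raises the library's own error, so the friendlier `ValueError` below it is likely unreachable; moving the check above the client construction would make it the message users actually see. Both clients read the key from the environment, which is why `load_dotenv()` runs first.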
aimakerspace/openai_utils/prompts.py
ADDED
```python
import re


class BasePrompt:
    def __init__(self, prompt):
        """
        Initializes the BasePrompt object with a prompt template.

        :param prompt: A string that can contain placeholders within curly braces
        """
        self.prompt = prompt
        self._pattern = re.compile(r"\{([^}]+)\}")

    def format_prompt(self, **kwargs):
        """
        Formats the prompt string using the keyword arguments provided.

        :param kwargs: The values to substitute into the prompt string
        :return: The formatted prompt string
        """
        matches = self._pattern.findall(self.prompt)
        return self.prompt.format(**{match: kwargs.get(match, "") for match in matches})

    def get_input_variables(self):
        """
        Gets the list of input variable names from the prompt string.

        :return: List of input variable names
        """
        return self._pattern.findall(self.prompt)


class RolePrompt(BasePrompt):
    def __init__(self, prompt, role: str):
        """
        Initializes the RolePrompt object with a prompt template and a role.

        :param prompt: A string that can contain placeholders within curly braces
        :param role: The role for the message ('system', 'user', or 'assistant')
        """
        super().__init__(prompt)
        self.role = role

    def create_message(self, **kwargs):
        """
        Creates a message dictionary with a role and a formatted message.

        :param kwargs: The values to substitute into the prompt string
        :return: Dictionary containing the role and the formatted message
        """
        return {"role": self.role, "content": self.format_prompt(**kwargs)}


class SystemRolePrompt(RolePrompt):
    def __init__(self, prompt: str):
        super().__init__(prompt, "system")


class UserRolePrompt(RolePrompt):
    def __init__(self, prompt: str):
        super().__init__(prompt, "user")


class AssistantRolePrompt(RolePrompt):
    def __init__(self, prompt: str):
        super().__init__(prompt, "assistant")


if __name__ == "__main__":
    prompt = BasePrompt("Hello {name}, you are {age} years old")
    print(prompt.format_prompt(name="John", age=30))

    prompt = SystemRolePrompt("Hello {name}, you are {age} years old")
    print(prompt.create_message(name="John", age=30))
    print(prompt.get_input_variables())
```
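One behavior to be aware of: `format_prompt` substitutes an empty string for any placeholder the caller omits (`kwargs.get(match, "")`), so a missing variable produces a silently truncated prompt rather than a `KeyError`. For example, `BasePrompt("Hello {name}").format_prompt()` returns `"Hello "`.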
aimakerspace/text_utils.py
ADDED
```python
import os
from typing import List, Union
from pdfminer.high_level import extract_text
import io
from chainlit.types import AskFileResponse
import re


class TextFileLoader:
    def __init__(self, path: str, encoding: str = "utf-8"):
        self.documents = []
        self.path = path
        self.encoding = encoding

    def load(self):
        if os.path.isdir(self.path):
            self.load_directory()
        elif os.path.isfile(self.path) and self.path.endswith(".txt"):
            self.load_file()
        else:
            raise ValueError(
                "Provided path is neither a valid directory nor a .txt file."
            )

    def load_file(self):
        with open(self.path, "r", encoding=self.encoding) as f:
            self.documents.append(f.read())

    def load_directory(self):
        for root, _, files in os.walk(self.path):
            for file in files:
                if file.endswith(".txt"):
                    with open(
                        os.path.join(root, file), "r", encoding=self.encoding
                    ) as f:
                        self.documents.append(f.read())

    def load_documents(self):
        self.load()
        return self.documents


class PDFFileLoader(TextFileLoader):
    def __init__(self, path: str, encoding: str = "utf-8", content=None, files: list[AskFileResponse] = None):
        super().__init__(path, encoding)
        self.content = content
        self.files = files

    def load(self):
        if isinstance(self.files, List):
            for file in self.files:
                if file.content and file.path.endswith(".pdf"):
                    self.content = file.content
                    self.load_content()
        elif os.path.isdir(self.path):
            self.load_directory()
        elif os.path.isfile(self.path) and self.path.endswith(".pdf"):
            print("loading file ...")
            self.load_file()
        elif self.content and self.path.endswith(".pdf"):
            print("loading content ...")
            self.load_content()
        else:
            raise ValueError(
                "Provided path is neither a valid directory nor a .pdf file."
            )

    def load_content(self):
        """Load a PDF that is already in memory."""
        text = extract_text(io.BytesIO(self.content))
        text = self.clean_text(text)
        self.documents.append(text)

    def clean_text(self, text):
        """Clean text by removing special characters."""
        # remove all newlines and collapse repeated spaces
        text = text.replace('\n', ' ')
        text = re.sub(' +', ' ', text)
        # remove page numbers; they appear immediately before '\x0c' (form feed)
        text = re.sub(r'\d+ \x0c', '\x0c', text)
        # remove all remaining '\x0c'
        text = text.replace('\x0c', ' ')
        return text

    def load_file(self):
        text = extract_text(pdf_file=self.path, codec=self.encoding)
        self.documents.append(text)

    def load_directory(self):
        for root, _, files in os.walk(self.path):
            for file in files:
                if file.endswith(".pdf"):
                    self.documents.append(
                        extract_text(os.path.join(root, file), codec=self.encoding)
                    )


class CharacterTextSplitter:
    def __init__(
        self,
        chunk_size: int = 1000,
        chunk_overlap: int = 200,
    ):
        assert (
            chunk_size > chunk_overlap
        ), "Chunk size must be greater than chunk overlap"

        self.chunk_size = chunk_size
        self.chunk_overlap = chunk_overlap

    def split(self, text: str) -> List[str]:
        chunks = []
        for i in range(0, len(text), self.chunk_size - self.chunk_overlap):
            chunks.append(text[i : i + self.chunk_size])
        return chunks

    def split_texts(self, texts: List[str]) -> List[str]:
        chunks = []
        for text in texts:
            chunks.extend(self.split(text))
        return chunks


if __name__ == "__main__":
    loader = TextFileLoader("data/KingLear.txt")
    loader.load()
    splitter = CharacterTextSplitter()
    chunks = splitter.split_texts(loader.documents)
    print(len(chunks))
    print(chunks[0])
    print("--------")
    print(chunks[1])
    print("--------")
    print(chunks[-2])
    print("--------")
    print(chunks[-1])
```
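The `CharacterTextSplitter` advances its window by `chunk_size - chunk_overlap` characters per step, so consecutive chunks share their boundary text. A minimal sketch of that stride with small numbers (the values below are chosen for illustration; the defaults are 1000 and 200):

```python
# Stride illustration: chunk_size=10, chunk_overlap=4 means the window moves
# 6 characters at a time, and each chunk repeats the previous chunk's tail.
text = "abcdefghijklmnopqrstuvwxyz"
chunk_size, chunk_overlap = 10, 4
stride = chunk_size - chunk_overlap  # 6
chunks = [text[i:i + chunk_size] for i in range(0, len(text), stride)]
print(chunks)  # ['abcdefghij', 'ghijklmnop', 'mnopqrstuv', 'stuvwxyz', 'yz']
```

As the last two chunks show, the final windows can be much shorter than `chunk_size`, which also holds for the class's `split` method.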
aimakerspace/vectordatabase.py
ADDED
```python
import numpy as np
from collections import defaultdict
from typing import List, Tuple, Callable
from aimakerspace.openai_utils.embedding import EmbeddingModel
import asyncio


def cosine_similarity(vector_a: np.array, vector_b: np.array) -> float:
    """Computes the cosine similarity between two vectors."""
    dot_product = np.dot(vector_a, vector_b)
    norm_a = np.linalg.norm(vector_a)
    norm_b = np.linalg.norm(vector_b)
    return dot_product / (norm_a * norm_b)


class VectorDatabase:
    def __init__(self, embedding_model: EmbeddingModel = None):
        self.vectors = defaultdict(np.array)
        self.embedding_model = embedding_model or EmbeddingModel()

    def insert(self, key: str, vector: np.array) -> None:
        self.vectors[key] = vector

    def search(
        self,
        query_vector: np.array,
        k: int,
        distance_measure: Callable = cosine_similarity,
    ) -> List[Tuple[str, float]]:
        scores = [
            (key, distance_measure(query_vector, vector))
            for key, vector in self.vectors.items()
        ]
        return sorted(scores, key=lambda x: x[1], reverse=True)[:k]

    def search_by_text(
        self,
        query_text: str,
        k: int,
        distance_measure: Callable = cosine_similarity,
        return_as_text: bool = False,
    ) -> List[Tuple[str, float]]:
        query_vector = self.embedding_model.get_embedding(query_text)
        results = self.search(query_vector, k, distance_measure)
        return [result[0] for result in results] if return_as_text else results

    def retrieve_from_key(self, key: str) -> np.array:
        return self.vectors.get(key, None)

    async def abuild_from_list(self, list_of_text: List[str]) -> "VectorDatabase":
        embeddings = await self.embedding_model.async_get_embeddings(list_of_text)
        for text, embedding in zip(list_of_text, embeddings):
            self.insert(text, np.array(embedding))
        return self


if __name__ == "__main__":
    list_of_text = [
        "I like to eat broccoli and bananas.",
        "I ate a banana and spinach smoothie for breakfast.",
        "Chinchillas and kittens are cute.",
        "My sister adopted a kitten yesterday.",
        "Look at this cute hamster munching on a piece of broccoli.",
    ]

    vector_db = VectorDatabase()
    vector_db = asyncio.run(vector_db.abuild_from_list(list_of_text))
    k = 2

    searched_vector = vector_db.search_by_text("I think fruit is awesome!", k=k)
    print(f"Closest {k} vector(s):", searched_vector)

    retrieved_vector = vector_db.retrieve_from_key(
        "I like to eat broccoli and bananas."
    )
    print("Retrieved vector:", retrieved_vector)

    relevant_texts = vector_db.search_by_text(
        "I think fruit is awesome!", k=k, return_as_text=True
    )
    print(f"Closest {k} text(s):", relevant_texts)
```
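Two implementation notes: `defaultdict(np.array)` never actually invokes its factory (`insert` always assigns, and `retrieve_from_key` goes through `.get`), so a plain `dict` would behave identically; had the factory ever fired, `np.array()` with no arguments would raise a `TypeError`. Also, because each chunk's full text serves as its key, `search_by_text(..., return_as_text=True)` returns the chunk strings themselves, which is exactly what `app.py` relies on to assemble its context.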
app.py
ADDED
```python
# You can find this code for Chainlit python streaming here (https://docs.chainlit.io/concepts/streaming/python)

# OpenAI Chat completion
import os
from openai import AsyncOpenAI  # importing openai for API usage
import chainlit as cl  # importing chainlit for our app
from chainlit.prompt import Prompt, PromptMessage  # importing prompt tools
from chainlit.playground.providers import ChatOpenAI  # importing ChatOpenAI tools
from dotenv import load_dotenv
from aimakerspace.text_utils import PDFFileLoader, CharacterTextSplitter
from aimakerspace.vectordatabase import VectorDatabase

load_dotenv()

# ChatOpenAI Templates
system_template = """You are a Wizzard and everything you say is a spell!
"""

user_template = """{input}
Wizzard, think through your response step by step.
"""

assistant_template = """Use the following context, if any, to help you
answer the user's input, if the answer is not in the context say you don't
know the answer.
CONTEXT:
===============
{context}
===============

Spell away Wizzard!
"""


@cl.on_chat_start  # marks a function that will be executed at the start of a user session
async def start_chat():
    settings = {
        "model": "gpt-3.5-turbo",
        "temperature": 0,
        "max_tokens": 500,
        "top_p": 1,
        "frequency_penalty": 0,
        "presence_penalty": 0,
    }

    cl.user_session.set("settings", settings)

    files = None
    while files is None:
        files = await cl.AskFileMessage(
            content="Please upload a PDF file to begin",
            accept=["application/pdf"],
            max_files=10,
            max_size_mb=10,
            timeout=60
        ).send()

    # let the user know you are processing the file(s)
    await cl.Message(
        content="Loading your files..."
    ).send()

    # decode the file
    documents = PDFFileLoader(path="", files=files).load_documents()

    # split the text into chunks
    chunks = CharacterTextSplitter(
        chunk_size=1000,
        chunk_overlap=200
    ).split_texts(documents)

    print(chunks[0])

    # create a vector store
    # let the user know you are processing the document(s)
    await cl.Message(
        content="Creating vector store"
    ).send()

    vector_db = VectorDatabase()
    vector_db = await vector_db.abuild_from_list(chunks)

    await cl.Message(
        content="Done. Ask away!"
    ).send()

    cl.user_session.set("vector_db", vector_db)


@cl.on_message  # marks a function that should be run each time the chatbot receives a message from a user
async def main(message: cl.Message):
    vector_db = cl.user_session.get("vector_db")
    settings = cl.user_session.get("settings")

    client = AsyncOpenAI()

    print(message.content)

    results_list = vector_db.search_by_text(query_text=message.content, k=3, return_as_text=True)
    if results_list:
        results_string = "\n\n".join(results_list)
    else:
        results_string = ""

    prompt = Prompt(
        provider=ChatOpenAI.id,
        messages=[
            PromptMessage(
                role="system",
                template=system_template,
                formatted=system_template,
            ),
            PromptMessage(
                role="user",
                template=user_template,
                formatted=user_template.format(input=message.content),
            ),
            PromptMessage(
                role="assistant",
                template=assistant_template,
                formatted=assistant_template.format(context=results_string)
            )
        ],
        inputs={
            "input": message.content,
            "context": results_string
        },
        settings=settings,
    )

    print([m.to_openai() for m in prompt.messages])

    msg = cl.Message(content="")

    # Call OpenAI
    async for stream_resp in await client.chat.completions.create(
        messages=[m.to_openai() for m in prompt.messages], stream=True, **settings
    ):
        token = stream_resp.choices[0].delta.content
        if not token:
            token = ""
        await msg.stream_token(token)

    # Update the prompt object with the completion
    prompt.completion = msg.content
    msg.prompt = prompt

    # Send and close the message stream
    await msg.send()
```
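Taken together, `start_chat` is the ingest half of a minimal RAG pipeline (upload PDFs, extract and clean the text, split into 1000-character chunks with 200 characters of overlap, embed into the in-memory `VectorDatabase`), and `main` is the query half: the top 3 chunks for each user message are formatted into `assistant_template` as context, and the completion is streamed token by token. One design note: the retrieved context is injected as an assistant message rather than into the system or user turn, which is unconventional; most RAG prompts carry the context in the system or user message.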
chainlit.md
ADDED
```markdown
# Beyond ChatGPT

Welcome to Chatito GPT! This is a prototyping space for LLM-related applications. Follow me [@RubenAMtz](https://twitter.com/RubenAMtz) on Twitter.
```
requirements.txt
ADDED
```text
chainlit==0.7.700
cohere==4.37
openai==1.3.5
tiktoken==0.5.1
python-dotenv==1.0.0
numpy==1.25.2
pandas
scikit-learn
matplotlib
plotly
pdfminer.six
```
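The first six packages are pinned to exact versions (matching the Chainlit version recorded in `.chainlit/config.toml`), while `pandas`, `scikit-learn`, `matplotlib`, `plotly`, and `pdfminer.six` float. Of the floating set, only `pdfminer.six` is imported by code in this commit, so the dataframe and plotting libraries appear to be prototyping extras.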