ilia_khristoforov committed
Commit 304e51f
1 Parent(s): 852b083
On branch pr/5
new file: utils/__init__.py
new file: utils/bot.py
new file: utils/functions.py
modified: app.py
modified: requirements.txt
- utils/__init__.py +3 -0
- utils/bot.py +203 -0
- utils/functions.py +72 -0
utils/__init__.py
ADDED
@@ -0,0 +1,3 @@
+from .bot import Bot
+from .functions import make_documents, make_descriptions
+
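
Note: utils/__init__.py just re-exports the package's public names, so application code can pull everything from one place:

from utils import Bot, make_documents, make_descriptions
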
utils/bot.py
ADDED
@@ -0,0 +1,203 @@
+import langchain
+from langchain.agents import create_csv_agent
+from langchain.schema import HumanMessage
+from langchain.chat_models import ChatOpenAI
+from langchain.embeddings import OpenAIEmbeddings
+from langchain.vectorstores import Chroma
+from typing import Any, List, Dict
+from langchain.agents import AgentType
+from langchain.chains.conversation.memory import ConversationBufferWindowMemory
+from utils.functions import Matcha_model
+from PIL import Image
+from pathlib import Path
+from langchain.tools import StructuredTool
+from langchain.embeddings.sentence_transformer import SentenceTransformerEmbeddings
+
+
+class Bot:
+
+    def __init__(
+        self,
+        openai_api_key: str,
+        file_descriptions: List[Dict[str, Any]],
+        text_documents: List[langchain.schema.Document],
+        verbose: bool = False
+    ):
+        self.verbose = verbose
+        self.file_descriptions = file_descriptions
+
+        self.llm = ChatOpenAI(
+            openai_api_key=openai_api_key,
+            temperature=0,
+            model_name="gpt-3.5-turbo"
+        )
+        # Local sentence-transformer embeddings; the OpenAI variant is kept commented out as an alternative.
+        embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
+        # embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)
+        vector_store = Chroma.from_documents(text_documents, embedding_function)
+        self.text_retriever = langchain.chains.RetrievalQAWithSourcesChain.from_chain_type(
+            llm=self.llm,
+            chain_type='stuff',
+            retriever=vector_store.as_retriever()
+        )
+        self.text_search_tool = langchain.agents.Tool(
+            func=self._text_search,
+            description="Use this tool when searching for text information",
+            name="search text information"
+        )
+
+        self.chart_model = Matcha_model()
+
+    def __call__(
+        self,
+        question: str
+    ):
+        self.tools = []
+        self.tools.append(self.text_search_tool)
+        file = self._define_appropriate_file(question)
+        if file != "None of the files":
+            number = int(file[file.find('№') + 1:])
+            file_description = [x for x in self.file_descriptions if x['number'] == number][0]
+            file_path = file_description['path']
+
+            if Path(file_path).suffix == '.csv':
+                self.csv_agent = create_csv_agent(
+                    llm=self.llm,
+                    path=file_path,
+                    verbose=self.verbose
+                )
+
+                self._init_tabular_search_tool(file_description)
+                self.tools.append(self.tabular_search_tool)
+
+            else:
+                self._init_chart_search_tool(file_description['title'])
+                self.tools.append(self.chart_search_tool)
+
+        self._init_chatbot()
+        # print(file)
+        response = self.agent(question)
+        return response
+
+    def _init_chatbot(self):
+
+        conversational_memory = ConversationBufferWindowMemory(
+            memory_key='chat_history',
+            k=5,
+            return_messages=True
+        )
+
+        self.agent = langchain.agents.initialize_agent(
+            agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
+            tools=self.tools,
+            llm=self.llm,
+            verbose=self.verbose,
+            max_iterations=5,
+            early_stopping_method='generate',
+            memory=conversational_memory
+        )
+        sys_msg = (
+            "You are an expert summarizer and deliverer of information. "
+            "Yet, the reason you are so intelligent is that you make complex "
+            "information incredibly simple to understand. It's actually rather incredible. "
+            "When users ask for information you refer to the relevant tools. "
+            "If one of the tools helped you with only a part of the necessary information, you must "
+            "try to find the missing information using another tool. "
+            "If you can't find the information using the provided tools, you MUST "
+            "say 'I don't know'. Don't try to make up an answer."
+        )
+        prompt = self.agent.agent.create_prompt(
+            tools=self.tools,
+            prefix=sys_msg
+        )
+        self.agent.agent.llm_chain.prompt = prompt
+
+    def _text_search(
+        self,
+        query: str
+    ) -> str:
+        query = self.text_retriever.prep_inputs(query)
+        res = self.text_retriever(query)['answer']
+        return res
+
+    def _tabular_search(
+        self,
+        query: str
+    ) -> str:
+        res = self.csv_agent.run(query)
+        return res
+
+    def _chart_search(
+        self,
+        image,
+        query: str
+    ) -> str:
+        image = Image.open(image)
+        res = self.chart_model.chart_qa(image, query)
+        return res
+
+    def _init_chart_search_tool(
+        self,
+        title: str
+    ) -> None:
+        description = f"""
+        Use this tool when searching for information on charts.
+        With this tool you can answer questions about the related chart.
+        You should ask a simple question about the chart, and the tool will give you a number.
+        This chart is called {title}.
+        """
+
+        # from_function infers the tool's argument schema from the function signature
+        self.chart_search_tool = StructuredTool.from_function(
+            func=self._chart_search,
+            description=description,
+            name="Ask over charts"
+        )
+
+    def _init_tabular_search_tool(
+        self,
+        file_: Dict[str, Any]
+    ) -> None:
+        title = file_['title']
+        columns = file_['columns']
+
+        description = f"""
+        Use this tool when searching for tabular information.
+        With this tool you get access to the table.
+        The title of this table is "{title}" and the names of the columns in this table are: {columns}
+        """
+
+        self.tabular_search_tool = langchain.agents.Tool(
+            func=self._tabular_search,
+            description=description,
+            name="search tabular information"
+        )
+
+    def _define_appropriate_file(
+        self,
+        question: str
+    ) -> str:
+        '''Uses the file descriptions to decide which file may contain the answer to the question.
+        Returns the file number in the form "File №1", or "None of the files".'''
+
+        message = 'I have a list of descriptions: \n'
+        k = 0
+
+        for description in self.file_descriptions:
+            k += 1
+            str_description = f""" {k}) description for File №{description['number']}: """
+            for key, value in description.items():
+                string_val = str(key) + ' : ' + str(value) + '\n'
+                str_description += string_val
+            message += str_description
+        print(message)
+        question = f""" Which file do you think can help answer the question: "{question}"?
+        Your answer MUST be specific,
+        for example if you think that File №2 can help answer the question, you MUST just write "File №2!".
+        If you think that none of the files can help answer the question, just write "None of the files!"
+        Don't include information about your thinking in the answer.
+        """
+        message += question
+
+        res = self.llm([HumanMessage(content=message)])
+        print(res.content)
+        # The trailing "!" the model is asked to append is stripped before returning.
+        print(res.content[:-1])
+        return res.content[:-1]
+
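
For context, Bot reads specific keys from each entry of file_descriptions ('number', 'path', 'title', and 'columns' for tables). A minimal sketch of constructing it directly with hand-written descriptions; the file names, API key, document text, and question below are placeholders rather than part of this commit:

from langchain.schema import Document
from utils import Bot

descriptions = [
    # tabular file: routed to a CSV agent
    {'number': 1, 'path': 'sales.csv', 'title': 'monthly sales', 'columns': ['month', 'revenue']},
    # image file: routed to the MatCha chart-QA tool
    {'number': 2, 'path': 'revenue_chart.png', 'title': 'revenue by quarter'},
]
docs = [Document(page_content='Revenue grew in the spring months.', metadata={'source': 'report.pdf'})]

bot = Bot(
    openai_api_key='sk-...',          # placeholder key
    file_descriptions=descriptions,
    text_documents=docs,
    verbose=True,
)
answer = bot('Which month had the highest revenue?')
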
utils/functions.py
ADDED
@@ -0,0 +1,72 @@
+
+import pandas as pd
+from langchain.document_loaders import PyPDFLoader
+from langchain.text_splitter import CharacterTextSplitter
+import torch
+from transformers import Pix2StructForConditionalGeneration, Pix2StructProcessor
+from pathlib import Path
+
+
+def make_descriptions(file, title):
+    if Path(file).suffix == '.csv':
+        # print(file)
+        df = pd.read_csv(file)
+        print(df.head())
+        columns = list(df.columns)
+        print(columns)
+        # Hard-coded example description, not tied to the uploaded file.
+        table_description0 = {
+            'path': 'random',
+            'number': 1,
+            'columns': ["clothes", "animals", "students"],
+            'title': "fashionable student clothes"
+        }
+
+        table_description1 = {
+            'path': file,
+            'number': 2,
+            'columns': columns,
+            'title': title
+        }
+
+        table_descriptions = [table_description0, table_description1]
+        return table_descriptions
+    else:
+        file_description = {
+            'path': file,
+            'number': 1,
+            'title': title
+        }
+        file_descriptions = [file_description]
+        return file_descriptions
+
+
+def make_documents(pdf):
+    loader = PyPDFLoader(pdf)
+    documents = loader.load()
+
+    text_splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=0, separator='\n')
+    documents = text_splitter.split_documents(documents)
+    return documents
+
+
+class Matcha_model:
+
+    def __init__(self) -> None:
+        # torch.hub.download_url_to_file('https://raw.githubusercontent.com/vis-nlp/ChartQA/main/ChartQA%20Dataset/val/png/20294671002019.png', 'chart_example.png')
+        # torch.hub.download_url_to_file('https://raw.githubusercontent.com/vis-nlp/ChartQA/main/ChartQA%20Dataset/test/png/multi_col_1081.png', 'chart_example_2.png')
+        # torch.hub.download_url_to_file('https://raw.githubusercontent.com/vis-nlp/ChartQA/main/ChartQA%20Dataset/test/png/18143564004789.png', 'chart_example_3.png')
+        # torch.hub.download_url_to_file('https://sharkcoder.com/files/article/matplotlib-bar-plot.png', 'chart_example_4.png')
+
+        self.model_name = "google/matcha-chartqa"
+        self.model = Pix2StructForConditionalGeneration.from_pretrained(self.model_name)
+        self.processor = Pix2StructProcessor.from_pretrained(self.model_name)
+        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        self.model.to(self.device)
+
+    def _filter_output(self, output):
+        return output.replace("<0x0A>", "")
+
+    def chart_qa(self, image, question: str) -> str:
+        inputs = self.processor(images=image, text=question, return_tensors="pt").to(self.device)
+        predictions = self.model.generate(**inputs, max_new_tokens=512)
+        return self._filter_output(self.processor.decode(predictions[0], skip_special_tokens=True))
+
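
For reference, Matcha_model can be exercised on its own, outside the agent; a small sketch, assuming a chart image saved locally (the path and question are placeholders):

from PIL import Image
from utils.functions import Matcha_model

model = Matcha_model()                    # downloads google/matcha-chartqa weights on first use
chart = Image.open('chart_example.png')   # placeholder path; see the commented-out example downloads above
print(model.chart_qa(chart, 'Which category has the highest value?'))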