Spaces:
Running
Running
arithescientist committed on
Commit • b6f0b521
Parent(s): 2129665
Update app.py
Browse files
app.py
CHANGED
@@ -1,12 +1,14 @@
|
|
1 |
-
import os
|
2 |
import streamlit as st
|
3 |
import pandas as pd
|
4 |
import sqlite3
|
5 |
import logging
|
6 |
-
|
7 |
-
|
8 |
-
from
|
9 |
-
from
|
|
|
|
|
10 |
from langchain.prompts import PromptTemplate
|
11 |
from langchain.chains import LLMChain
|
12 |
|
@@ -57,7 +59,8 @@ toolkit = SQLDatabaseToolkit(db=db, llm=llm)
|
|
57 |
agent_executor = create_sql_agent(
|
58 |
llm=llm,
|
59 |
toolkit=toolkit,
|
60 |
-
verbose=True
|
|
|
61 |
)
|
62 |
|
63 |
# Step 4: Define the callback function
|
@@ -71,10 +74,35 @@ def process_input():
|
|
71 |
|
72 |
# Use the agent to get the response
|
73 |
with st.spinner("Processing..."):
|
74 |
-
response = agent_executor
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
75 |
|
76 |
# Append the assistant's response to the history
|
77 |
-
st.session_state.history.append({"role": "assistant", "content":
|
78 |
|
79 |
# Generate insights based on the response
|
80 |
insights_template = """
|
@@ -90,7 +118,7 @@ def process_input():
|
|
90 |
insights_prompt = PromptTemplate(template=insights_template, input_variables=['question', 'response'])
|
91 |
insights_chain = LLMChain(llm=llm, prompt=insights_prompt)
|
92 |
|
93 |
-
insights = insights_chain.run({'question': user_prompt, 'response':
|
94 |
|
95 |
# Append the assistant's insights to the history
|
96 |
st.session_state.history.append({"role": "assistant", "content": insights})
|
|
|
1 |
+
import os
|
2 |
import streamlit as st
|
3 |
import pandas as pd
|
4 |
import sqlite3
|
5 |
import logging
|
6 |
+
import ast # For parsing string representations of lists
|
7 |
+
|
8 |
+
from langchain_community.chat_models import ChatOpenAI
|
9 |
+
from langchain_community.agent_toolkits.sql.base import create_sql_agent
|
10 |
+
from langchain_community.agent_toolkits.sql.toolkit import SQLDatabaseToolkit
|
11 |
+
from langchain_community.utilities import SQLDatabase
|
12 |
from langchain.prompts import PromptTemplate
|
13 |
from langchain.chains import LLMChain
|
14 |
|
|
|
59 |
agent_executor = create_sql_agent(
|
60 |
llm=llm,
|
61 |
toolkit=toolkit,
|
62 |
+
verbose=True,
|
63 |
+
return_intermediate_steps=True
|
64 |
)
|
65 |
|
66 |
# Step 4: Define the callback function
|
|
|
74 |
|
75 |
# Use the agent to get the response
|
76 |
with st.spinner("Processing..."):
|
77 |
+
response = agent_executor(user_prompt)
|
78 |
+
|
79 |
+
# Extract the final answer and the data from intermediate steps
|
80 |
+
final_answer = response['output']
|
81 |
+
intermediate_steps = response['intermediate_steps']
|
82 |
+
|
83 |
+
# Initialize an empty list for SQL result
|
84 |
+
sql_result = []
|
85 |
+
|
86 |
+
# Find the SQL query result
|
87 |
+
for step in intermediate_steps:
|
88 |
+
if step[0].tool == 'sql_db_query':
|
89 |
+
# The result is a string representation of a list
|
90 |
+
sql_result = ast.literal_eval(step[1])
|
91 |
+
break
|
92 |
+
|
93 |
+
# Convert the result to a DataFrame for better formatting
|
94 |
+
if sql_result:
|
95 |
+
# Adjust the column names based on your query
|
96 |
+
df_result = pd.DataFrame(sql_result, columns=['Region', 'Total_Sales'])
|
97 |
+
sql_result_formatted = df_result.to_markdown(index=False)
|
98 |
+
else:
|
99 |
+
sql_result_formatted = "No results found."
|
100 |
+
|
101 |
+
# Include the data in the final answer
|
102 |
+
assistant_response = f"{final_answer}\n\n**Query Result:**\n{sql_result_formatted}"
|
103 |
|
104 |
# Append the assistant's response to the history
|
105 |
+
st.session_state.history.append({"role": "assistant", "content": assistant_response})
|
106 |
|
107 |
# Generate insights based on the response
|
108 |
insights_template = """
|
|
|
118 |
insights_prompt = PromptTemplate(template=insights_template, input_variables=['question', 'response'])
|
119 |
insights_chain = LLMChain(llm=llm, prompt=insights_prompt)
|
120 |
|
121 |
+
insights = insights_chain.run({'question': user_prompt, 'response': assistant_response})
|
122 |
|
123 |
# Append the assistant's insights to the history
|
124 |
st.session_state.history.append({"role": "assistant", "content": insights})
|