import streamlit as st
import pandas as pd
import sqlite3
import os
import json
import tempfile
from pathlib import Path
from datetime import datetime, timezone

from crewai import Agent, Crew, Process, Task
from crewai_tools import tool
from langchain_groq import ChatGroq
from langchain.schema.output import LLMResult
from langchain_core.callbacks.base import BaseCallbackHandler
from langchain_community.tools.sql_database.tool import (
    InfoSQLDatabaseTool,
    ListSQLDatabaseTool,
    QuerySQLCheckerTool,
    QuerySQLDataBaseTool,
)
from langchain_community.utilities.sql_database import SQLDatabase
from datasets import load_dataset
|
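# Expose the Groq API key from Streamlit secrets so the Groq-backed LLM can authenticate.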
os.environ["GROQ_API_KEY"] = st.secrets.get("GROQ_API_KEY", "")
|
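# Callback handler that appends every prompt and completion to a JSONL log file.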
class LLMCallbackHandler(BaseCallbackHandler):
    def __init__(self, log_path: Path):
        self.log_path = log_path

    def on_llm_start(self, serialized, prompts, **kwargs):
        with self.log_path.open("a", encoding="utf-8") as file:
            file.write(json.dumps({
                "event": "llm_start",
                "text": prompts[0],
                "timestamp": datetime.now(timezone.utc).isoformat(),
            }) + "\n")

    def on_llm_end(self, response: LLMResult, **kwargs):
        generation = response.generations[-1][-1].message.content
        with self.log_path.open("a", encoding="utf-8") as file:
            file.write(json.dumps({
                "event": "llm_end",
                "text": generation,
                "timestamp": datetime.now(timezone.utc).isoformat(),
            }) + "\n")
|
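# Llama 3.3 70B on Groq at temperature 0; the callback above logs all traffic to prompts.jsonl.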
llm = ChatGroq(
    temperature=0,
    model_name="groq/llama-3.3-70b-versatile",
    max_tokens=1024,
    callbacks=[LLMCallbackHandler(Path("prompts.jsonl"))],
)
|
st.title("SQL-RAG Using CrewAI")
st.write("Analyze datasets using natural language queries powered by SQL and CrewAI.")
|
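# Persist the loaded DataFrame across Streamlit reruns.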
if "df" not in st.session_state:
    st.session_state.df = None
|
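# Dataset input: pull a dataset from the Hugging Face Hub or upload a CSV file.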
input_option = st.radio("Select Dataset Input:", ["Use Hugging Face Dataset", "Upload CSV File"])

if input_option == "Use Hugging Face Dataset":
    dataset_name = st.text_input("Enter Hugging Face Dataset Name:", value="Einstellung/demo-salaries")
    if st.button("Load Dataset"):
        try:
            with st.spinner("Loading dataset..."):
                dataset = load_dataset(dataset_name, split="train")
                st.session_state.df = pd.DataFrame(dataset)
            st.success(f"Dataset '{dataset_name}' loaded successfully!")
            st.dataframe(st.session_state.df.head())
        except Exception as e:
            st.error(f"Error: {e}")
elif input_option == "Upload CSV File":
    uploaded_file = st.file_uploader("Upload CSV File:", type=["csv"])
    if uploaded_file:
        st.session_state.df = pd.read_csv(uploaded_file)
        st.success("File uploaded successfully!")
        st.dataframe(st.session_state.df.head())
|
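# Mirror the loaded DataFrame into a temporary SQLite database that the agents can query.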
if st.session_state.df is not None:
    temp_dir = tempfile.TemporaryDirectory()
    db_path = os.path.join(temp_dir.name, "data.db")
    connection = sqlite3.connect(db_path)
    st.session_state.df.to_sql("salaries", connection, if_exists="replace", index=False)
    connection.close()  # release the handle so temp_dir.cleanup() can delete the file
    db = SQLDatabase.from_uri(f"sqlite:///{db_path}")
|
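    # Thin CrewAI tool wrappers around LangChain's SQL database toolkit.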
    @tool("list_tables")
    def list_tables() -> str:
        """List all tables in the database."""
        return ListSQLDatabaseTool(db=db).invoke("")

    @tool("tables_schema")
    def tables_schema(tables: str) -> str:
        """Get schema and sample rows for given tables."""
        return InfoSQLDatabaseTool(db=db).invoke(tables)

    @tool("execute_sql")
    def execute_sql(sql_query: str) -> str:
        """Execute a SQL query against the database."""
        return QuerySQLDataBaseTool(db=db).invoke(sql_query)

    @tool("check_sql")
    def check_sql(sql_query: str) -> str:
        """Check the validity of a SQL query."""
        return QuerySQLCheckerTool(db=db, llm=llm).invoke({"query": sql_query})
|
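    # Three agents: one writes and runs SQL, one analyzes the results, one drafts the report.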
    sql_dev = Agent(
        role="Senior Database Developer",
        goal="Extract data using optimized SQL queries.",
        backstory="An expert in writing optimized SQL queries for complex databases.",
        llm=llm,
        tools=[list_tables, tables_schema, execute_sql, check_sql],
    )

    data_analyst = Agent(
        role="Senior Data Analyst",
        goal="Analyze the data and produce insights.",
        backstory="A seasoned analyst who identifies trends and patterns in datasets.",
        llm=llm,
    )

    report_writer = Agent(
        role="Technical Report Writer",
        goal="Summarize the insights into a clear report.",
        backstory="An expert in summarizing data insights into readable reports.",
        llm=llm,
    )
|
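    # Tasks pass their output downstream via context: extract -> analyze -> report.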
    extract_data = Task(
        description="Extract data based on the query: {query}.",
        expected_output="Database results matching the query.",
        agent=sql_dev,
    )

    analyze_data = Task(
        description="Analyze the extracted data for query: {query}.",
        expected_output="Analysis text summarizing findings.",
        agent=data_analyst,
        context=[extract_data],
    )

    write_report = Task(
        description="Summarize the analysis into an executive report.",
        expected_output="Markdown report of insights.",
        agent=report_writer,
        context=[analyze_data],
    )
|
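    # Run the tasks sequentially, one agent after another.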
    crew = Crew(
        agents=[sql_dev, data_analyst, report_writer],
        tasks=[extract_data, analyze_data, write_report],
        process=Process.sequential,
        verbose=True,
    )
|
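    # The crew turns a natural-language question into SQL, analysis, and a final report.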
    query = st.text_area("Enter Query:", placeholder="e.g., 'What is the average salary for senior employees?'")
    if st.button("Submit Query"):
        with st.spinner("Processing query..."):
            inputs = {"query": query}
            result = crew.kickoff(inputs=inputs)
        st.markdown("### Analysis Report:")
        st.markdown(result)
|
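    # Remove the temporary SQLite database once the run finishes.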
    temp_dir.cleanup()
else:
    st.info("Please load a dataset to proceed.")