import streamlit as st
import pandas as pd
import sqlite3
import os
import json
from pathlib import Path
from datetime import datetime, timezone
from crewai import Agent, Crew, Process, Task
from crewai_tools import tool
from langchain_groq import ChatGroq
from langchain.schema.output import LLMResult
from langchain_core.callbacks.base import BaseCallbackHandler
from langchain_community.tools.sql_database.tool import (
InfoSQLDatabaseTool,
ListSQLDatabaseTool,
QuerySQLCheckerTool,
QuerySQLDataBaseTool,
)
from langchain_community.utilities.sql_database import SQLDatabase
from datasets import load_dataset
import tempfile
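
# Pull the Groq API key from Streamlit secrets; langchain_groq's ChatGroq reads GROQ_API_KEY from the environment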
os.environ["GROQ_API_KEY"] = st.secrets.get("GROQ_API_KEY", "")
# LLM Logging
class LLMCallbackHandler(BaseCallbackHandler):
    def __init__(self, log_path: Path):
        self.log_path = log_path

    def on_llm_start(self, serialized, prompts, **kwargs):
        with self.log_path.open("a", encoding="utf-8") as file:
            file.write(json.dumps({"event": "llm_start", "text": prompts[0], "timestamp": datetime.now(timezone.utc).isoformat()}) + "\n")

    def on_llm_end(self, response: LLMResult, **kwargs):
        generation = response.generations[-1][-1].message.content
        with self.log_path.open("a", encoding="utf-8") as file:
            file.write(json.dumps({"event": "llm_end", "text": generation, "timestamp": datetime.now(timezone.utc).isoformat()}) + "\n")
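
# Groq-hosted LLM; the callback above appends every prompt/response pair to prompts.jsonl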
llm = ChatGroq(
    temperature=0,
    model_name="mixtral-8x7b-32768",
    callbacks=[LLMCallbackHandler(Path("prompts.jsonl"))],
)
st.title("SQL-RAG Using CrewAI")
st.write("Analyze datasets using natural language queries powered by SQL and CrewAI.")
# Data Input Options
input_option = st.radio("Select Dataset Input:", ["Use Hugging Face Dataset", "Upload CSV File"])
df = None
if input_option == "Use Hugging Face Dataset":
    dataset_name = st.text_input("Enter Hugging Face Dataset Name:", value="Einstellung/demo-salaries")
    if st.button("Load Dataset"):
        try:
            with st.spinner("Loading Hugging Face dataset..."):
                dataset = load_dataset(dataset_name, split="train")
                df = pd.DataFrame(dataset)
            st.success(f"Dataset '{dataset_name}' loaded successfully!")
            st.dataframe(df.head())
        except Exception as e:
            st.error(f"Error loading dataset: {e}")
else:
    uploaded_file = st.file_uploader("Upload CSV File:", type=["csv"])
    if uploaded_file:
        df = pd.read_csv(uploaded_file)
        st.success("File uploaded successfully!")
        st.dataframe(df.head())
# SQL-RAG and Query Workflow
if df is not None:
    temp_dir = tempfile.TemporaryDirectory()
    db_path = os.path.join(temp_dir.name, "data.db")
    connection = sqlite3.connect(db_path)
    df.to_sql("salaries", connection, if_exists="replace", index=False)
    db = SQLDatabase.from_uri(f"sqlite:///{db_path}")
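
    # CrewAI tools wrapping LangChain's SQL utilities so the agents can list tables, inspect schemas, validate and run queries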
@tool("list_tables")
def list_tables() -> str:
"""List all tables in the database."""
return ListSQLDatabaseTool(db=db).invoke("")
@tool("tables_schema")
def tables_schema(tables: str) -> str:
"""Return schema and example rows for given tables."""
return InfoSQLDatabaseTool(db=db).invoke(tables)
@tool("execute_sql")
def execute_sql(sql_query: str) -> str:
"""Execute a SQL query and return results."""
return QuerySQLDataBaseTool(db=db).invoke(sql_query)
@tool("check_sql")
def check_sql(sql_query: str) -> str:
"""Check SQL query validity."""
return QuerySQLCheckerTool(db=db, llm=llm).invoke({"query": sql_query})
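
    # Agents: a SQL developer that uses the tools above, a data analyst, and a report editor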
    # backstory is a required Agent field in CrewAI; short placeholders keep validation happy
    sql_dev = Agent(
        role="Senior Database Developer",
        goal="Construct and execute SQL queries.",
        backstory="An experienced database engineer who writes efficient, correct SQL.",
        llm=llm,
        tools=[list_tables, tables_schema, execute_sql, check_sql],
    )
    data_analyst = Agent(
        role="Senior Data Analyst",
        goal="Analyze the data returned from SQL queries.",
        backstory="An analyst who draws clear, well-supported conclusions from query results.",
        llm=llm,
    )
    report_writer = Agent(
        role="Senior Report Editor",
        goal="Summarize the analysis into a short report.",
        backstory="An editor who distills analysis into concise executive summaries.",
        llm=llm,
    )
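
    # Tasks are chained via `context` so each step sees the previous step's output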
    extract_data = Task(
        description="Extract data for the query: {query}.",
        expected_output="Database query results.",
        agent=sql_dev,
    )
    analyze_data = Task(
        description="Analyze the query results for: {query}.",
        expected_output="Detailed analysis report.",
        agent=data_analyst,
        context=[extract_data],
    )
    write_report = Task(
        description="Summarize the analysis into a brief executive summary.",
        expected_output="Markdown report.",
        agent=report_writer,
        context=[analyze_data],
    )
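
    # Run the three tasks sequentially: extract -> analyze -> report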
    crew = Crew(
        agents=[sql_dev, data_analyst, report_writer],
        tasks=[extract_data, analyze_data, write_report],
        process=Process.sequential,
        verbose=2,
    )
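
    # Free-text question box; the submitted text fills the {query} placeholder in the task descriptions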
    query = st.text_area("Enter Query:", placeholder="e.g., 'What is the average salary by experience level?'")
    if st.button("Submit Query"):
        with st.spinner("Processing your query with CrewAI..."):
            inputs = {"query": query}
            result = crew.kickoff(inputs=inputs)

        st.markdown("### Analysis Report:")
        st.markdown(result)

        temp_dir.cleanup()
else:
    st.info("Load a dataset to proceed.")