import streamlit as st
import pandas as pd
import sqlite3
import tempfile
import os
import re
import json

import plotly.express as px
from fpdf import FPDF
from datasets import load_dataset

from crewai import Agent, Crew, Process, Task
from crewai.tools import tool
from langchain_groq import ChatGroq
from langchain_openai import ChatOpenAI
from langchain_community.tools.sql_database.tool import (
    InfoSQLDatabaseTool,
    ListSQLDatabaseTool,
    QuerySQLCheckerTool,
    QuerySQLDataBaseTool,
)
from langchain_community.utilities.sql_database import SQLDatabase
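
# Streamlit re-runs this script top to bottom on every interaction, so all
# state that must survive reruns lives in st.session_state (see below).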
st.title("SQL-RAG Using CrewAI 📊")
st.write("Analyze datasets using natural language queries powered by SQL and CrewAI.")
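
# LLM selection: `llm` stays None until the matching provider API key is found
# in the environment.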
llm = None

model_choice = st.radio("Select LLM", ["GPT-4o", "llama-3.3-70b"], index=0, horizontal=True)

groq_api_key = os.getenv("GROQ_API_KEY")
openai_api_key = os.getenv("OPENAI_API_KEY")

if model_choice == "llama-3.3-70b":
    if not groq_api_key:
        st.error("Groq API key is missing. Please set the GROQ_API_KEY environment variable.")
        llm = None
    else:
        llm = ChatGroq(groq_api_key=groq_api_key, model="groq/llama-3.3-70b-versatile")
elif model_choice == "GPT-4o":
    if not openai_api_key:
        st.error("OpenAI API key is missing. Please set the OPENAI_API_KEY environment variable.")
        llm = None
    else:
        llm = ChatOpenAI(api_key=openai_api_key, model="gpt-4o")

if "df" not in st.session_state:
    st.session_state.df = None
if "show_preview" not in st.session_state:
    st.session_state.show_preview = False
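
# Dataset input: load either a Hugging Face dataset split or an uploaded CSV
# into st.session_state.df so it persists across reruns.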
input_option = st.radio("Select Dataset Input:", ["Use Hugging Face Dataset", "Upload CSV File"])

if input_option == "Use Hugging Face Dataset":
    dataset_name = st.text_input("Enter Hugging Face Dataset Name:", value="Einstellung/demo-salaries")
    if st.button("Load Dataset"):
        try:
            with st.spinner("Loading dataset..."):
                dataset = load_dataset(dataset_name, split="train")
                st.session_state.df = pd.DataFrame(dataset)
                st.session_state.show_preview = True
                st.success(f"Dataset '{dataset_name}' loaded successfully!")
        except Exception as e:
            st.error(f"Error: {e}")

elif input_option == "Upload CSV File":
    uploaded_file = st.file_uploader("Upload CSV File:", type=["csv"])
    if uploaded_file:
        try:
            st.session_state.df = pd.read_csv(uploaded_file)
            st.session_state.show_preview = True
            st.success("File uploaded successfully!")
        except Exception as e:
            st.error(f"Error loading file: {e}")

if st.session_state.df is not None and st.session_state.show_preview:
    st.subheader("📊 Dataset Preview")
    st.dataframe(st.session_state.df.head())
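

# Ask the LLM to suggest a chart spec as JSON. `invoke` returns a chat message,
# so the JSON is parsed from `response.content`; models sometimes wrap JSON in
# Markdown code fences, so those are stripped first (a defensive assumption).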
def ask_gpt4o_for_visualization(query, df, llm):
    columns = ', '.join(df.columns)
    prompt = f"""
    Analyze the query and suggest the best visualization.
    Query: "{query}"
    Available Columns: {columns}
    Respond in this JSON format:
    {{
        "chart_type": "bar/box/line/scatter",
        "x_axis": "column_name",
        "y_axis": "column_name",
        "group_by": "optional_column_name"
    }}
    """
    response = llm.invoke(prompt)
    raw = response.content.strip()
    raw = re.sub(r"^```(?:json)?\s*|\s*```$", "", raw)
    try:
        return json.loads(raw)
    except json.JSONDecodeError:
        st.error("⚠️ GPT-4o failed to generate a valid suggestion.")
        return None
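

# Turn the suggestion into a Plotly figure. `getattr` maps the chart-type
# string straight onto the matching plotly.express function (px.bar, px.box,
# px.line, px.scatter); if no y-axis was suggested, the first numeric column
# other than the x-axis is used as a fallback.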
def generate_visualization(suggestion, df):
    chart_type = suggestion.get("chart_type", "bar").lower()
    x_axis = suggestion.get("x_axis")
    y_axis = suggestion.get("y_axis")
    group_by = suggestion.get("group_by")

    if not y_axis:
        numeric_columns = df.select_dtypes(include='number').columns.tolist()
        if x_axis in numeric_columns:
            numeric_columns.remove(x_axis)
        y_axis = numeric_columns[0] if numeric_columns else None

    if not x_axis or not y_axis:
        st.warning("⚠️ Unable to determine relevant columns for visualization.")
        return None

    plotly_function = getattr(px, chart_type, None)
    if not plotly_function:
        st.warning(f"⚠️ Unsupported chart type '{chart_type}' suggested by GPT-4o.")
        return None

    plot_args = {"data_frame": df, "x": x_axis, "y": y_axis}
    if group_by and group_by in df.columns:
        plot_args["color"] = group_by

    try:
        fig = plotly_function(**plot_args)
        fig.update_layout(
            title=f"{chart_type.title()} Plot of {y_axis.replace('_', ' ').title()} by {x_axis.replace('_', ' ').title()}",
            xaxis_title=x_axis.replace('_', ' ').title(),
            yaxis_title=y_axis.replace('_', ' ').title(),
        )
        fig = add_stats_to_figure(fig, df, y_axis, chart_type)
        return fig
    except Exception as e:
        st.error(f"⚠️ Failed to generate visualization: {e}")
        return None
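

# Overlay summary statistics on the figure. The dollar formatting assumes the
# y-axis is a currency column, which holds for the demo salaries dataset.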
def add_stats_to_figure(fig, df, y_axis, chart_type):
    min_val = df[y_axis].min()
    max_val = df[y_axis].max()
    avg_val = df[y_axis].mean()
    median_val = df[y_axis].median()
    std_dev_val = df[y_axis].std()

    stats_text = (
        f"📊 **Statistics**\n\n"
        f"- **Min:** ${min_val:,.2f}\n"
        f"- **Max:** ${max_val:,.2f}\n"
        f"- **Average:** ${avg_val:,.2f}\n"
        f"- **Median:** ${median_val:,.2f}\n"
        f"- **Std Dev:** ${std_dev_val:,.2f}"
    )

    if chart_type in ["bar", "line", "scatter"]:
        fig.add_annotation(
            text=stats_text,
            xref="paper", yref="paper",
            x=1.05, y=1,
            showarrow=False,
            align="left",
            font=dict(size=12, color="black"),
            bordercolor="black",
            borderwidth=1,
            bgcolor="rgba(255, 255, 255, 0.8)"
        )
        fig.add_hline(y=min_val, line_dash="dot", line_color="red", annotation_text="Min", annotation_position="bottom right")
        fig.add_hline(y=median_val, line_dash="dash", line_color="orange", annotation_text="Median", annotation_position="top right")
        fig.add_hline(y=avg_val, line_dash="dashdot", line_color="green", annotation_text="Avg", annotation_position="top right")
        fig.add_hline(y=max_val, line_dash="dot", line_color="blue", annotation_text="Max", annotation_position="top right")
    elif chart_type == "box":
        # Box plots already show min/median/max, so nothing extra is added.
        pass
    elif chart_type == "pie":
        st.info("📊 Pie charts focus on proportions. No additional stats displayed.")
    else:
        st.warning(f"⚠️ No stats added for unsupported chart type: {chart_type}")

    return fig
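

# Write the report, conclusion, and per-figure metadata to a temporary .txt
# file and return its path; delete=False keeps the file available for download.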
def create_text_report_with_viz_temp(report, conclusion, visualizations):
    content = f"### Analysis Report\n\n{report}\n\n### Visualizations\n"
    for i, fig in enumerate(visualizations, start=1):
        fig_title = fig.layout.title.text if fig.layout.title.text else f"Visualization {i}"
        x_axis = fig.layout.xaxis.title.text if fig.layout.xaxis.title.text else "X-axis"
        y_axis = fig.layout.yaxis.title.text if fig.layout.yaxis.title.text else "Y-axis"

        content += f"\n{i}. {fig_title}\n"
        content += f" - X-axis: {x_axis}\n"
        content += f" - Y-axis: {y_axis}\n"
        if fig.data:
            trace_types = set(trace.type for trace in fig.data)
            content += f" - Chart Type(s): {', '.join(trace_types)}\n"
        else:
            content += " - No data available in this visualization.\n"

    content += f"\n\n\n{conclusion}"

    with tempfile.NamedTemporaryFile(delete=False, suffix=".txt", mode='w', encoding='utf-8') as temp_txt:
        temp_txt.write(content)
        return temp_txt.name
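

# Render the same content as a PDF. Note: fig.write_image requires the kaleido
# package, and FPDF's built-in Arial font is Latin-1 only, so emoji are kept
# out of the PDF headings.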
def create_pdf_report_with_viz(report, conclusion, visualizations):
    pdf = FPDF()
    pdf.set_auto_page_break(auto=True, margin=15)
    pdf.add_page()
    pdf.set_font("Arial", size=12)

    # Title
    pdf.set_font("Arial", style="B", size=18)
    pdf.cell(0, 10, "Analysis Report", ln=True, align="C")
    pdf.ln(10)

    # Analysis section
    pdf.set_font("Arial", style="B", size=14)
    pdf.cell(0, 10, "Analysis", ln=True)
    pdf.set_font("Arial", size=12)
    pdf.multi_cell(0, 10, report)

    # Conclusion section
    pdf.ln(10)
    pdf.set_font("Arial", style="B", size=14)
    pdf.cell(0, 10, "Conclusion", ln=True)
    pdf.set_font("Arial", size=12)
    pdf.multi_cell(0, 10, conclusion)

    # Visualizations, one image per figure
    pdf.add_page()
    pdf.set_font("Arial", style="B", size=16)
    pdf.cell(0, 10, "Visualizations", ln=True)
    pdf.ln(5)

    with tempfile.TemporaryDirectory() as temp_dir:
        for i, fig in enumerate(visualizations, start=1):
            fig_title = fig.layout.title.text if fig.layout.title.text else f"Visualization {i}"
            x_axis = fig.layout.xaxis.title.text if fig.layout.xaxis.title.text else "X-axis"
            y_axis = fig.layout.yaxis.title.text if fig.layout.yaxis.title.text else "Y-axis"

            img_path = os.path.join(temp_dir, f"viz_{i}.png")
            fig.write_image(img_path)

            pdf.set_font("Arial", style="B", size=14)
            pdf.multi_cell(0, 10, f"{i}. {fig_title}")
            pdf.set_font("Arial", size=12)
            pdf.multi_cell(0, 10, f"X-axis: {x_axis} | Y-axis: {y_axis}")
            pdf.ln(3)

            pdf.image(img_path, w=170)
            pdf.ln(10)

    temp_pdf = tempfile.NamedTemporaryFile(delete=False, suffix=".pdf")
    pdf.output(temp_pdf.name)

    return temp_pdf
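

# Escape Markdown control characters so LLM output renders literally.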
def escape_markdown(text):
    text = str(text)
    escape_chars = r"(\*|_|`|~)"
    return re.sub(escape_chars, r"\\\1", text)
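

# Main flow: once a dataframe is loaded, mirror it into a temporary SQLite
# database so the agents can query it with plain SQL. The hard-coded table
# name "salaries" matches the demo dataset; rename it if you load other data.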
if st.session_state.df is not None:
    temp_dir = tempfile.TemporaryDirectory()
    db_path = os.path.join(temp_dir.name, "data.db")
    connection = sqlite3.connect(db_path)
    st.session_state.df.to_sql("salaries", connection, if_exists="replace", index=False)
    db = SQLDatabase.from_uri(f"sqlite:///{db_path}")
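
    # Expose LangChain's SQL database tools to the agents via CrewAI's @tool decorator.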
    @tool("list_tables")
    def list_tables() -> str:
        """List all tables in the database."""
        return ListSQLDatabaseTool(db=db).invoke("")

    @tool("tables_schema")
    def tables_schema(tables: str) -> str:
        """Get the schema and sample rows for the specified tables."""
        return InfoSQLDatabaseTool(db=db).invoke(tables)

    @tool("execute_sql")
    def execute_sql(sql_query: str) -> str:
        """Execute a SQL query against the database and return the results."""
        return QuerySQLDataBaseTool(db=db).invoke(sql_query)

    @tool("check_sql")
    def check_sql(sql_query: str) -> str:
        """Validate the SQL query syntax and structure before execution."""
        return QuerySQLCheckerTool(db=db, llm=llm).invoke({"query": sql_query})
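
    # Four agents: a SQL developer (the only one with DB tools), an analyst, a
    # report writer, and a separate conclusion writer so the report and the
    # conclusion can be generated independently.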
    sql_dev = Agent(
        role="Senior Database Developer",
        goal="Extract data using optimized SQL queries.",
        backstory="An expert in writing optimized SQL queries for complex databases.",
        llm=llm,
        tools=[list_tables, tables_schema, execute_sql, check_sql],
    )

    data_analyst = Agent(
        role="Senior Data Analyst",
        goal="Analyze the data and produce insights.",
        backstory="A seasoned analyst who identifies trends and patterns in datasets.",
        llm=llm,
    )

    report_writer = Agent(
        role="Technical Report Writer",
        goal="Write a structured report with Introduction and Key Insights. DO NOT include any Conclusion or Summary.",
        backstory="Specializes in detailed analytical reports without conclusions.",
        llm=llm,
    )

    conclusion_writer = Agent(
        role="Conclusion Specialist",
        goal="Summarize findings into a clear and concise 3-5 line Conclusion highlighting only the most important insights.",
        backstory="An expert in crafting impactful and clear conclusions.",
        llm=llm,
    )

    extract_data = Task(
        description="Extract data based on the query: {query}.",
        expected_output="Database results matching the query.",
        agent=sql_dev,
    )

    analyze_data = Task(
        description="Analyze the extracted data for query: {query}.",
        expected_output="Key Insights and Analysis without any Introduction or Conclusion.",
        agent=data_analyst,
        context=[extract_data],
    )

    write_report = Task(
        description="Write the analysis report with Introduction and Key Insights. DO NOT include any Conclusion or Summary.",
        expected_output="Markdown-formatted report excluding Conclusion.",
        agent=report_writer,
        context=[analyze_data],
    )

    write_conclusion = Task(
        description="Summarize the key findings in 3-5 impactful lines, highlighting the maximum, minimum, and average salaries. "
        "Emphasize significant insights on salary distribution and influential compensation trends for strategic decision-making.",
        expected_output="Markdown-formatted Conclusion section with key insights and statistics.",
        agent=conclusion_writer,
        context=[analyze_data],
    )
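
    # Two sequential crews: one produces the full report, the other only the
    # conclusion, so each can be prompted and re-run on its own.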
    crew_report = Crew(
        agents=[sql_dev, data_analyst, report_writer],
        tasks=[extract_data, analyze_data, write_report],
        process=Process.sequential,
        verbose=True,
    )

    crew_conclusion = Crew(
        agents=[data_analyst, conclusion_writer],
        tasks=[write_conclusion],
        process=Process.sequential,
        verbose=True,
    )

    tab1, tab2 = st.tabs(["📊 Query Insights + Viz", "📈 Full Data Viz"])
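
    # Tab 1: run the natural-language query through both crews. kickoff()
    # returns a CrewOutput, so it is cast to str before Markdown rendering.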
    with tab1:
        query = st.text_area("Enter Query:", value="Provide insights into the salary of a Principal Data Scientist.")
        if st.button("Submit Query"):
            with st.spinner("Processing query..."):
                report_inputs = {"query": query + " Provide detailed analysis but DO NOT include Conclusion."}
                report_result = crew_report.kickoff(inputs=report_inputs)

                conclusion_inputs = {"query": query + " Provide ONLY the most important insights in 3-5 concise lines."}
                conclusion_result = crew_conclusion.kickoff(inputs=conclusion_inputs)

                st.markdown(str(report_result) if report_result else "⚠️ No Report Generated.")

                st.markdown("### Visual Insights")

                safe_conclusion = escape_markdown(conclusion_result if conclusion_result else "⚠️ No Conclusion Generated.")
                st.markdown(safe_conclusion)
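
    # Tab 2: fixed charts over the whole dataset. The column names (job_title,
    # experience_level, employment_type, salary_in_usd) assume the demo
    # salaries dataset.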
    with tab2:
        st.subheader("📊 Comprehensive Data Visualizations")

        fig1 = px.histogram(st.session_state.df, x="job_title", title="Job Title Frequency")
        st.plotly_chart(fig1)

        fig2 = px.bar(
            st.session_state.df.groupby("experience_level")["salary_in_usd"].mean().reset_index(),
            x="experience_level", y="salary_in_usd",
            title="Average Salary by Experience Level"
        )
        st.plotly_chart(fig2)

        fig3 = px.box(st.session_state.df, x="employment_type", y="salary_in_usd",
                      title="Salary Distribution by Employment Type")
        st.plotly_chart(fig3)

    temp_dir.cleanup()
else:
    st.info("Please load a dataset to proceed.")

with st.sidebar:
    st.header("📖 Reference:")
    st.markdown("[SQL Agents w CrewAI & Llama 3 - Plaban Nayak](https://github.com/plaban1981/Agents/blob/main/SQL_Agents_with_CrewAI_and_Llama_3.ipynb)")