import os
import sqlite3
import tempfile

import openai
import pandas as pd
import sqlparse
import streamlit as st
from langchain import OpenAI
from langchain.agents import create_sql_agent
from langchain.agents.agent_toolkits import SQLDatabaseToolkit
from langchain.chains import RetrievalQA
from langchain.document_loaders import CSVLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import HuggingFacePipeline
from langchain.sql_database import SQLDatabase
from langchain.vectorstores import FAISS
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# OpenAI API key (read from the environment; never hard-code it)
openai.api_key = os.getenv("OPENAI_API_KEY")

# Step 1: Upload a CSV data file (or fall back to the bundled default)
csv_file = st.file_uploader("Upload your CSV file", type=["csv"])
if csv_file is None:
    csv_path = "default_data.csv"
    st.write("Using default_data.csv file.")
else:
    # Persist the upload to disk so CSVLoader, which needs a file path,
    # reads the same data that pandas reads.
    csv_path = os.path.join(tempfile.gettempdir(), csv_file.name)
    with open(csv_path, "wb") as f:
        f.write(csv_file.getbuffer())

data = pd.read_csv(csv_path)
st.write(f"Data Preview ({os.path.basename(csv_path)}):")
st.dataframe(data.head())

# Step 2: Load the CSV into SQLite with a table name derived from the file.
# A file-backed database is used rather than ':memory:' because LangChain's
# SQLDatabase opens its own connection and cannot see another connection's
# private in-memory database.
db_path = "app_data.db"
conn = sqlite3.connect(db_path)
table_name = os.path.splitext(csv_file.name)[0] if csv_file else "default_table"
data.to_sql(table_name, conn, index=False, if_exists="replace")

# SQL table metadata (used below for validation)
valid_columns = list(data.columns)

# Step 3: Load LLaMA and wrap it as a LangChain LLM for context retrieval
# (RAG). RetrievalQA expects a LangChain LLM interface, not a raw
# transformers model. Loading a 7B model is slow; consider caching it with
# st.cache_resource.
tokenizer = AutoTokenizer.from_pretrained("huggyllama/llama-7b")
llama_model = AutoModelForCausalLM.from_pretrained("huggyllama/llama-7b")
llama_pipeline = pipeline(
    "text-generation", model=llama_model, tokenizer=tokenizer, max_new_tokens=256
)
llama_llm = HuggingFacePipeline(pipeline=llama_pipeline)

# Step 4: Implement RAG with FAISS for vectorized document retrieval
embeddings = OpenAIEmbeddings()  # other embeddings work here as well
loader = CSVLoader(file_path=csv_path)
documents = loader.load()

# Use FAISS for retrieval and document search
vector_store = FAISS.from_documents(documents, embeddings)
retriever = vector_store.as_retriever()
rag_chain = RetrievalQA.from_chain_type(llm=llama_llm, retriever=retriever)

# Step 5: OpenAI-backed SQL agent pointed at the same SQLite database
openai_llm = OpenAI(temperature=0)
db = SQLDatabase.from_uri(f"sqlite:///{db_path}")
toolkit = SQLDatabaseToolkit(db=db, llm=openai_llm)
sql_agent = create_sql_agent(llm=openai_llm, toolkit=toolkit, verbose=True)

# Step 6: Heuristic column validation
def validate_sql(query, valid_columns):
    """Check that the query references at least one known column (or `*`)."""
    if "*" in query:
        return True
    return any(column in query for column in valid_columns)

# Step 7: Syntax validation with `sqlparse`
def validate_sql_with_sqlparse(query):
    """Check that sqlparse can parse the query into at least one statement."""
    parsed_query = sqlparse.parse(query)
    return len(parsed_query) > 0

# Step 8: Get the user prompt, retrieve context, and generate a SQL query
user_prompt = st.text_input("Enter your natural language prompt:")
if user_prompt:
    try:
        # Step 9: Retrieve relevant context using the LLaMA RAG chain
        rag_result = rag_chain.run(user_prompt)
        st.write(f"Retrieved Context from LLaMA RAG: {rag_result}")

        # Step 10: Generate the SQL query from the prompt plus retrieved context
        query_input = f"{user_prompt} {rag_result}"
        generated_sql = sql_agent.run(query_input)
        st.write(f"Generated SQL Query: {generated_sql}")

        # Step 11: Validate the SQL query before execution
        if not validate_sql_with_sqlparse(generated_sql):
            st.write("Generated SQL is not valid.")
        elif not validate_sql(generated_sql, valid_columns):
            st.write("Generated SQL references invalid columns.")
        else:
            # Step 12: Execute the SQL query
            result = pd.read_sql(generated_sql, conn)
            st.write("Query Results:")
            st.dataframe(result)
    except Exception as e:
        st.write(f"Error: {e}")