import os import gradio as gr import sqlite3 import sqlparse import requests import time import re import platform import openai import random from transformers import ( AutoModelForCausalLM, AutoTokenizer, StoppingCriteria, StoppingCriteriaList, ) # Additional Firebase imports import firebase_admin from firebase_admin import credentials, firestore import json import base64 import torch print(f"Running on {platform.system()}") if platform.system() == "Windows" or platform.system() == "Darwin": from dotenv import load_dotenv load_dotenv() quantized_model = "richardr1126/spider-skeleton-wizard-coder-8bit" merged_model = "richardr1126/spider-skeleton-wizard-coder-merged" initial_model = "WizardLM/WizardCoder-15B-V1.0" lora_model = "richardr1126/spider-skeleton-wizard-coder-qlora" dataset = "richardr1126/spider-skeleton-context-instruct" model_name = os.getenv("HF_MODEL_NAME", None) tok = AutoTokenizer.from_pretrained(model_name) max_new_tokens = 1024 print(f"Starting to load the model {model_name}") m = AutoModelForCausalLM.from_pretrained( model_name, device_map=0, #load_in_8bit=True, ) m.config.pad_token_id = m.config.eos_token_id m.generation_config.pad_token_id = m.config.eos_token_id print(f"Successfully loaded the model {model_name} into memory") ################# Firebase code ################# # Initialize Firebase base64_string = os.getenv('FIREBASE') base64_bytes = base64_string.encode('utf-8') json_bytes = base64.b64decode(base64_bytes) json_data = json_bytes.decode('utf-8') firebase_auth = json.loads(json_data) # Load credentials and initialize Firestore cred = credentials.Certificate(firebase_auth) firebase_admin.initialize_app(cred) db = firestore.client() def log_message_to_firestore(input_message, db_info, temperature, response_text): doc_ref = db.collection('logs').document() log_data = { 'timestamp': firestore.SERVER_TIMESTAMP, 'temperature': temperature, 'db_info': db_info, 'input': input_message, 'output': response_text, } doc_ref.set(log_data) rated_outputs = set() # set to store already rated outputs def log_rating_to_firestore(input_message, db_info, temperature, response_text, rating): global rated_outputs output_id = f"{input_message} {db_info} {response_text} {temperature}" if output_id in rated_outputs: gr.Warning("You've already rated this output!") return if not input_message or not response_text or not rating: gr.Info("You haven't asked a question yet!") return rated_outputs.add(output_id) doc_ref = db.collection('ratings').document() log_data = { 'timestamp': firestore.SERVER_TIMESTAMP, 'temperature': temperature, 'db_info': db_info, 'input': input_message, 'output': response_text, 'rating': rating, } doc_ref.set(log_data) gr.Info("Thanks for your feedback!") ############### End Firebase code ############### def format(text): # Split the text by "|", and get the last element in the list which should be the final query try: final_query = text.split("|")[1].strip() except Exception: final_query = text try: # Attempt to format SQL query using sqlparse formatted_query = sqlparse.format(final_query, reindent=True, keyword_case='upper') except Exception: # If formatting fails, use the original, unformatted query formatted_query = final_query # Convert SQL to markdown (not required, but just to show how to use the markdown module) final_query_markdown = f"{formatted_query}" return final_query_markdown def extract_db_code(text): pattern = r'```(?:\w+)?\s?(.*?)```' matches = re.findall(pattern, text, re.DOTALL) return [match.strip() for match in matches] def generate_dummy_db(db_info, question): pre_prompt = "Generate a SQLite database with dummy data for this database. Make sure you add dummy data relevant to the question and don't write any SELECT statements or actual queries.\n\n" prompt = pre_prompt + db_info + "\n\nQuestion: " + question while True: try: response = openai.ChatCompletion.create( model="gpt-3.5-turbo", messages=[ {"role": "system", "content": "You are a SQLite dummy database generator. 1. You will create the specified dummy database. 2. Insert the dummy data. and 3. Output the only the code in a SQL code block."}, {"role": "user", "content": prompt} ], #temperature=0.7, ) response_text = response['choices'][0]['message']['content'] db_code = extract_db_code(response_text) print(db_code) return db_code except Exception as e: print(f'Error occurred: {str(e)}') print('Waiting for 20 seconds before retrying...') time.sleep(20) def test_query_on_dummy_db(db_code, query): try: # Connect to an SQLite database in memory conn = sqlite3.connect(':memory:') cursor = conn.cursor() # Iterate over each extracted SQL block and split them into individual commands for sql_block in db_code: statements = sqlparse.split(sql_block) # Execute each SQL command for statement in statements: if statement: cursor.execute(statement) # Run the provided test query against the database cursor.execute(query) print(cursor.fetchall()) # Close the connection conn.close() # If everything executed without errors, return True return True except Exception as e: print(f"Error encountered: {e}") return False def generate(input_message: str, db_info="", temperature=0.2, top_p=0.9, top_k=0, repetition_penalty=1.08, format_sql=True, log=False, num_return_sequences=1, num_beams=1, do_sample=False): if num_return_sequences > num_beams: gr.Warning("Num return sequences must be less than or equal to num beams.") stop_token_ids = tok.convert_tokens_to_ids(["###"]) class StopOnTokens(StoppingCriteria): def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool: for stop_id in stop_token_ids: if input_ids[0][-1] == stop_id: return True return False stop = StopOnTokens() # Format the user's input message messages = f"Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n### Instruction:\n\nConvert text to sql: {input_message} {db_info}\n\n### Response:\n\n" input_ids = tok(messages, return_tensors="pt").input_ids input_ids = input_ids.to(m.device) generate_kwargs = dict( input_ids=input_ids, max_new_tokens=max_new_tokens, temperature=temperature, top_p=top_p, top_k=top_k, repetition_penalty=repetition_penalty, #streamer=streamer, stopping_criteria=StoppingCriteriaList([stop]), num_return_sequences=num_return_sequences, num_beams=num_beams, do_sample=do_sample, ) tokens = m.generate(**generate_kwargs) db_code = None if (num_return_sequences > 1): db_code = generate_dummy_db(db_info, input_message) responses = [] for response in tokens: response_text = tok.decode(response, skip_special_tokens=True) # Only take what comes after ### Response: response_text = response_text.split("### Response:")[1].strip() query = format(response_text) if format_sql else response_text if (num_return_sequences > 1): query = query.replace("\n", " ").replace("\t", " ").strip() # Test against dummy database success = test_query_on_dummy_db(db_code, query) # Format again query = format(query) if format_sql else query if success: responses.append(query) else: responses.append(query) # Choose a random response from responses output = responses[0] if len(responses) > 0 else "###" if log: # Log the request to Firestore log_message_to_firestore(input_message, db_info, temperature, output) return output # Gradio UI Code with gr.Blocks(theme='gradio/soft') as demo: # Elements stack vertically by default just define elements in order you want them to stack header = gr.HTML("""
⚠️ Should take 30-60s to generate. Please rate the response, it helps a lot. If you get a blank output, the model server is currently down, please try again another time.
🌐 Leveraging the bitsandbytes 8-bit version of {merged_model} model.
🔗 How it's made: {initial_model} was finetuned to create {lora_model}, then merged together to create {merged_model}.
📉 Fine-tuning was performed using QLoRA techniques on the {dataset} dataset. You can view training metrics on the QLoRa adapter HF Repo.
📊 All inputs/outputs are logged to Firebase to see how the model is doing. You can also leave a rating for each generated SQL the model produces, which gets sent to the database as well.
""") examples = gr.Examples([ ["What is the average, minimum, and maximum age of all singers from France?", "| stadium : stadium_id , location , name , capacity , highest , lowest , average | singer : singer_id , name , country , song_name , song_release_year , age , is_male | concert : concert_id , concert_name , theme , stadium_id , year | singer_in_concert : concert_id , singer_id | concert.stadium_id = stadium.stadium_id | singer_in_concert.singer_id = singer.singer_id | singer_in_concert.concert_id = concert.concert_id |"], ["How many students have dogs?", "| student : stuid , lname , fname , age , sex , major , advisor , city_code | has_pet : stuid , petid | pets : petid , pettype , pet_age , weight | has_pet.stuid = student.stuid | has_pet.petid = pets.petid | pets.pettype = 'Dog' |"], ], inputs=[input_text, db_info, temperature, top_p, top_k, repetition_penalty, format_sql], fn=generate, cache_examples=False if platform.system() == "Windows" or platform.system() == "Darwin" else True, outputs=output_box) with gr.Accordion("More Examples", open=False): examples = gr.Examples([ ["What is the average weight of pets of all students?", "| student : stuid , lname , fname , age , sex , major , advisor , city_code | has_pet : stuid , petid | pets : petid , pettype , pet_age , weight | has_pet.stuid = student.stuid | has_pet.petid = pets.petid |"], ["How many male singers performed in concerts in the year 2023?", "| stadium : stadium_id , location , name , capacity , highest , lowest , average | singer : singer_id , name , country , song_name , song_release_year , age , is_male | concert : concert_id , concert_name , theme , stadium_id , year | singer_in_concert : concert_id , singer_id | concert.stadium_id = stadium.stadium_id | singer_in_concert.singer_id = singer.singer_id | singer_in_concert.concert_id = concert.concert_id |"], ["For students who have pets, how many pets does each student have? List their ids instead of names.", "| student : stuid , lname , fname , age , sex , major , advisor , city_code | has_pet : stuid , petid | pets : petid , pettype , pet_age , weight | has_pet.stuid = student.stuid | has_pet.petid = pets.petid |"], ["Show location and name for all stadiums with a capacity between 5000 and 10000.", "| stadium : stadium_id , location , name , capacity , highest , lowest , average | singer : singer_id , name , country , song_name , song_release_year , age , is_male | concert : concert_id , concert_name , theme , stadium_id , year | singer_in_concert : concert_id , singer_id | concert.stadium_id = stadium.stadium_id | singer_in_concert.singer_id = singer.singer_id | singer_in_concert.concert_id = concert.concert_id |"], ["What are the number of concerts that occurred in the stadium with the largest capacity ?", "| stadium : stadium_id , location , name , capacity , highest , lowest , average | singer : singer_id , name , country , song_name , song_release_year , age , is_male | concert : concert_id , concert_name , theme , stadium_id , year | singer_in_concert : concert_id , singer_id | concert.stadium_id = stadium.stadium_id | singer_in_concert.singer_id = singer.singer_id | singer_in_concert.concert_id = concert.concert_id |"], ["Which student has the oldest pet?", "| student : stuid , lname , fname , age , sex , major , advisor , city_code | has_pet : stuid , petid | pets : petid , pettype , pet_age , weight | has_pet.stuid = student.stuid | has_pet.petid = pets.petid |"], ["List the names of all singers who performed in a concert with the theme 'Rock'", "| stadium : stadium_id , location , name , capacity , highest , lowest , average | singer : singer_id , name , country , song_name , song_release_year , age , is_male | concert : concert_id , concert_name , theme , stadium_id , year | singer_in_concert : concert_id , singer_id | concert.stadium_id = stadium.stadium_id | singer_in_concert.singer_id = singer.singer_id | singer_in_concert.concert_id = concert.concert_id |"], ["List all students who don't have pets.", "| student : stuid , lname , fname , age , sex , major , advisor , city_code | has_pet : stuid , petid | pets : petid , pettype , pet_age , weight | has_pet.stuid = student.stuid | has_pet.petid = pets.petid |"], ], inputs=[input_text, db_info, temperature, top_p, top_k, repetition_penalty, format_sql], fn=generate, cache_examples=False, outputs=output_box) readme_content = requests.get(f"https://huggingface.co/{merged_model}/raw/main/README.md").text readme_content = re.sub('---.*?---', '', readme_content, flags=re.DOTALL) #Remove YAML front matter with gr.Accordion("📖 Model Readme", open=True): readme = gr.Markdown( readme_content, ) with gr.Accordion("Disabled Options:", open=False): log = gr.Checkbox(label="Log to Firebase", value=True, interactive=False) # When the button is clicked, call the generate function, inputs are taken from the UI elements, outputs are sent to outputs elements run_button.click(fn=generate, inputs=[input_text, db_info, temperature, top_p, top_k, repetition_penalty, format_sql, log, num_return_sequences, num_beams, do_sample], outputs=output_box, api_name="txt2sql") clear_button.add([input_text, db_info, output_box]) # Firebase code - for rating the generated SQL (remove if you don't want to use Firebase) rate_up.click(fn=log_rating_to_firestore, inputs=[input_text, db_info, temperature, output_box, rate_up]) rate_down.click(fn=log_rating_to_firestore, inputs=[input_text, db_info, temperature, output_box, rate_down]) demo.queue(concurrency_count=1, max_size=20).launch(debug=True)