# Hugging Face Spaces demo: natural-language question -> SQL query
# (non-code page header "Spaces: Running Running" removed so the file parses)
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM


@st.cache_resource
def _load_model(name: str):
    """Load and cache the tokenizer/model pair.

    Streamlit re-executes the whole script on every user interaction;
    without caching, the ~1B-parameter model would be re-instantiated
    on each rerun. `st.cache_resource` keeps one shared copy per process.
    """
    tok = AutoTokenizer.from_pretrained(name)
    mdl = AutoModelForCausalLM.from_pretrained(name)
    return tok, mdl


# Model checkpoint used for text-to-SQL generation.
model_name = "premai-io/prem-1B-SQL"
tokenizer, model = _load_model(model_name)
# Function to generate SQL from the user's input
def generate_sql_query(question: str) -> str:
    """Generate a SQL query for a natural-language *question*.

    Returns only the model's continuation (the prompt prefix is stripped),
    decoded with special tokens removed.
    """
    input_text = f"Question: {question} SQL Query:"
    # Tokenize the input
    inputs = tokenizer(input_text, return_tensors="pt", padding=True, truncation=True)
    # Pass the attention mask explicitly (required when padding is enabled),
    # and bound *new* tokens instead of total length: max_length counts the
    # prompt too, so a long question could leave no room for the answer.
    outputs = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_new_tokens=100,
    )
    # Decode only the tokens generated after the prompt, so the caller
    # receives just the SQL rather than "Question: ... SQL Query: ...".
    prompt_len = inputs["input_ids"].shape[1]
    sql_query = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)
    return sql_query
# Streamlit app UI | |
# Streamlit app UI
def main():
    """Render the UI: accept a question, display the generated SQL."""
    st.title("Text-to-SQL with prem-1B-SQL Model")
    st.write("This app generates SQL queries based on your natural language question.")

    # Ask the user for a natural-language question.
    question = st.text_input("Enter your question:")
    if not question:
        # Nothing entered yet — render nothing further on this run.
        return

    # Run generation first, then show the labelled result.
    sql = generate_sql_query(question)
    st.write("Generated SQL Query:")
    st.code(sql)


if __name__ == "__main__":
    main()