|
---
language:
- en
tags:
- text-to-sql
- gpt2
- gpt2-medium
- nlp-to-sql
- text2sql
- sql
datasets:
- b-mc2/sql-create-context
license: other
---
|
# Model Card |
|
|
|
The base model is gpt2-medium, fine-tuned on the b-mc2/sql-create-context dataset to translate natural-language questions into SQL queries.

This is my first fine-tuned LLM project.
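
For reference, each record in the training dataset pairs an English question with a CREATE TABLE context and a target SQL query. Below is a minimal sketch of inspecting it with the `datasets` library; the field names assume the dataset's published schema:

```python
from datasets import load_dataset

# Each record pairs a question with a CREATE TABLE context
# and the target SQL answer.
ds = load_dataset("b-mc2/sql-create-context", split="train")
print(ds[0])  # e.g. {'question': ..., 'context': ..., 'answer': ...}
```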
|
|
|
|
|
## Usage |
|
|
|
```python
import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer

# Fall back to CPU if no GPU is available
device = "cuda" if torch.cuda.is_available() else "cpu"

finetunedGPT = GPT2LMHeadModel.from_pretrained("rakeshkiriyath/gpt2Medium_text_to_sql").to(device)
finetunedTokenizer = GPT2Tokenizer.from_pretrained("rakeshkiriyath/gpt2Medium_text_to_sql")

def generate_text_to_sql(query, model, tokenizer, max_length=256):
    prompt = f"Translate the following English question to SQL: {query}"

    input_tensor = tokenizer.encode(prompt, return_tensors='pt').to(device)

    output = model.generate(input_tensor, max_length=max_length,
                            num_return_sequences=1, pad_token_id=tokenizer.eos_token_id)

    decoded_output = tokenizer.decode(output[0], skip_special_tokens=True)

    # Return only the SQL part (removing the input text)
    sql_output = decoded_output[len(prompt):].strip()

    return sql_output

queryList = ["I need a list of employees who joined in the company last 6 months with a salary hike of 30% ",
             "Give me loginid,status,company of a user who is mapped to the organization XYZ "]

for query in queryList:
    sql_result = generate_text_to_sql(query, finetunedGPT, finetunedTokenizer)
    print(sql_result, "\n")
```
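
One caveat in the snippet above: slicing the decoded string by `len(prompt)` works only because the GPT-2 tokenizer round-trips the prompt text essentially unchanged. A slightly more robust variant (a sketch, not from the original card, reusing `device` from the snippet above) slices the generated token ids instead:

```python
def generate_sql_by_token_slice(query, model, tokenizer, max_length=256):
    prompt = f"Translate the following English question to SQL: {query}"
    input_ids = tokenizer.encode(prompt, return_tensors='pt').to(device)
    output = model.generate(input_ids, max_length=max_length,
                            pad_token_id=tokenizer.eos_token_id)
    # Keep only the tokens generated after the prompt
    generated_ids = output[0][input_ids.shape[1]:]
    return tokenizer.decode(generated_ids, skip_special_tokens=True).strip()
```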
|
|
|
### Output |
|
|
|
```
SELECT COUNT(*) FROM employees WHERE last_6_months = "6 months" AND salary_hike = "30%"
SELECT loginid,status,company FROM user_mapped_to_organization WHERE mapping = "XYZ"
```
|
|
|
|
|
## Training Hyperparameters
|
|
|
```
num_train_epochs=1
per_device_train_batch_size=3
gradient_accumulation_steps=9
learning_rate=5e-5
weight_decay=0.01
```
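
The training script itself is not included in this card; as a rough sketch, the values above map onto Hugging Face `TrainingArguments` as shown below (the output directory is a placeholder). Note the effective batch size is 3 × 9 = 27 per device.

```python
from transformers import TrainingArguments

# Hypothetical reconstruction of the training configuration;
# only the listed hyperparameter values come from this card.
training_args = TrainingArguments(
    output_dir="gpt2Medium_text_to_sql",
    num_train_epochs=1,
    per_device_train_batch_size=3,   # 3 * 9 accumulation steps = effective batch of 27
    gradient_accumulation_steps=9,
    learning_rate=5e-5,
    weight_decay=0.01,
)
```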
|
|
|
|
|
## Evaluation |
|
|
|
| Step | Training Loss |
| ---- | ------------- |
| 500  | 0.337800 |
| 1000 | 0.262900 |
| 1500 | 0.253200 |
| 2000 | 0.246400 |
|
|
|
Final evaluation after 1 epoch:

| Metric | Value |
| ------ | ----- |
| eval_loss | 0.2369 |
| eval_runtime | 104.41 s |
| eval_samples_per_second | 67.04 |
| eval_steps_per_second | 8.38 |