Update README.md
README.md
@@ -25,7 +25,7 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
 tokenizer = AutoTokenizer.from_pretrained("ekshat/Llama-2-7b-chat-finetune-for-text2sql")
 model = AutoModelForCausalLM.from_pretrained("ekshat/Llama-2-7b-chat-finetune-for-text2sql")
 
-# Run text generation pipeline with our
+# Run text generation pipeline with our model
 context = "CREATE TABLE Student (name VARCHAR, college VARCHAR, age VARCHAR, group VARCHAR, marks VARCHAR)"
 question = "List the name of Students belongs to school 'St. Xavier' and having marks greater than '600'"
 
@@ -37,14 +37,14 @@ prompt = f"""Below is an context that describes a sql query, paired with an ques
 ### Answer:"""
 
 sequences = pipeline(
-prompt,
-do_sample=True,
-top_k=10,
-num_return_sequences=1,
-eos_token_id=tokenizer.eos_token_id,
-max_length=200,
+    prompt,
+    do_sample=True,
+    top_k=10,
+    num_return_sequences=1,
+    eos_token_id=tokenizer.eos_token_id,
+    max_length=200,
 )
 for seq in sequences:
-print(f"Result: {seq['generated_text']}")
+    print(f"Result: {seq['generated_text']}")
 
 ```
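The hunks reference a `pipeline` object and an f-string `prompt` that are built between the two hunks and therefore never appear in this diff. For context, below is a minimal sketch of what that elided setup typically looks like with the Hugging Face `transformers` API; the `model_name` variable, the pipeline configuration, and the prompt wording are assumptions for illustration, not taken from the README.

```python
# Sketch of the setup the diff hunks elide; prompt wording is illustrative only.
import transformers
from transformers import AutoTokenizer, AutoModelForCausalLM

model_name = "ekshat/Llama-2-7b-chat-finetune-for-text2sql"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Text-generation pipeline built from the already-loaded model and tokenizer.
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
)

context = "CREATE TABLE Student (name VARCHAR, college VARCHAR, age VARCHAR, group VARCHAR, marks VARCHAR)"
question = "List the name of Students belongs to school 'St. Xavier' and having marks greater than '600'"

# Hypothetical instruction-style prompt; the README's own template is
# truncated in the hunk header, so this wording is an assumption.
prompt = f"""Below is a context that describes a SQL table, paired with a question.
Write a SQL query that answers the question.

### Context:
{context}

### Question:
{question}

### Answer:"""

sequences = pipeline(
    prompt,
    do_sample=True,
    top_k=10,
    num_return_sequences=1,
    eos_token_id=tokenizer.eos_token_id,
    max_length=200,
)
for seq in sequences:
    print(f"Result: {seq['generated_text']}")
```

Building the pipeline from the already-loaded `model` and `tokenizer` objects, rather than from the model name string, avoids loading the weights twice and keeps the `eos_token_id` argument consistent with the tokenizer actually in use.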