jonathanjordan21 committed on
Commit 3454c35
1 Parent(s): 64aa63c

Update README.md

Files changed (1)
  1. README.md +2 -7
README.md CHANGED
@@ -69,19 +69,14 @@ Model inference
 question = "server of user id 11 with status active and server id 10"
 table = "table_name_77 ( user id INTEGER, status VARCHAR, server id INTEGER )"
 
-
-
-
-
 test = f"""Question: {question}\nTable: {table}\nSQL: """
 
 p = tokenizer(test, return_tensors='pt')
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
+out = model.to(device).generate(**p.to(device),max_new_tokens=50)
 
-
-
-print("output :", tokenizer.batch_decode(model.to(device).generate(**p.to(device),max_new_tokens=50),skip_special_tokens=True)[0])
+print("SQL Query :", tokenizer.batch_decode(out,skip_special_tokens=True)[0])
 
 ```
 
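
For reference, a minimal end-to-end sketch of the inference flow after this commit. The checkpoint id and the `AutoModelForSeq2SeqLM` class below are assumptions (placeholders), not confirmed by the diff; substitute the actual repository id and model class for this model.

```python
# Minimal sketch, assuming a seq2seq checkpoint loadable with the standard
# transformers Auto classes; model_id below is a hypothetical placeholder.
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_id = "jonathanjordan21/<this-repo>"  # placeholder, not the real id
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

question = "server of user id 11 with status active and server id 10"
table = "table_name_77 ( user id INTEGER, status VARCHAR, server id INTEGER )"

# Build the text-to-SQL prompt exactly as in the README snippet.
test = f"""Question: {question}\nTable: {table}\nSQL: """
p = tokenizer(test, return_tensors='pt')

# Move model and inputs to the same device, then generate and decode.
device = "cuda" if torch.cuda.is_available() else "cpu"
out = model.to(device).generate(**p.to(device), max_new_tokens=50)

print("SQL Query :", tokenizer.batch_decode(out, skip_special_tokens=True)[0])
```

Splitting generation and decoding into separate statements, which is what this commit does, keeps the `print` line readable and makes the generated token ids (`out`) reusable.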