Ravi theja K committed on
Commit
b68f514
1 Parent(s): 3cb9dc4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -6,7 +6,7 @@ import torch
6
  from transformers import AutoModelForCausalLM, AutoTokenizer
7
  from deepspeed.linear.config import QuantizationConfig
8
 
9
- Llamatokenizer = AutoTokenizer.from_pretrained(
10
  "Snowflake/snowflake-arctic-instruct",
11
  trust_remote_code=True
12
  )
@@ -24,7 +24,7 @@ model = AutoModelForCausalLM.from_pretrained(
24
 
25
  content = "5x + 35 = 7x - 60 + 10. Solve for x"
26
  messages = [{"role": "user", "content": content}]
27
- input_ids = Llamatokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to("cuda")
28
 
29
  outputs = model.generate(input_ids=input_ids, max_new_tokens=256)
30
- print(Llamatokenizer.decode(outputs[0]))
 
6
  from transformers import AutoModelForCausalLM, AutoTokenizer
7
  from deepspeed.linear.config import QuantizationConfig
8
 
9
+ tokenizer = AutoTokenizer.from_pretrained(
10
  "Snowflake/snowflake-arctic-instruct",
11
  trust_remote_code=True
12
  )
 
24
 
25
  content = "5x + 35 = 7x - 60 + 10. Solve for x"
26
  messages = [{"role": "user", "content": content}]
27
+ input_ids = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to("cuda")
28
 
29
  outputs = model.generate(input_ids=input_ids, max_new_tokens=256)
30
+ print(tokenizer.decode(outputs[0]))