ICEPVP8977 committed on
Commit 346beef
1 Parent(s): c8dd085

Update README.md

Files changed (1):
  1. README.md (+4 -4)
README.md CHANGED
@@ -36,17 +36,17 @@ import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer


-# Load the merged model and tokenizer
-merged_model = AutoModelForCausalLM.from_pretrained("./model", torch_dtype=torch.float16, device_map="auto")
+
+model = AutoModelForCausalLM.from_pretrained("./model", torch_dtype=torch.float16, device_map="auto")
 tokenizer = AutoTokenizer.from_pretrained("./model")
 ```


 ```python
 prompt = "Your_question_here"
-inputs = tokenizer(prompt, return_tensors="pt").to(merged_model.device)
+inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
 max_new_tokens = 2000 # Set the maximum number of tokens in the response
-outputs = merged_model.generate(**inputs, max_new_tokens=max_new_tokens)
+outputs = model.generate(**inputs, max_new_tokens=max_new_tokens)
 response = tokenizer.decode(outputs[0], skip_special_tokens=True)
 print(response)
 ```
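
For reference, the README snippets after this commit assemble into one runnable script. This is a minimal sketch, assuming the model weights and tokenizer have already been saved locally under `./model` (the path used in the README) and that `transformers` and a PyTorch build are installed:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the model and tokenizer from the local ./model directory,
# in half precision and with weights placed automatically across devices
model = AutoModelForCausalLM.from_pretrained(
    "./model", torch_dtype=torch.float16, device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("./model")

prompt = "Your_question_here"
# Move the tokenized prompt to the same device the model was placed on
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

max_new_tokens = 2000  # Maximum number of tokens in the response
outputs = model.generate(**inputs, max_new_tokens=max_new_tokens)

# Decode the generated token ids, dropping special tokens
response = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(response)
```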