ZwwWayne and kmno4 committed
Commit: cfdbca8
Parent: 1fa74c1

Update README.md (#1)


- Update README.md (01b4d842703e22d0293032fb699dc5e44caf041a)


Co-authored-by: Song <kmno4@users.noreply.huggingface.co>

Files changed (1)
  1. README.md +3 -3
README.md CHANGED
@@ -142,9 +142,9 @@ InternLM2 models have the following technical characteristics
 ```python
 import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM
-tokenizer = AutoTokenizer.from_pretrained("internlm/internlm2-chat-1_8b-sft", trust_remote_code=True)
+tokenizer = AutoTokenizer.from_pretrained("internlm/internlm2-chat-1_8b", trust_remote_code=True)
 # `torch_dtype=torch.float16` loads the model in float16 precision; otherwise transformers loads it as float32, which may exhaust GPU memory
-model = AutoModelForCausalLM.from_pretrained("internlm/internlm2-chat-1_8b-sft", torch_dtype=torch.float16, trust_remote_code=True).cuda()
+model = AutoModelForCausalLM.from_pretrained("internlm/internlm2-chat-1_8b", torch_dtype=torch.float16, trust_remote_code=True).cuda()
 model = model.eval()
 response, history = model.chat(tokenizer, "你好", history=[])
 print(response)
@@ -159,7 +159,7 @@ print(response)
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer

-model_path = "internlm/internlm2-chat-1_8b-sft"
+model_path = "internlm/internlm2-chat-1_8b"
 model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.float16, trust_remote_code=True).cuda()
 tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
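After this change, the first snippet reads as below. This is a minimal runnable sketch of the updated code, assuming a CUDA GPU and access to the internlm/internlm2-chat-1_8b weights on the Hugging Face Hub; the `chat` helper is defined in the repo's remote code, which is why `trust_remote_code=True` is required.

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the tokenizer and model from the corrected repo id (no "-sft" suffix)
tokenizer = AutoTokenizer.from_pretrained("internlm/internlm2-chat-1_8b", trust_remote_code=True)
# `torch_dtype=torch.float16` loads the weights in float16; the float32 default can exhaust GPU memory
model = AutoModelForCausalLM.from_pretrained(
    "internlm/internlm2-chat-1_8b", torch_dtype=torch.float16, trust_remote_code=True
).cuda()
model = model.eval()

# `chat` comes from the model's remote code and returns (response, history)
response, history = model.chat(tokenizer, "你好", history=[])  # "你好" = "Hello"
print(response)
```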
 
 
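The second hunk only swaps `model_path`; the diff cuts off before the rest of that snippet. A hedged sketch of how the streaming variant typically continues, assuming the remote code exposes a `stream_chat` generator as InternLM chat models do; the loop below is illustrative and not part of this commit.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_path = "internlm/internlm2-chat-1_8b"
model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.float16, trust_remote_code=True).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = model.eval()

# Assumption: `stream_chat` yields (partial_response, history) pairs as generation proceeds
length = 0
for response, history in model.stream_chat(tokenizer, "你好", history=[]):
    print(response[length:], flush=True, end="")  # print only the newly generated text
    length = len(response)
```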