JiunYi committed
Commit c50b520
1 Parent(s): e36f019

Update README.md

Files changed (1)
  1. README.md +4 -7
README.md CHANGED
@@ -29,19 +29,16 @@ from peft import PeftModel
 from transformers import GenerationConfig, LlamaForCausalLM, LlamaTokenizer
 
 
-max_memory = {i: "15GIB" for i in range(torch.cuda.device_count())}
-tokenizer = LlamaTokenizer.from_pretrained(base_model)
+tokenizer = LlamaTokenizer.from_pretrained("decapoda-research/llama-7b-hf")
 model = LlamaForCausalLM.from_pretrained(
-    base_model,
+    "decapoda-research/llama-7b-hf",
     load_in_8bit=True,
     torch_dtype=torch.float16,
     device_map="auto"
-    max_memory=max_memory
 )
 model = PeftModel.from_pretrained(
     model,
-    lora_weights,
-    torch_dtype=torch.float16,
-    max_memory=max_memory
+    "DataAgent/llama-7b-alpaca-zh-20k",
+    torch_dtype=torch.float16
 )
 ```
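
Once the base model and the LoRA adapter are loaded as in the updated snippet, the model can be used for inference with `model.generate`. The following is a minimal usage sketch, not part of this commit or the README: the Alpaca-style prompt template and the generation settings are illustrative assumptions.

```python
# Minimal inference sketch (assumes `model` and `tokenizer` from the snippet above).
# The prompt template and generation settings below are illustrative, not from the README.
import torch
from transformers import GenerationConfig

prompt = "### Instruction:\nSummarize what a LoRA adapter is in one sentence.\n\n### Response:\n"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

generation_config = GenerationConfig(
    do_sample=True,
    temperature=0.7,
    top_p=0.9,
    max_new_tokens=128,
)

with torch.no_grad():
    output_ids = model.generate(**inputs, generation_config=generation_config)

print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```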