SwastikM committed
Commit f688400
1 Parent(s): bbcfbc8

Update README.md

Files changed (1)
  1. README.md +4 -4
README.md CHANGED
@@ -62,11 +62,11 @@ instruction = """Help me set up my daily to-do list!"""
  ```
  ```python
  from peft import PeftModel, PeftConfig
- from transformers import AutoModelForCausalLM
+ from transformers import AutoModelForCausalLM,AutoTokenizer
 
- config = PeftConfig.from_pretrained("SwastikM/Llama-2-7B-Chat-text2code") #PEFT Config
- model = AutoModelForCausalLM.from_pretrained("TheBloke/Llama-2-7b-Chat-GPTQ") #Loading the Base Model
- model = PeftModel.from_pretrained(model, "SwastikM/Llama-2-7B-Chat-text2code") #Combining Trained Adapter with Base Model
+ config = PeftConfig.from_pretrained("SwastikM/Llama-2-7B-Chat-text2code") #PEFT Config
+ model = AutoModelForCausalLM.from_pretrained("TheBloke/Llama-2-7b-Chat-GPTQ",device_map='auto') #Loading the Base Model
+ model = PeftModel.from_pretrained(model, "SwastikM/Llama-2-7B-Chat-text2code") #Combining Trained Adapter with Base Model
  tokenizer = AutoTokenizer.from_pretrained("SwastikM/Llama-2-7B-Chat-text2code")
 
  inputs = tokenizer(instruction, return_tensors="pt").input_ids.to('cuda')
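
For context, a minimal sketch of how the updated snippet could run end to end. The loading lines mirror the new side of the diff; the `instruction` string, the `model.generate` call, `max_new_tokens=256`, and the decoding step are illustrative assumptions not shown in this hunk.

```python
from peft import PeftModel, PeftConfig
from transformers import AutoModelForCausalLM, AutoTokenizer

# Example prompt; the README defines `instruction` earlier in the file.
instruction = """Help me set up my daily to-do list!"""

config = PeftConfig.from_pretrained("SwastikM/Llama-2-7B-Chat-text2code")  # PEFT config
model = AutoModelForCausalLM.from_pretrained("TheBloke/Llama-2-7b-Chat-GPTQ", device_map='auto')  # base model
model = PeftModel.from_pretrained(model, "SwastikM/Llama-2-7B-Chat-text2code")  # attach trained adapter
tokenizer = AutoTokenizer.from_pretrained("SwastikM/Llama-2-7B-Chat-text2code")

inputs = tokenizer(instruction, return_tensors="pt").input_ids.to('cuda')

# Generate and decode a completion; max_new_tokens is an illustrative value.
outputs = model.generate(inputs, max_new_tokens=256)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```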