TheBloke committed on
Commit
ef916a4
1 Parent(s): 423d1b3

Upload README.md

Browse files
Files changed (1) hide show
  1. README.md +1 -1
README.md CHANGED
@@ -170,7 +170,7 @@ CT_METAL=1 pip install ctransformers>=0.2.24 --no-binary ctransformers
170
  from ctransformers import AutoModelForCausalLM
171
 
172
  # Set gpu_layers to the number of layers to offload to GPU. Set to 0 if no GPU acceleration is available on your system.
173
- llm = AutoModelForCausalLM.from_pretrained("None", model_file="codellama-34b-instruct.q4_K_M.gguf", model_type="llama", gpu_layers=50)
174
 
175
  print(llm("AI is going to"))
176
  ```
 
170
  from ctransformers import AutoModelForCausalLM
171
 
172
  # Set gpu_layers to the number of layers to offload to GPU. Set to 0 if no GPU acceleration is available on your system.
173
+ llm = AutoModelForCausalLM.from_pretrained("TheBloke/CodeLlama-34B-Instruct-GGUF", model_file="codellama-34b-instruct.q4_K_M.gguf", model_type="llama", gpu_layers=50)
174
 
175
  print(llm("AI is going to"))
176
  ```