PawinC committed
Commit 2ba1842
1 Parent(s): 2b0e1fe

Quick bugfix

Files changed (1)
  1. app/main.py +3 -3
app/main.py CHANGED
@@ -13,9 +13,9 @@ from typing import Optional
 # MODEL LOADING, FUNCTIONS, AND TESTING
 
 print("Loading model...")
-SAllm = Llama(model_path="/models/final-gemma2b_SA-Q8_0.gguf", mmap=False, mlock=True)
-FIllm = Llama(model_path="/models/final-gemma7b_FI-Q8_0.gguf", mmap=False, mlock=True)
-WIllm = Llama(model_path="/models/final-GemmaWild7b-Q8_0.gguf", mmap=False, mlock=True)
+SAllm = Llama(model_path="/models/final-gemma2b_SA-Q8_0.gguf", use_mmap=False, use_mlock=True)
+FIllm = Llama(model_path="/models/final-gemma7b_FI-Q8_0.gguf", use_mmap=False, use_mlock=True)
+WIllm = Llama(model_path="/models/final-GemmaWild7b-Q8_0.gguf", use_mmap=False, use_mlock=True)
 # n_gpu_layers=28, # Uncomment to use GPU acceleration
 # seed=1337, # Uncomment to set a specific seed
 # n_ctx=2048, # Uncomment to increase the context window
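
Note: the renamed keywords match the llama-cpp-python Llama constructor, whose memory options are called use_mmap and use_mlock rather than mmap/mlock. A minimal sketch (not part of this commit) that checks the accepted parameter names without loading a multi-gigabyte GGUF file, assuming llama-cpp-python is installed:

import inspect
from llama_cpp import Llama

# Inspect the constructor signature instead of instantiating a model.
params = inspect.signature(Llama.__init__).parameters
print("use_mmap" in params, "use_mlock" in params)  # expected: True True
print("mmap" in params, "mlock" in params)          # expected: False False

With use_mmap=False and use_mlock=True, each model is read fully into RAM and locked against swapping, so the three GGUF files above need to fit in physical memory.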