DBMe committed
Commit 1174dc5
1 Parent(s): ea5f351

Upload config.yml

Files changed (1)
  1. config.yml +211 -0
config.yml ADDED
@@ -0,0 +1,211 @@
# Sample YAML file for configuration.
# Comment and uncomment values as needed.
# Every value has a default within the application.
# This file serves as a drop-in replacement for config.yml.

# Unless specified in the comments, DO NOT put these options in quotes!
# You can use https://www.yamllint.com/ if you want to check your YAML formatting.

# Options for networking
network:
  # The IP to host on (default: 127.0.0.1).
  # Use 0.0.0.0 to expose on all network adapters.
  host: 0.0.0.0

  # The port to host on (default: 5000).
  port: 5000

  # Disable HTTP token authentication with requests.
  # WARNING: This will make your instance vulnerable!
  # Turn on this option if you are ONLY connecting from localhost.
  disable_auth: false

  # Send tracebacks over the API (default: False).
  # NOTE: Only enable this for debug purposes.
  send_tracebacks: false

  # Select API servers to enable (default: ["OAI"]).
  # Possible values: OAI, Kobold.
  api_servers: ["oai"]

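  # Illustrative note (not part of the upstream template): with auth left enabled,
  # OAI-compatible clients generally send this instance's API token as a bearer token,
  # e.g. curl http://127.0.0.1:5000/v1/models -H "Authorization: Bearer <token>".
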
# Options for logging
logging:
  # Enable prompt logging (default: False).
  log_prompt: false

  # Enable generation parameter logging (default: False).
  log_generation_params: false

  # Enable request logging (default: False).
  # NOTE: Only use this for debugging!
  log_requests: false

# Options for model overrides and loading
# Please read the comments to understand how arguments are handled
# between initial and API loads
model:
  # Directory to look for models (default: models).
  # Windows users, do NOT put this path in quotes!
  model_dir: models

  # Allow direct loading of models from a completion or chat completion request (default: False).
  inline_model_loading: false

  # Sends dummy model names when the models endpoint is queried.
  # Enable this if the client is looking for specific OAI models.
  use_dummy_models: false

  # An initial model to load.
  # Make sure the model is located in the model directory!
  # REQUIRED: This must be filled out to load a model on startup.
  model_name: Llama-3.1-Nemotron-70B-Instruct-HF_exl2_4.6bpw

  # Names of args to use as a fallback for API load requests (default: []).
  # For example, if you always want cache_mode to be Q4 instead of the value from the initial model load, add "cache_mode" to this array.
  # Example: ['max_seq_len', 'cache_mode'].
  use_as_default: []

  # Max sequence length (default: Empty).
  # Fetched from the model's base sequence length in config.json by default.
  max_seq_len: 65536

  # Overrides base model context length (default: Empty).
  # WARNING: Don't set this unless you know what you're doing!
  # Again, do NOT use this for configuring context length; use max_seq_len above ^
  override_base_seq_len:

  # Load model with tensor parallelism.
  # Falls back to autosplit if a GPU split isn't provided.
  # This ignores the gpu_split_auto value.
  tensor_parallel: false

  # Automatically allocate resources to GPUs (default: True).
  # Not parsed for single GPU users.
  gpu_split_auto: true

  # Reserve VRAM used for autosplit loading (default: 96 MB on GPU 0).
  # Represented as an array of MB per GPU.
  autosplit_reserve: [0]

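  # Illustrative example (not from the upstream template): to reserve 512 MB on GPU 0
  # and 96 MB on GPU 1, you could use:
  # autosplit_reserve: [512, 96]
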
  # An integer array of GBs of VRAM to split between GPUs (default: []).
  # Used with tensor parallelism.
  gpu_split: []

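  # Illustrative example (not from the upstream template): to split a model across
  # two GPUs with roughly 20 GB on the first and 24 GB on the second:
  # gpu_split: [20, 24]
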
  # Rope scale (default: 1.0).
  # Same as compress_pos_emb.
  # Use if the model was trained on long context with rope.
  # Leave blank to pull the value from the model.
  rope_scale: 1.0

  # Rope alpha (default: None).
  # Same as alpha_value. Set to "auto" to auto-calculate.
  # Leaving this value blank will either pull from the model or auto-calculate.
  rope_alpha:

  # Enable different cache modes for VRAM savings (default: FP16).
  # Possible values: 'FP16', 'Q8', 'Q6', 'Q4'.
  cache_mode: Q4

  # Size of the prompt cache to allocate (default: max_seq_len).
  # Must be a multiple of 256 and can't be less than max_seq_len.
  # For CFG, set this to 2 * max_seq_len.
  cache_size:

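  # Illustrative example (not from the upstream template): with max_seq_len: 65536,
  # enabling CFG would mean cache_size: 131072 (2 * 65536, still a multiple of 256).
  # cache_size: 131072
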
  # Chunk size for prompt ingestion (default: 2048).
  # A lower value reduces VRAM usage but decreases ingestion speed.
  # NOTE: Effects vary depending on the model.
  # An ideal value is between 512 and 4096.
  chunk_size: 2048

  # Set the maximum number of prompts to process at one time (default: None/Automatic).
  # Automatically calculated if left blank.
  # NOTE: Only available for NVIDIA Ampere (30 series) and newer GPUs.
  max_batch_size:

  # Set the prompt template for this model (default: None).
  # If empty, attempts to look for the model's chat template.
  # If a model contains multiple templates in its tokenizer_config.json,
  # set prompt_template to the name of the template you want to use.
  # NOTE: Only works with chat completion message lists!
  prompt_template:

  # Number of experts to use per token.
  # Fetched from the model's config.json if empty.
  # NOTE: For MoE models only.
  # WARNING: Don't set this unless you know what you're doing!
  num_experts_per_token:

  # Enables fasttensors to possibly increase model loading speeds (default: False).
  fasttensors: true

# Options for draft models (speculative decoding)
# This will use more VRAM!
draft_model:
  # Directory to look for draft models (default: models).
  draft_model_dir: models

  # An initial draft model to load.
  # Ensure the model is in the model directory.
  draft_model_name:

  # Rope scale for draft models (default: 1.0).
  # Same as compress_pos_emb.
  # Use if the draft model was trained on long context with rope.
  draft_rope_scale: 1.0

  # Rope alpha for draft models (default: None).
  # Same as alpha_value. Set to "auto" to auto-calculate.
  # Leaving this value blank will either pull from the model or auto-calculate.
  draft_rope_alpha:

  # Cache mode for draft models to save VRAM (default: FP16).
  # Possible values: 'FP16', 'Q8', 'Q6', 'Q4'.
  draft_cache_mode: FP16

# Options for LoRAs
lora:
  # Directory to look for LoRAs (default: loras).
  lora_dir: loras

  # List of LoRAs to load and associated scaling factors (default scale: 1.0).
  # For the YAML file, add each entry as a YAML list:
  #   - name: lora1
  #     scaling: 1.0
  loras:

# Options for embedding models and loading.
# NOTE: Embedding support requires the "extras" feature to be installed.
# Install it via "pip install .[extras]".
embeddings:
  # Directory to look for embedding models (default: models).
  embedding_model_dir: models

  # Device to load embedding models on (default: cpu).
  # Possible values: cpu, auto, cuda.
  # NOTE: It's recommended to load embedding models on the CPU.
  # If using an AMD GPU, set this value to 'cuda'.
  embeddings_device: cpu

  # An initial embedding model to load on the infinity backend.
  embedding_model_name:

sampling:

# Options for development and experimentation
developer:
  # Skip Exllamav2 version check (default: False).
  # WARNING: It's highly recommended to update your dependencies rather than enabling this flag.
  unsafe_launch: false

  # Disable API request streaming (default: False).
  disable_request_streaming: false

  # Enable the torch CUDA malloc backend (default: False).
  cuda_malloc_backend: true

  # Run asyncio using Uvloop or Winloop, which can improve performance.
  # NOTE: It's recommended to enable this, but if something breaks, turn it off.
  uvloop: true

  # Set the process to use a higher priority.
  # For realtime process priority, run as administrator or sudo.
  # Otherwise, the priority will be set to high.
  realtime_process_priority: true