DBMe committed on
Commit b97fd63
1 Parent(s): 32e8082

Upload config.yml

Files changed (1)
config.yml +212 -0
config.yml ADDED
@@ -0,0 +1,212 @@
+ # Sample YAML file for configuration.
+ # Comment and uncomment values as needed.
+ # Every value has a default within the application.
+ # This file serves as a drop-in replacement for config.yml.
+
+ # Unless specified in the comments, DO NOT put these options in quotes!
+ # You can use https://www.yamllint.com/ if you want to check your YAML formatting.
+
+ # Options for networking
+ network:
+   # The IP to host on (default: 127.0.0.1).
+   # Use 0.0.0.0 to expose on all network adapters.
+   host: 0.0.0.0
+
+   # The port to host on (default: 5000).
+   port: 5000
+
+   # Disable HTTP token authentication with requests.
+   # WARNING: This will make your instance vulnerable!
+   # Turn on this option if you are ONLY connecting from localhost.
+   disable_auth: false
+
+   # Send tracebacks over the API (default: False).
+   # NOTE: Only enable this for debug purposes.
+   send_tracebacks: false
+
+   # Select API servers to enable (default: ["OAI"]).
+   # Possible values: OAI, Kobold.
+   api_servers: ["oai"]
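+   # Illustrative example (not part of the uploaded file): enabling both the
+   # OAI-compatible and Kobold-compatible servers would look like this.
+   # api_servers: ["oai", "kobold"]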
+
+ # Options for logging
+ logging:
+   # Enable prompt logging (default: False).
+   log_prompt: false
+
+   # Enable generation parameter logging (default: False).
+   log_generation_params: false
+
+   # Enable request logging (default: False).
+   # NOTE: Only use this for debugging!
+   log_requests: false
+
+ # Options for model overrides and loading
+ # Please read the comments to understand how arguments are handled
+ # between initial and API loads
+ model:
+   # Directory to look for models (default: models).
+   # Windows users, do NOT put this path in quotes!
+   model_dir: models
+
+   # Allow direct loading of models from a completion or chat completion request (default: False).
+   inline_model_loading: false
+
+   # Sends dummy model names when the models endpoint is queried.
+   # Enable this if the client is looking for specific OAI models.
+   use_dummy_models: false
+
+   # An initial model to load.
+   # Make sure the model is located in the model directory!
+   # REQUIRED: This must be filled out to load a model on startup.
+   model_name: EVA-Qwen2.5-72B-v0.0_exl2_4.48bpw
+
+   # Names of args to use as a fallback for API load requests (default: []).
+   # For example, if you always want cache_mode to be Q4 instead of only on the initial model load, add "cache_mode" to this array.
+   # Example: ['max_seq_len', 'cache_mode'].
+   use_as_default: []
+
+   # Max sequence length (default: Empty).
+   # Fetched from the model's base sequence length in config.json by default.
+   max_seq_len: 65536
+
+   # Overrides base model context length (default: Empty).
+   # WARNING: Don't set this unless you know what you're doing!
+   # Again, do NOT use this for configuring context length; use max_seq_len above ^
+   override_base_seq_len:
+
+   # Load model with tensor parallelism.
+   # Falls back to autosplit if GPU split isn't provided.
+   # This ignores the gpu_split_auto value.
+   tensor_parallel: false
+
+   # Automatically allocate resources to GPUs (default: True).
+   # Not parsed for single GPU users.
+   gpu_split_auto: true
+
+   # Reserve VRAM used for autosplit loading (default: 96 MB on GPU 0).
+   # Represented as an array of MB per GPU.
+   autosplit_reserve: [0]
+
+   # An integer array of GBs of VRAM to split between GPUs (default: []).
+   # Used with tensor parallelism.
+   gpu_split: []
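+   # Illustrative example (not part of the uploaded file): a manual split of
+   # roughly 20 GB on GPU 0 and 24 GB on GPU 1.
+   # gpu_split: [20, 24]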
+
+   # Rope scale (default: 1.0).
+   # Same as compress_pos_emb.
+   # Use if the model was trained on long context with rope.
+   # Leave blank to pull the value from the model.
+   rope_scale: 1.0
+
+   # Rope alpha (default: None).
+   # Same as alpha_value. Set to "auto" to auto-calculate.
+   # Leaving this value blank will either pull from the model or auto-calculate.
+   rope_alpha:
+
+   # Enable different cache modes for VRAM savings (default: FP16).
+   # Possible values: 'FP16', 'Q8', 'Q6', 'Q4'.
+   cache_mode: Q4
+
+   # Size of the prompt cache to allocate (default: max_seq_len).
+   # Must be a multiple of 256 and can't be less than max_seq_len.
+   # For CFG, set this to 2 * max_seq_len.
+   cache_size:
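+   # Illustrative example (not part of the uploaded file): with max_seq_len: 65536,
+   # using CFG means cache_size: 131072 (2 * 65536, which is also a multiple of 256).
+   # cache_size: 131072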
+
+   # Chunk size for prompt ingestion (default: 2048).
+   # A lower value reduces VRAM usage but decreases ingestion speed.
+   # NOTE: Effects vary depending on the model.
+   # An ideal value is between 512 and 4096.
+   chunk_size: 1280
+
+   # Set the maximum number of prompts to process at one time (default: None/Automatic).
+   # Automatically calculated if left blank.
+   # NOTE: Only available for Nvidia Ampere (30 series) and above GPUs.
+   max_batch_size:
+
+   # Set the prompt template for this model (default: None).
+   # If empty, attempts to look for the model's chat template.
+   # If a model contains multiple templates in its tokenizer_config.json,
+   # set prompt_template to the name of the template you want to use.
+   # NOTE: Only works with chat completion message lists!
+   prompt_template:
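+   # Illustrative example (not part of the uploaded file; "chatml" is a hypothetical
+   # template name): pick a specific template from tokenizer_config.json by name.
+   # prompt_template: chatml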
+
+   # Number of experts to use per token.
+   # Fetched from the model's config.json if empty.
+   # NOTE: For MoE models only.
+   # WARNING: Don't set this unless you know what you're doing!
+   num_experts_per_token:
+
+   # Enables fasttensors to possibly increase model loading speeds (default: False).
+   fasttensors: true
+
+ # Options for draft models (speculative decoding)
+ # This will use more VRAM!
+ draft_model:
+   # Directory to look for draft models (default: models)
+   draft_model_dir: models
+
+   # An initial draft model to load.
+   # Ensure the model is in the model directory.
+   draft_model_name:
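+   # Illustrative example (not part of the uploaded file; the model name is
+   # hypothetical): a small quantized model placed in draft_model_dir.
+   # draft_model_name: Qwen2.5-0.5B-Instruct_exl2_8bpw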
+
+   # Rope scale for draft models (default: 1.0).
+   # Same as compress_pos_emb.
+   # Use if the draft model was trained on long context with rope.
+   draft_rope_scale: 1.0
+
+   # Rope alpha for draft models (default: None).
+   # Same as alpha_value. Set to "auto" to auto-calculate.
+   # Leaving this value blank will either pull from the model or auto-calculate.
+   draft_rope_alpha:
+
+   # Cache mode for draft models to save VRAM (default: FP16).
+   # Possible values: 'FP16', 'Q8', 'Q6', 'Q4'.
+   draft_cache_mode: FP16
+
+ # Options for LoRAs
+ lora:
+   # Directory to look for LoRAs (default: loras).
+   lora_dir: loras
+
+   # List of LoRAs to load and associated scaling factors (default scale: 1.0).
+   # For the YAML file, add each entry as a YAML list:
+   # - name: lora1
+   #   scaling: 1.0
+   loras:
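+   # Illustrative example (not part of the uploaded file; names are hypothetical):
+   # loras:
+   #   - name: lora1
+   #     scaling: 1.0
+   #   - name: lora2
+   #     scaling: 0.5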
+
+ # Options for embedding models and loading.
+ # NOTE: Embeddings require the "extras" feature to be installed.
+ # Install it via "pip install .[extras]"
+ embeddings:
+   # Directory to look for embedding models (default: models).
+   embedding_model_dir: models
+
+   # Device to load embedding models on (default: cpu).
+   # Possible values: cpu, auto, cuda.
+   # NOTE: It's recommended to load embedding models on the CPU.
+   # If using an AMD GPU, set this value to 'cuda'.
+   embeddings_device: cpu
+
+   # An initial embedding model to load on the infinity backend.
+   embedding_model_name:
+ sampling:
+
+ # Options for development and experimentation
+ developer:
+   # Skip Exllamav2 version check (default: False).
+   # WARNING: It's highly recommended to update your dependencies rather than enabling this flag.
+   unsafe_launch: false
+
+   # Disable API request streaming (default: False).
+   disable_request_streaming: false
+
+   # Enable the torch CUDA malloc backend (default: False).
+   cuda_malloc_backend: true
+
+   # Run asyncio using Uvloop or Winloop, which can improve performance.
+   # NOTE: It's recommended to enable this, but if something breaks, turn it off.
+   uvloop: true
+
+   # Set the process to use a higher priority.
+   # For realtime process priority, run as administrator or with sudo.
+   # Otherwise, the priority will be set to high.
+   realtime_process_priority: true