Change use_cache to True, which significantly speeds up inference
#2 opened by TheBloke
config.json (+1 -1)
@@ -18,6 +18,6 @@
   "tie_word_embeddings": false,
   "torch_dtype": "float16",
   "transformers_version": "4.29.0.dev0",
-  "use_cache": false,
+  "use_cache": true,
   "vocab_size": 32001
 }
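For context on why this one-line change matters: with use_cache enabled, transformers keeps the past key/value states during generation, so each new token attends against cached states instead of recomputing attention over the entire prefix. Below is a minimal sketch, using a placeholder repo id since it is not named in this PR; the flag can also be overridden per call to generate().

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder repo id; substitute the model this PR targets.
model_id = "your-org/your-llama-model"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16)

inputs = tokenizer("The quick brown fox", return_tensors="pt")

# With use_cache=True (now the config default), generate() reuses cached
# key/value tensors, so each decoding step only processes the newly
# generated token rather than re-running the full sequence.
out = model.generate(**inputs, max_new_tokens=64, use_cache=True)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```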