vasileion committed
Commit e054623
1 Parent(s): 6e3972c

Upload folder using huggingface_hub

Files changed (2)
  1. README.md +5 -5
  2. config.json +1 -1
README.md CHANGED

```diff
@@ -10,9 +10,8 @@ base_model: Austism/chronos-hermes-13b
 inference: false
 model_creator: Austism
 model_type: llama
-prompt_template: >
-  Below is an instruction that describes a task. Write a response that
-  appropriately completes the request.
+prompt_template: 'Below is an instruction that describes a task. Write a response
+  that appropriately completes the request.
 
 
 ### Instruction:
@@ -21,8 +20,9 @@ prompt_template: >
 
 
 ### Response:
+
+  '
 quantized_by: TheBloke
-library_name: transformers
 ---
 
 <!-- header start -->
@@ -274,4 +274,4 @@ This has the aspects of chronos's nature to produce long, descriptive outputs. B
 
 This mix contains alot of chronos's writing style and 'flavour' with far less tendency of going AWOL and spouting nonsensical babble.
 
-This result was much more successful than my [first chronos merge](https://huggingface.co/Austism/chronos-wizardlm-uc-scot-st-13b).
+This result was much more successful than my [first chronos merge](https://huggingface.co/Austism/chronos-wizardlm-uc-scot-st-13b).
```
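The README change rewrites the YAML `prompt_template` from a folded block scalar (`>`) to a single-quoted scalar, making the trailing blank line after `### Response:` an explicit part of the template, and drops the `library_name: transformers` key. For illustration only, a minimal sketch of how such an Alpaca-style template is typically rendered into a prompt; the `{prompt}` placeholder name and the `build_prompt` helper are assumptions, since the placeholder line falls between the two hunks shown above.

```python
# Minimal sketch: render the Alpaca-style template documented in the model card.
# ASSUMPTION: the placeholder is named "{prompt}"; the line containing it sits
# between the two hunks above and is not visible in this diff.
PROMPT_TEMPLATE = (
    "Below is an instruction that describes a task. Write a response that "
    "appropriately completes the request.\n\n"
    "### Instruction:\n"
    "{prompt}\n\n"
    "### Response:\n"
)

def build_prompt(instruction: str) -> str:
    """Fill the template with a single-turn instruction."""
    return PROMPT_TEMPLATE.format(prompt=instruction)

print(build_prompt("Summarize the plot of Moby-Dick in two sentences."))
```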
config.json CHANGED

```diff
@@ -17,7 +17,7 @@
   "rms_norm_eps": 1e-06,
   "tie_word_embeddings": false,
   "torch_dtype": "float16",
-  "transformers_version": "4.28.0",
+  "transformers_version": "4.44.0",
   "use_cache": true,
   "vocab_size": 32001,
   "quantization_config": {
```