# mixture-of-llamas-dare-linear / mergekit_config.yml
models:
  - model: meta-llama/Meta-Llama-3-8B-Instruct
    parameters:
      density: 0.5
      weight: 1.0
  - model: failspy/Meta-Llama-3-8B-Instruct-abliterated-v3
    parameters:
      density: 0.5
      weight: 1.0
  - model: VAGOsolutions/Llama-3-SauerkrautLM-8b-Instruct
    parameters:
      density: 0.5
      weight: 1.0
  - model: DeepMount00/Llama-3-8b-Ita
    parameters:
      density: 0.5
      weight: 1.0
  - model: nbeerbower/llama-3-gutenberg-8B
    parameters:
      density: 0.5
      weight: 1.0
  - model: jpacifico/French-Alpaca-Llama3-8B-Instruct-v1.0
    parameters:
      density: 0.5
      weight: 1.0
merge_method: dare_linear
tokenizer_source: union
base_model: meta-llama/Meta-Llama-3-8B-Instruct
parameters:
  int8_mask: true
dtype: bfloat16
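
For reference: with merge_method dare_linear, each fine-tuned model's parameter delta from the base is randomly sparsified to the configured density (0.5 here) and rescaled (DARE), and the surviving deltas are combined as a weighted linear sum on top of the base weights. A minimal per-tensor sketch in PyTorch; the function name and signature are illustrative, not mergekit's actual API:

import torch

def dare_linear_merge(base, tuned_list, weights, density=0.5):
    # Accumulate the DARE-processed delta from each fine-tuned model.
    merged_delta = torch.zeros_like(base)
    total = sum(weights)
    for tuned, weight in zip(tuned_list, weights):
        delta = tuned - base
        # DARE: keep roughly `density` of the delta entries at random,
        # drop the rest...
        mask = torch.bernoulli(torch.full_like(delta, density))
        # ...and rescale survivors by 1/density so the expected delta
        # is preserved.
        merged_delta += (weight / total) * delta * mask / density
    # Linear merge: the weighted average of sparsified deltas is added
    # back onto the base weights.
    return base + merged_delta

A merge from this config is normally run with mergekit's CLI entry point, e.g. mergekit-yaml mergekit_config.yml ./mixture-of-llamas-dare-linear (the output path is illustrative).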