
A merge of top 7B models, combined with a SLERP merge of other 7B models.
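For context, SLERP (spherical linear interpolation) blends two models' weights along the arc between them rather than along a straight line, which preserves the norm of the interpolated weights better than plain averaging. Below is a minimal conceptual sketch in NumPy; it is not mergekit's implementation, and the tensor shapes and interpolation factor `t` are assumptions for illustration only.

```python
# Illustrative SLERP between two weight tensors; a conceptual sketch only,
# not mergekit's implementation. Shapes and the t=0.5 factor are assumptions.
import numpy as np

def slerp(t: float, a: np.ndarray, b: np.ndarray, eps: float = 1e-8) -> np.ndarray:
    """Spherically interpolate between flattened tensors a and b."""
    a_flat, b_flat = a.ravel(), b.ravel()
    a_unit = a_flat / (np.linalg.norm(a_flat) + eps)
    b_unit = b_flat / (np.linalg.norm(b_flat) + eps)
    dot = np.clip(np.dot(a_unit, b_unit), -1.0, 1.0)
    omega = np.arccos(dot)          # angle between the two weight vectors
    so = np.sin(omega)
    if so < eps:                    # nearly parallel: fall back to plain LERP
        return ((1 - t) * a_flat + t * b_flat).reshape(a.shape)
    mixed = (np.sin((1 - t) * omega) / so) * a_flat + (np.sin(t * omega) / so) * b_flat
    return mixed.reshape(a.shape)

# Example: blend two random "layers" halfway between the endpoints.
layer_a, layer_b = np.random.randn(1024, 1024), np.random.randn(1024, 1024)
merged = slerp(0.5, layer_a, layer_b)
```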

mergekit is a toolkit for merging pre-trained language models. mergekit uses an out-of-core approach to perform unreasonably elaborate merges in resource-constrained situations. Merges can be run entirely on CPU or accelerated with as little as 8 GB of VRAM. Many merging algorithms are supported, with more coming as they catch my attention.
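The merged weights load like any other 7B causal LM. Here is a minimal usage sketch with `transformers`; the chat-template call and generation settings are assumptions, not something this card specifies.

```python
# Minimal loading sketch; the chat template and generation settings are assumptions.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "MaziyarPanahi/TheTop-5x7B-Instruct-S3-v0.1"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, device_map="auto"
)

messages = [{"role": "user", "content": "Summarize what a SLERP merge does."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

output = model.generate(input_ids, max_new_tokens=128, do_sample=False)
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
```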

# Eval

```json
{
    "all": {
        "acc": 0.6571641282160704,
        "acc_stderr": 0.031918970852064334,
        "acc_norm": 0.6561506230894164,
        "acc_norm_stderr": 0.03258982989656136,
        "mc1": 0.4834761321909425,
        "mc1_stderr": 0.017493940190057723,
        "mc2": 0.6447306680251751,
        "mc2_stderr": 0.015519245883344577
    },
    "harness|arc:challenge|25": {
        "acc": 0.689419795221843,
        "acc_stderr": 0.01352229209805306,
        "acc_norm": 0.7090443686006825,
        "acc_norm_stderr": 0.013273077865907595
    },
    "harness|hellaswag|10": {
        "acc": 0.7168890659231228,
        "acc_stderr": 0.004495891440519419,
        "acc_norm": 0.8800039832702649,
        "acc_norm_stderr": 0.0032429275808698544
    },
    "harness|hendrycksTest-abstract_algebra|5": {
        "acc": 0.33,
        "acc_stderr": 0.047258156262526045,
        "acc_norm": 0.33,
        "acc_norm_stderr": 0.047258156262526045
    },
    "harness|hendrycksTest-anatomy|5": {
        "acc": 0.6370370370370371,
        "acc_stderr": 0.04153948404742398,
        "acc_norm": 0.6370370370370371,
        "acc_norm_stderr": 0.04153948404742398
    },
    "harness|hendrycksTest-astronomy|5": {
        "acc": 0.7105263157894737,
        "acc_stderr": 0.03690677986137283,
        "acc_norm": 0.7105263157894737,
        "acc_norm_stderr": 0.03690677986137283
    },
    "harness|hendrycksTest-business_ethics|5": {
        "acc": 0.65,
        "acc_stderr": 0.0479372485441102,
        "acc_norm": 0.65,
        "acc_norm_stderr": 0.0479372485441102
    },
    "harness|hendrycksTest-clinical_knowledge|5": {
        "acc": 0.6981132075471698,
        "acc_stderr": 0.02825420034443866,
        "acc_norm": 0.6981132075471698,
        "acc_norm_stderr": 0.02825420034443866
    },
    "harness|hendrycksTest-college_biology|5": {
        "acc": 0.7638888888888888,
        "acc_stderr": 0.03551446610810826,
        "acc_norm": 0.7638888888888888,
        "acc_norm_stderr": 0.03551446610810826
    },
    "harness|hendrycksTest-college_chemistry|5": {
        "acc": 0.48,
        "acc_stderr": 0.050211673156867795,
        "acc_norm": 0.48,
        "acc_norm_stderr": 0.050211673156867795
    },
    "harness|hendrycksTest-college_computer_science|5": {
        "acc": 0.56,
        "acc_stderr": 0.049888765156985884,
        "acc_norm": 0.56,
        "acc_norm_stderr": 0.049888765156985884
    },
    "harness|hendrycksTest-college_mathematics|5": {
        "acc": 0.27,
        "acc_stderr": 0.0446196043338474,
        "acc_norm": 0.27,
        "acc_norm_stderr": 0.0446196043338474
    },
    "harness|hendrycksTest-college_medicine|5": {
        "acc": 0.6589595375722543,
        "acc_stderr": 0.03614665424180826,
        "acc_norm": 0.6589595375722543,
        "acc_norm_stderr": 0.03614665424180826
    },
    "harness|hendrycksTest-college_physics|5": {
        "acc": 0.4117647058823529,
        "acc_stderr": 0.048971049527263666,
        "acc_norm": 0.4117647058823529,
        "acc_norm_stderr": 0.048971049527263666
    },
    "harness|hendrycksTest-computer_security|5": {
        "acc": 0.75,
        "acc_stderr": 0.04351941398892446,
        "acc_norm": 0.75,
        "acc_norm_stderr": 0.04351941398892446
    },
    "harness|hendrycksTest-conceptual_physics|5": {
        "acc": 0.5787234042553191,
        "acc_stderr": 0.03227834510146268,
        "acc_norm": 0.5787234042553191,
        "acc_norm_stderr": 0.03227834510146268
    },
    "harness|hendrycksTest-econometrics|5": {
        "acc": 0.5175438596491229,
        "acc_stderr": 0.04700708033551038,
        "acc_norm": 0.5175438596491229,
        "acc_norm_stderr": 0.04700708033551038
    },
    "harness|hendrycksTest-electrical_engineering|5": {
        "acc": 0.5655172413793104,
        "acc_stderr": 0.04130740879555497,
        "acc_norm": 0.5655172413793104,
        "acc_norm_stderr": 0.04130740879555497
    },
    "harness|hendrycksTest-elementary_mathematics|5": {
        "acc": 0.4312169312169312,
        "acc_stderr": 0.02550648169813821,
        "acc_norm": 0.4312169312169312,
        "acc_norm_stderr": 0.02550648169813821
    },
    "harness|hendrycksTest-formal_logic|5": {
        "acc": 0.48412698412698413,
        "acc_stderr": 0.04469881854072606,
        "acc_norm": 0.48412698412698413,
        "acc_norm_stderr": 0.04469881854072606
    },
    "harness|hendrycksTest-global_facts|5": {
        "acc": 0.33,
        "acc_stderr": 0.04725815626252604,
        "acc_norm": 0.33,
        "acc_norm_stderr": 0.04725815626252604
    },
    "harness|hendrycksTest-high_school_biology|5": {
        "acc": 0.7838709677419354,
        "acc_stderr": 0.02341529343356853,
        "acc_norm": 0.7838709677419354,
        "acc_norm_stderr": 0.02341529343356853
    },
    "harness|hendrycksTest-high_school_chemistry|5": {
        "acc": 0.4975369458128079,
        "acc_stderr": 0.03517945038691063,
        "acc_norm": 0.4975369458128079,
        "acc_norm_stderr": 0.03517945038691063
    },
    "harness|hendrycksTest-high_school_computer_science|5": {
        "acc": 0.67,
        "acc_stderr": 0.04725815626252607,
        "acc_norm": 0.67,
        "acc_norm_stderr": 0.04725815626252607
    },
    "harness|hendrycksTest-high_school_european_history|5": {
        "acc": 0.7878787878787878,
        "acc_stderr": 0.031922715695483,
        "acc_norm": 0.7878787878787878,
        "acc_norm_stderr": 0.031922715695483
    },
    "harness|hendrycksTest-high_school_geography|5": {
        "acc": 0.7929292929292929,
        "acc_stderr": 0.028869778460267045,
        "acc_norm": 0.7929292929292929,
        "acc_norm_stderr": 0.028869778460267045
    },
    "harness|hendrycksTest-high_school_government_and_politics|5": {
        "acc": 0.9015544041450777,
        "acc_stderr": 0.021500249576033456,
        "acc_norm": 0.9015544041450777,
        "acc_norm_stderr": 0.021500249576033456
    },
    "harness|hendrycksTest-high_school_macroeconomics|5": {
        "acc": 0.6666666666666666,
        "acc_stderr": 0.023901157979402534,
        "acc_norm": 0.6666666666666666,
        "acc_norm_stderr": 0.023901157979402534
    },
    "harness|hendrycksTest-high_school_mathematics|5": {
        "acc": 0.34814814814814815,
        "acc_stderr": 0.029045600290616255,
        "acc_norm": 0.34814814814814815,
        "acc_norm_stderr": 0.029045600290616255
    },
    "harness|hendrycksTest-high_school_microeconomics|5": {
        "acc": 0.680672268907563,
        "acc_stderr": 0.030283995525884396,
        "acc_norm": 0.680672268907563,
        "acc_norm_stderr": 0.030283995525884396
    },
    "harness|hendrycksTest-high_school_physics|5": {
        "acc": 0.33112582781456956,
        "acc_stderr": 0.038425817186598696,
        "acc_norm": 0.33112582781456956,
        "acc_norm_stderr": 0.038425817186598696
    },
    "harness|hendrycksTest-high_school_psychology|5": {
        "acc": 0.8385321100917431,
        "acc_stderr": 0.015776239256163224,
        "acc_norm": 0.8385321100917431,
        "acc_norm_stderr": 0.015776239256163224
    },
    "harness|hendrycksTest-high_school_statistics|5": {
        "acc": 0.5138888888888888,
        "acc_stderr": 0.03408655867977749,
        "acc_norm": 0.5138888888888888,
        "acc_norm_stderr": 0.03408655867977749
    },
    "harness|hendrycksTest-high_school_us_history|5": {
        "acc": 0.8578431372549019,
        "acc_stderr": 0.024509803921568603,
        "acc_norm": 0.8578431372549019,
        "acc_norm_stderr": 0.024509803921568603
    },
    "harness|hendrycksTest-high_school_world_history|5": {
        "acc": 0.8143459915611815,
        "acc_stderr": 0.025310495376944856,
        "acc_norm": 0.8143459915611815,
        "acc_norm_stderr": 0.025310495376944856
    },
    "harness|hendrycksTest-human_aging|5": {
        "acc": 0.6860986547085202,
        "acc_stderr": 0.031146796482972465,
        "acc_norm": 0.6860986547085202,
        "acc_norm_stderr": 0.031146796482972465
    },
    "harness|hendrycksTest-human_sexuality|5": {
        "acc": 0.7862595419847328,
        "acc_stderr": 0.0359546161177469,
        "acc_norm": 0.7862595419847328,
        "acc_norm_stderr": 0.0359546161177469
    },
    "harness|hendrycksTest-international_law|5": {
        "acc": 0.8099173553719008,
        "acc_stderr": 0.03581796951709282,
        "acc_norm": 0.8099173553719008,
        "acc_norm_stderr": 0.03581796951709282
    },
    "harness|hendrycksTest-jurisprudence|5": {
        "acc": 0.7962962962962963,
        "acc_stderr": 0.03893542518824847,
        "acc_norm": 0.7962962962962963,
        "acc_norm_stderr": 0.03893542518824847
    },
    "harness|hendrycksTest-logical_fallacies|5": {
        "acc": 0.7730061349693251,
        "acc_stderr": 0.03291099578615769,
        "acc_norm": 0.7730061349693251,
        "acc_norm_stderr": 0.03291099578615769
    },
    "harness|hendrycksTest-machine_learning|5": {
        "acc": 0.5,
        "acc_stderr": 0.04745789978762494,
        "acc_norm": 0.5,
        "acc_norm_stderr": 0.04745789978762494
    },
    "harness|hendrycksTest-management|5": {
        "acc": 0.7961165048543689,
        "acc_stderr": 0.03989139859531771,
        "acc_norm": 0.7961165048543689,
        "acc_norm_stderr": 0.03989139859531771
    },
    "harness|hendrycksTest-marketing|5": {
        "acc": 0.8760683760683761,
        "acc_stderr": 0.02158649400128137,
        "acc_norm": 0.8760683760683761,
        "acc_norm_stderr": 0.02158649400128137
    },
    "harness|hendrycksTest-medical_genetics|5": {
        "acc": 0.73,
        "acc_stderr": 0.0446196043338474,
        "acc_norm": 0.73,
        "acc_norm_stderr": 0.0446196043338474
    },
    "harness|hendrycksTest-miscellaneous|5": {
        "acc": 0.8288633461047255,
        "acc_stderr": 0.013468201614066307,
        "acc_norm": 0.8288633461047255,
        "acc_norm_stderr": 0.013468201614066307
    },
    "harness|hendrycksTest-moral_disputes|5": {
        "acc": 0.7514450867052023,
        "acc_stderr": 0.023267528432100174,
        "acc_norm": 0.7514450867052023,
        "acc_norm_stderr": 0.023267528432100174
    },
    "harness|hendrycksTest-moral_scenarios|5": {
        "acc": 0.4480446927374302,
        "acc_stderr": 0.016631976628930595,
        "acc_norm": 0.4480446927374302,
        "acc_norm_stderr": 0.016631976628930595
    },
    "harness|hendrycksTest-nutrition|5": {
        "acc": 0.7320261437908496,
        "acc_stderr": 0.025360603796242553,
        "acc_norm": 0.7320261437908496,
        "acc_norm_stderr": 0.025360603796242553
    },
    "harness|hendrycksTest-philosophy|5": {
        "acc": 0.707395498392283,
        "acc_stderr": 0.02583989833487798,
        "acc_norm": 0.707395498392283,
        "acc_norm_stderr": 0.02583989833487798
    },
    "harness|hendrycksTest-prehistory|5": {
        "acc": 0.7530864197530864,
        "acc_stderr": 0.023993501709042107,
        "acc_norm": 0.7530864197530864,
        "acc_norm_stderr": 0.023993501709042107
    },
    "harness|hendrycksTest-professional_accounting|5": {
        "acc": 0.4787234042553192,
        "acc_stderr": 0.029800481645628693,
        "acc_norm": 0.4787234042553192,
        "acc_norm_stderr": 0.029800481645628693
    },
    "harness|hendrycksTest-professional_law|5": {
        "acc": 0.4791395045632334,
        "acc_stderr": 0.012759117066518015,
        "acc_norm": 0.4791395045632334,
        "acc_norm_stderr": 0.012759117066518015
    },
    "harness|hendrycksTest-professional_medicine|5": {
        "acc": 0.7058823529411765,
        "acc_stderr": 0.02767846864214472,
        "acc_norm": 0.7058823529411765,
        "acc_norm_stderr": 0.02767846864214472
    },
    "harness|hendrycksTest-professional_psychology|5": {
        "acc": 0.6862745098039216,
        "acc_stderr": 0.018771683893528176,
        "acc_norm": 0.6862745098039216,
        "acc_norm_stderr": 0.018771683893528176
    },
    "harness|hendrycksTest-public_relations|5": {
        "acc": 0.6818181818181818,
        "acc_stderr": 0.04461272175910509,
        "acc_norm": 0.6818181818181818,
        "acc_norm_stderr": 0.04461272175910509
    },
    "harness|hendrycksTest-security_studies|5": {
        "acc": 0.7346938775510204,
        "acc_stderr": 0.028263889943784603,
        "acc_norm": 0.7346938775510204,
        "acc_norm_stderr": 0.028263889943784603
    },
    "harness|hendrycksTest-sociology|5": {
        "acc": 0.835820895522388,
        "acc_stderr": 0.026193923544454115,
        "acc_norm": 0.835820895522388,
        "acc_norm_stderr": 0.026193923544454115
    },
    "harness|hendrycksTest-us_foreign_policy|5": {
        "acc": 0.85,
        "acc_stderr": 0.03588702812826371,
        "acc_norm": 0.85,
        "acc_norm_stderr": 0.03588702812826371
    },
    "harness|hendrycksTest-virology|5": {
        "acc": 0.5481927710843374,
        "acc_stderr": 0.03874371556587953,
        "acc_norm": 0.5481927710843374,
        "acc_norm_stderr": 0.03874371556587953
    },
    "harness|hendrycksTest-world_religions|5": {
        "acc": 0.8362573099415205,
        "acc_stderr": 0.028380919596145866,
        "acc_norm": 0.8362573099415205,
        "acc_norm_stderr": 0.028380919596145866
    },
    "harness|truthfulqa:mc|0": {
        "mc1": 0.4834761321909425,
        "mc1_stderr": 0.017493940190057723,
        "mc2": 0.6447306680251751,
        "mc2_stderr": 0.015519245883344577
    },
    "harness|winogrande|5": {
        "acc": 0.8366219415943172,
        "acc_stderr": 0.010390695970273764
    },
    "harness|gsm8k|5": {
        "acc": 0.7202426080363912,
        "acc_stderr": 0.012364384016735319
    }
}
```
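These figures follow the Open LLM Leaderboard's lm-evaluation-harness setup. A hedged sketch of rerunning one task locally is shown below; the harness version, batch size, and other settings are assumptions and may not match the leaderboard's pinned configuration, so local scores can differ.

```python
# Reproduction sketch with EleutherAI's lm-evaluation-harness (v0.4+ Python API).
# Settings below are assumptions; the leaderboard pins its own harness revision.
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=MaziyarPanahi/TheTop-5x7B-Instruct-S3-v0.1,dtype=bfloat16",
    tasks=["arc_challenge"],  # scored 25-shot on the leaderboard
    num_fewshot=25,
    batch_size=8,
)
print(results["results"]["arc_challenge"])
```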
# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_MaziyarPanahi__TheTop-5x7B-Instruct-S3-v0.1)

|             Metric              |Value|
|---------------------------------|----:|
|Avg.                             |74.03|
|AI2 Reasoning Challenge (25-Shot)|70.90|
|HellaSwag (10-Shot)              |88.00|
|MMLU (5-Shot)                    |65.13|
|TruthfulQA (0-shot)              |64.47|
|Winogrande (5-shot)              |83.66|
|GSM8k (5-shot)                   |72.02|
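
The Avg. row is the unweighted mean of the six benchmark scores; a quick check:

```python
# Sanity check: the leaderboard average is the plain mean of the six benchmarks.
scores = [70.90, 88.00, 65.13, 64.47, 83.66, 72.02]
print(round(sum(scores) / len(scores), 2))  # 74.03
```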