# Short model aliases mapped to the Hugging Face repo IDs they resolve to.
MODEL_MAP = {
"mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1", # [Recommended]
"nous-mixtral-8x7b": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
"mistral-7b": "mistralai/Mistral-7B-Instruct-v0.3",
"yi-1.5-34b": "01-ai/Yi-1.5-34B-Chat",
"gemma-7b": "google/gemma-1.1-7b-it",
"openchat-3.5": "openchat/openchat-3.5-0106",
"command-r-plus": "CohereForAI/c4ai-command-r-plus-4bit",
"llama3-70b": "meta-llama/Meta-Llama-3-70B-Instruct",
"zephyr-141b": "HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1",
"default": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
"Qwen2-72B": "Qwen/Qwen2-72B-Instruct",
"Qwen2-7B-Instruct": "Qwen/Qwen2-7B-Instruct",
"Qwen2-1.5B-Instruct" : "Qwen/Qwen2-1.5B-Instruct",
"llama3-8b" : "meta-llama/Meta-Llama-3-8B-Instruct"
}
AVAILABLE_MODELS = list(MODEL_MAP.keys())
# PRO_MODELS = ["command-r-plus", "llama3-70b", "zephyr-141b", "Qwen2-72B"]
PRO_MODELS = []
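
# --- Illustrative usage (not part of the original constants) ---
# Minimal sketch of resolving a short model alias to its Hugging Face repo ID,
# falling back to the "default" entry for unknown names. The helper name
# `resolve_model_id` is hypothetical.
def resolve_model_id(model: str) -> str:
    """Return the Hugging Face repo ID registered for a short model alias."""
    # e.g. resolve_model_id("llama3-8b") -> "meta-llama/Meta-Llama-3-8B-Instruct"
    return MODEL_MAP.get(model, MODEL_MAP["default"])
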
# End-of-turn / stop tokens per model, used to cut off generation cleanly.
STOP_SEQUENCES_MAP = {
# https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1/blob/main/tokenizer_config.json#L33
"mixtral-8x7b": "</s>",
# https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO/blob/main/tokenizer_config.json#L50
"nous-mixtral-8x7b": "<|im_end|>",
# https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2/blob/main/tokenizer_config.json#L33
"mistral-7b": "</s>",
# https://huggingface.co/01-ai/Yi-1.5-34B-Chat/blob/main/tokenizer_config.json#L42
"yi-1.5-34b": "<|im_end|>",
# https://huggingface.co/google/gemma-1.1-7b-it/blob/main/tokenizer_config.json#L1509
"gemma-7b": "<eos>",
"openchat-3.5": "<|end_of_turn|>",
# https://huggingface.co/CohereForAI/c4ai-command-r-plus-4bit/blob/main/tokenizer_config.json#L305
"command-r-plus": "<|END_OF_TURN_TOKEN|>",
# https://huggingface.co/Qwen/Qwen2-72B-Instruct/blob/main/tokenizer_config.json#L30
"Qwen2-72B": "<|im_end|>",
"Qwen2-7B-Instruct": "<|im_end|>",
"Qwen2-1.5B-Instruct": "<|im_end|>",
"llama3-8b" : "<|eot_id|>",
"llama3-70b" : "<|eot_id|>"
}
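
# --- Illustrative usage (not part of the original constants) ---
# Minimal sketch of turning the table above into the stop-sequence list passed
# to a text-generation call, so decoding halts at the model's end-of-turn
# token. `build_stop_sequences` is a hypothetical helper, not part of this file.
def build_stop_sequences(model: str) -> list:
    """Collect stop sequences for a model; empty if none is registered."""
    stop = STOP_SEQUENCES_MAP.get(model)
    return [stop] if stop else []
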
# Context window size (in tokens) per model.
TOKEN_LIMIT_MAP = {
"mixtral-8x7b": 32768,
"nous-mixtral-8x7b": 32768,
"mistral-7b": 32768,
"yi-1.5-34b": 4096,
"gemma-7b": 8192,
"openchat-3.5": 8192,
"command-r-plus": 32768,
"llama3-70b": 8192,
"zephyr-141b": 2048,
"gpt-3.5-turbo": 8192,
"Qwen2-72B": 32768,
"Qwen2-7B-Instruct": 32768,
"Qwen2-1.5B-Instruct": 32768,
"llama3-8b": 8192,
}
# Tokens held in reserve when computing the available generation budget.
TOKEN_RESERVED = 20
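
# --- Illustrative usage (not part of the original constants) ---
# Minimal sketch of budgeting generation length from TOKEN_LIMIT_MAP and
# TOKEN_RESERVED: the prompt length plus the reserved headroom is subtracted
# from the model's context window. The helper name and the 4096-token fallback
# for unknown models are assumptions.
def max_new_tokens_for(model: str, prompt_tokens: int) -> int:
    """Upper bound on newly generated tokens, clamped to be non-negative."""
    context_limit = TOKEN_LIMIT_MAP.get(model, 4096)
    return max(context_limit - TOKEN_RESERVED - prompt_tokens, 0)
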
# Model metadata in the shape of the OpenAI "list models" response:
# https://platform.openai.com/docs/api-reference/models/list
AVAILABLE_MODELS_DICTS = [
{
"id": "mixtral-8x7b",
"description": "[mistralai/Mixtral-8x7B-Instruct-v0.1]: https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1",
"object": "model",
"created": 1700000000,
"owned_by": "mistralai",
},
{
"id": "nous-mixtral-8x7b",
"description": "[NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO]: https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
"object": "model",
"created": 1700000000,
"owned_by": "NousResearch",
},
{
"id": "mistral-7b",
"description": "[mistralai/Mistral-7B-Instruct-v0.2]: https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2",
"object": "model",
"created": 1700000000,
"owned_by": "mistralai",
},
{
"id": "yi-1.5-34b",
"description": "[01-ai/Yi-1.5-34B-Chat]: https://huggingface.co/01-ai/Yi-1.5-34B-Chat",
"object": "model",
"created": 1700000000,
"owned_by": "01-ai",
},
{
"id": "gemma-7b",
"description": "[google/gemma-1.1-7b-it]: https://huggingface.co/google/gemma-1.1-7b-it",
"object": "model",
"created": 1700000000,
"owned_by": "Google",
},
{
"id": "openchat-3.5",
"description": "[openchat/openchat-3.5-0106]: https://huggingface.co/openchat/openchat-3.5-0106",
"object": "model",
"created": 1700000000,
"owned_by": "openchat"
},
{
"id": "command-r-plus",
"description": "[CohereForAI/c4ai-command-r-plus]: https://huggingface.co/CohereForAI/c4ai-command-r-plus",
"object": "model",
"created": 1700000000,
"owned_by": "CohereForAI"
},
{
"id": "llama3-70b",
"description": "[meta-llama/Meta-Llama-3-70B]: https://huggingface.co/meta-llama/Meta-Llama-3-70B",
"object": "model",
"created": 1700000000,
"owned_by": "Meta"
},
{
"id": "zephyr-141b",
"description": "[HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1]: https://huggingface.co/HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1",
"object": "model",
"created": 1700000000,
"owned_by": "Huggingface"
},
{
"id": "gpt-3.5-turbo",
"description": "[openai/gpt-3.5-turbo]: https://platform.openai.com/docs/models/gpt-3-5-turbo",
"object": "model",
"created": 1700000000,
"owned_by": "OpenAI"
},
{
"id": "Qwen2-72B",
"description": "[Qwen/Qwen2-72B]: https://huggingface.co/Qwen/Qwen2-72B",
"object": "model",
"created": 1700000000,
"owned_by": "Qwen"
}
]
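
# --- Illustrative usage (not part of the original constants) ---
# Minimal sketch of serving the dicts above in the OpenAI-compatible
# "list models" shape referenced by the comment before the list. The helper is
# hypothetical; an API layer would typically return it from GET /v1/models.
def list_models_response() -> dict:
    """Wrap the model metadata in an OpenAI-style list payload."""
    return {"object": "list", "data": AVAILABLE_MODELS_DICTS}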