# Config.py — configuration constants for the LLM-model-cards project.
# Original author: Blair Yang (commit 487b80b).
import random
# Datasets currently enabled for evaluation. Disabled options are kept
# commented out as deliberate toggles.
DATASETS = [
    'mmlu',
    # 'Anthropic_safety_eval'
]

# Topic subsets available per dataset. Keys should cover every dataset that
# can appear in DATASETS or be used as DEFAULT_DATASET.
TOPICS = {
    'mmlu': [
        # 'high_school_biology',
        'high_school_physics'
    ],
    'Anthropic_safety_eval': [
        'myopia'
    ],
}

# Candidate models to evaluate; alternatives are kept as commented toggles.
MODELS = [
    # 'Llama-2-70b-chat-hf',
    # 'Llama-2-13b-chat-hf',
    'Mixtral-8x7B-Instruct-v0.1',
    # 'Mistral-7B-Instruct-v0.2'
]

# Seed for the randomized default-topic selection below, so that importing
# this module is reproducible.
RANDOM_SEED = 42

# Model used for summarization; alternatives kept as commented toggles.
DEFAULT_SUMMARIZER = "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO"
# DEFAULT_SUMMARIZER = 'NousResearch/Nous-Hermes-2-Mistral-7B-DPO'
# DEFAULT_SUMMARIZER = 'mistralai/Mistral-7B-Instruct-v0.2'

DEFAULT_DATASET = "mmlu"

# Pick a default topic deterministically. The original code called
# random.choice() on the unseeded global RNG even though RANDOM_SEED is
# declared above — nondeterministic across runs whenever more than one topic
# is enabled, and an import-time side effect on global random state. A
# dedicated seeded Random instance fixes both; with the single topic
# currently enabled the chosen value is unchanged.
DEFAULT_TOPIC = random.Random(RANDOM_SEED).choice(TOPICS[DEFAULT_DATASET])

# Relative output/input directories (trailing slash expected by consumers —
# TODO confirm against callers).
CARD_DIR = "cards/"
DATASET_DIR = "datasets/"