|
from typing import Optional |
|
from functools import partial |
|
|
|
from datasets import load_dataset |
|
from litdata import optimize, TokensLoader |
|
from litgpt.tokenizer import Tokenizer |
|
|
|
|
|
def batch_iterator(path: str,
                   name: Optional[str]=None,
                   data_dir: Optional[str]=None,
                   data_files: Optional[str]=None,
                   revision: Optional[str]=None,
                   split: str='train',
                   format: Optional[str]=None):
    """Stream a Hugging Face dataset and yield one formatted text per row.

    Args:
        path: Dataset repo id or local path, forwarded to ``load_dataset``.
        name: Optional dataset configuration name.
        data_dir: Optional data directory inside the dataset repo.
        data_files: Optional explicit data file(s) to load.
        revision: Optional git revision (branch, tag, or commit) of the dataset.
        split: Split expression to load (default ``'train'``).
        format: ``str.format`` template applied to each row's columns; required.
            (Name shadows the builtin but is kept for keyword compatibility.)

    Yields:
        str: the template rendered against each dataset row.

    Raises:
        ValueError: if ``format`` is not provided.
    """
    # Explicit error instead of `assert`: asserts are stripped under `python -O`,
    # which would turn a missing template into a confusing AttributeError later.
    if format is None:
        raise ValueError("a 'format' template string is required")

    dataset = load_dataset(path=path,
                           name=name,
                           data_dir=data_dir,
                           data_files=data_files,
                           revision=revision,
                           split=split,
                           trust_remote_code=True)

    for row in dataset:
        yield format.format(**row)
|
|
|
|
|
def tokenize_fn(datasets_config, tokenizer=None):
    """Tokenize every text produced by ``batch_iterator`` for one dataset config.

    Args:
        datasets_config: dict of keyword arguments forwarded to ``batch_iterator``.
        tokenizer: litgpt ``Tokenizer`` instance; required despite the ``None``
            default (the default exists only for keyword-style call sites).

    Yields:
        Token ids for each text, with an EOS token appended (``bos=False, eos=True``).

    Raises:
        ValueError: if no tokenizer is supplied.
    """
    # Fail fast with a clear message instead of an AttributeError on
    # `None.encode` deep inside the worker loop.
    if tokenizer is None:
        raise ValueError('a tokenizer instance is required')
    for text in batch_iterator(**datasets_config):
        yield tokenizer.encode(text, bos=False, eos=True)
|
|
|
|
|
# Datasets label speakers inconsistently ('human', 'gpt', 'AI', ...); fold
# every alias onto one of the three canonical chat roles.
_role_aliases = {
    'system': ('system',),
    'user': ('user', 'human'),
    'assistant': ('assistant', 'gpt', 'AI'),
}

roles_map = {
    alias: role
    for role, aliases in _role_aliases.items()
    for alias in aliases
}
|
|
|
|
|
def _sharegpt_to_messages(msgs):
    """Convert ShareGPT-style turns ({'from': ..., 'value': ...}) into
    canonical chat messages ({'role': ..., 'content': ...})."""
    return [{'role': roles_map[m['from']], 'content': m['value']} for m in msgs]


# One entry per source dataset. Keys:
#   path      - Hugging Face dataset repo id
#   split     - split expression (downstream default is 'train')
#   field     - column holding an already chat-formatted conversation
#   transform - callable turning a raw row (or turn list) into chat messages
# NOTE(review): `batch_iterator` in this file does not accept 'field' or
# 'transform' keywords — confirm these configs are consumed by a matching
# iterator, or that `batch_iterator`'s signature is updated elsewhere.
datasets_configs = [
    {'path': 'arcee-ai/The-Tome', 'field': 'conversations', 'transform': _sharegpt_to_messages},
    {'path': 'teknium/OpenHermes-2.5', 'field': 'conversations', 'transform': _sharegpt_to_messages},
    {'path': 'NousResearch/hermes-function-calling-v1', 'field': 'conversations', 'transform': _sharegpt_to_messages},
    {'path': 'ai2-adapt-dev/openmath-2-math', 'field': 'messages'},
    {'path': 'arcee-ai/agent-data', 'field': 'conversations', 'transform': _sharegpt_to_messages},
    {'path': 'AtlasUnified/atlas-converse', 'field': 'conversations', 'transform': _sharegpt_to_messages},
    {'path': 'PJMixers/hieunguyenminh_roleplay-deduped-ShareGPT', 'field': 'conversations'},
    {'path': 'TokenBender/roleplay_alpaca', 'transform': lambda r: [{'role': 'user', 'content': r['instruction']}, {'role': 'assistant', 'content': r['output']}]},
    {'path': 'dvilasuero/reflection-v1-gpt-4o-judge', 'transform': lambda r: [{'role': 'system', 'content': r['system']}, {'role': 'user', 'content': r['prompt']}, {'role': 'assistant', 'content': r['response']}]},
    {'path': 'dvilasuero/reflection-v1-openai-o-mini-judge', 'transform': lambda r: [{'role': 'system', 'content': r['system']}, {'role': 'user', 'content': r['prompt']}, {'role': 'assistant', 'content': r['response']}]},
    {'path': 'flozi00/reflection-qwen2.5-72b-260924', 'transform': lambda r: [r['system'][0], {'role': 'user', 'content': r['input']}, {'role': 'assistant', 'content': r['reflection'] + '\n' + r['output']}]},
    {'path': 'gretelai/synthetic-gsm8k-reflection-405b', 'split': 'train+test', 'transform': lambda r: [{'role': 'user', 'content': r['question']}, {'role': 'assistant', 'content': r['answer_with_tags']}]},
    {'path': 'KingNish/reasoning-base-20k', 'field': 'conversations'},
]
|
|
|
# Tokenizer checkpoint is expected one directory up — TODO confirm '..' is
# the intended checkpoint directory.
tokenizer = Tokenizer('..')

# Tokenize every configured dataset and write litdata-optimized chunks.
outputs = optimize(
    fn=partial(tokenize_fn, tokenizer=tokenizer),
    inputs=datasets_configs,
    output_dir='../contrain-data/',
    # presumably 2049 = block size 2048 + 1 next-token target, times 8012
    # samples per chunk file — TODO confirm against the training block size.
    chunk_size=(2049 * 8012),
    num_workers=32,
)
|
|