prefix (string, lengths 72–385) | suffix (string, lengths 10–385) | reference (string, lengths 6–97)
---|---|---|
import argparse
import math
import os
import yaml
from tqdm import tqdm
| from torch.utils.data import DataLoader
from transformers import (
AutoTokenizer,
AutoModelForCausalLM,
default_data_collator,
| import torch
|
from torch.utils.data import DataLoader
from transformers import (
AutoTokenizer,
AutoModelForCausalLM,
default_data_collator,
| from accelerate import Accelerator, DistributedDataParallelKwargs, InitProcessGroupKwargs
from src.data import load_data
from src.tokenizer import AudioTokenizer, get_start_tokens
from src.utils import save_checkpoint, fix_checkpoint, get_exp_name
# Parse arguments
| get_scheduler,
|
from accelerate import Accelerator, DistributedDataParallelKwargs, InitProcessGroupKwargs
from src.data import load_data
from src.tokenizer import AudioTokenizer, get_start_tokens
from src.utils import save_checkpoint, fix_checkpoint, get_exp_name
# Parse arguments
| parser.add_argument(
"--config", type=str, help="Path to the config.yaml file", required=True
args = parser.parse_args()
# Load config
with open(args.config, "r") as file:
| parser = argparse.ArgumentParser(description="Train a model with configuration.")
|
parser.add_argument(
"--config", type=str, help="Path to the config.yaml file", required=True
args = parser.parse_args()
# Load config
with open(args.config, "r") as file:
| base_model = config["base_model"]
checkpoint_path = config.get("checkpoint_path")
save_dir = config["save_dir"]
data = config["data"]
start_audio_token = config["start_audio_token"]
| config = yaml.safe_load(file)
|
base_model = config["base_model"]
checkpoint_path = config.get("checkpoint_path")
save_dir = config["save_dir"]
data = config["data"]
start_audio_token = config["start_audio_token"]
| path_to_cache = config["path_to_cache"]
checkpointing_steps = int(config['checkpointing_steps'])
max_grad_norm = float(config['max_grad_norm'])
torch.backends.cuda.matmul.allow_tf32 = config["allow_tf32"]
torch.backends.cudnn.allow_tf32 = config["allow_tf32"]
| end_audio_token = config["end_audio_token"]
|
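The rows above are sliding windows over the top of a training script: it parses a `--config` argument, loads the YAML, pulls out model/data/token settings, and toggles TF32. A minimal sketch of that setup reassembled from the fragments (key names are taken verbatim from the rows; indentation is reconstructed):

```python
import argparse
import yaml
import torch

# Parse arguments
parser = argparse.ArgumentParser(description="Train a model with configuration.")
parser.add_argument(
    "--config", type=str, help="Path to the config.yaml file", required=True
)
args = parser.parse_args()

# Load config
with open(args.config, "r") as file:
    config = yaml.safe_load(file)

base_model = config["base_model"]
checkpoint_path = config.get("checkpoint_path")
save_dir = config["save_dir"]
data = config["data"]
start_audio_token = config["start_audio_token"]
end_audio_token = config["end_audio_token"]
path_to_cache = config["path_to_cache"]
checkpointing_steps = int(config["checkpointing_steps"])
max_grad_norm = float(config["max_grad_norm"])

# Enable TF32 matmuls/convolutions when the config allows it
torch.backends.cuda.matmul.allow_tf32 = config["allow_tf32"]
torch.backends.cudnn.allow_tf32 = config["allow_tf32"]
```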
path_to_cache = config["path_to_cache"]
checkpointing_steps = int(config['checkpointing_steps'])
max_grad_norm = float(config['max_grad_norm'])
torch.backends.cuda.matmul.allow_tf32 = config["allow_tf32"]
torch.backends.cudnn.allow_tf32 = config["allow_tf32"]
| model,
dataloader,
accelerator,
optimizer,
lr_scheduler,
| def train(
|
model,
dataloader,
accelerator,
optimizer,
lr_scheduler,
| progress_bar,
max_train_steps,
save_dir
model.train()
total_loss = 0
| completed_steps,
|
progress_bar,
max_train_steps,
save_dir
model.train()
total_loss = 0
| for step, batch in enumerate(dataloader):
with accelerator.accumulate(model):
# Forward pass
outputs = model(**batch)
loss = outputs.loss
| acc_loss = 0
|
for step, batch in enumerate(dataloader):
with accelerator.accumulate(model):
# Forward pass
outputs = model(**batch)
loss = outputs.loss
| total_loss += last_loss
acc_loss += last_loss
accelerator.backward(loss)
del batch, loss, outputs
torch.cuda.empty_cache()
| last_loss = loss.detach().float()
|
total_loss += last_loss
acc_loss += last_loss
accelerator.backward(loss)
del batch, loss, outputs
torch.cuda.empty_cache()
| accelerator.clip_grad_norm_(model.parameters(), max_grad_norm)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
progress_bar.update(1)
| if accelerator.sync_gradients:
|
accelerator.clip_grad_norm_(model.parameters(), max_grad_norm)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
progress_bar.update(1)
| acc_loss = acc_loss / int(config["gradient_accumulation_steps"])
accelerator.log({"loss": acc_loss.item()})
acc_loss = 0
if completed_steps % checkpointing_steps == 0:
save_checkpoint(model, accelerator, tokenizer, optimizer, lr_scheduler, save_dir, checkpointing_steps)
| completed_steps += 1
|
acc_loss = acc_loss / int(config["gradient_accumulation_steps"])
accelerator.log({"loss": acc_loss.item()})
acc_loss = 0
if completed_steps % checkpointing_steps == 0:
save_checkpoint(model, accelerator, tokenizer, optimizer, lr_scheduler, save_dir, checkpointing_steps)
| if completed_steps >= max_train_steps:
break
return total_loss / len(dataloader), completed_steps
def eval(
model,
| torch.cuda.empty_cache()
|
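The fragments between the imports and `def eval(` belong to a single `train()` function: one pass over the dataloader with gradient accumulation, gradient clipping on sync steps, loss logging, and periodic checkpointing. A sketch of that function reassembled from the rows; nesting and indentation are reconstructed, and `config`, `checkpointing_steps`, `max_grad_norm`, `tokenizer`, and `save_checkpoint` are module-level names defined elsewhere in the script:

```python
import torch

def train(
    model,
    dataloader,
    accelerator,
    optimizer,
    lr_scheduler,
    completed_steps,
    progress_bar,
    max_train_steps,
    save_dir,
):
    model.train()
    total_loss = 0
    acc_loss = 0

    for step, batch in enumerate(dataloader):
        with accelerator.accumulate(model):
            # Forward pass
            outputs = model(**batch)
            loss = outputs.loss
            last_loss = loss.detach().float()

            total_loss += last_loss
            acc_loss += last_loss

            accelerator.backward(loss)
            del batch, loss, outputs
            torch.cuda.empty_cache()

            if accelerator.sync_gradients:
                # One real optimizer update per accumulation window
                accelerator.clip_grad_norm_(model.parameters(), max_grad_norm)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
                progress_bar.update(1)
                completed_steps += 1

                acc_loss = acc_loss / int(config["gradient_accumulation_steps"])
                accelerator.log({"loss": acc_loss.item()})
                acc_loss = 0
                if completed_steps % checkpointing_steps == 0:
                    save_checkpoint(model, accelerator, tokenizer, optimizer, lr_scheduler, save_dir, checkpointing_steps)
                torch.cuda.empty_cache()

        if completed_steps >= max_train_steps:
            break

    return total_loss / len(dataloader), completed_steps
```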
if completed_steps >= max_train_steps:
break
return total_loss / len(dataloader), completed_steps
def eval(
model,
| accelerator,
epoch,
completed_steps,
train_loss,
model.eval()
| dataloader,
|
accelerator,
epoch,
completed_steps,
train_loss,
model.eval()
| eval_progress_bar = tqdm(dataloader, desc=f"Evaluating Epoch {epoch}", leave=False)
for batch in eval_progress_bar:
with torch.no_grad():
# Forward pass
outputs = model(**batch)
| losses = []
|
eval_progress_bar = tqdm(dataloader, desc=f"Evaluating Epoch {epoch}", leave=False)
for batch in eval_progress_bar:
with torch.no_grad():
# Forward pass
outputs = model(**batch)
| losses.append(accelerator.gather_for_metrics(loss.repeat(int(config["eval_batch_size"]))))
del outputs
losses = torch.cat(losses)
try:
eval_loss = torch.mean(losses)
| loss = outputs.loss
|
losses.append(accelerator.gather_for_metrics(loss.repeat(int(config["eval_batch_size"]))))
del outputs
losses = torch.cat(losses)
try:
eval_loss = torch.mean(losses)
| except OverflowError:
perplexity = float("inf")
print(f"epoch {epoch}: perplexity: {perplexity} eval_loss: {eval_loss}")
base_log = {
"perplexity": perplexity,
| perplexity = math.exp(eval_loss)
|
except OverflowError:
perplexity = float("inf")
print(f"epoch {epoch}: perplexity: {perplexity} eval_loss: {eval_loss}")
base_log = {
"perplexity": perplexity,
| "train_loss": train_loss.item(),
"epoch": epoch,
"step": completed_steps,
}
accelerator.log(base_log, step=completed_steps)
| "eval_loss": eval_loss.item(),
|
"train_loss": train_loss.item(),
"epoch": epoch,
"step": completed_steps,
}
accelerator.log(base_log, step=completed_steps)
| import datetime
timeout = datetime.timedelta(seconds=100000000)
accelerator = Accelerator(
gradient_accumulation_steps=int(config["gradient_accumulation_steps"]),
mixed_precision="no",
| if __name__ == "__main__":
|
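The `eval()` fragments gather per-batch losses across processes, average them, and report perplexity alongside the epoch's train loss. Reassembled sketch, again with `config` as the module-level YAML dict:

```python
import math
import torch
from tqdm import tqdm

def eval(
    model,
    dataloader,
    accelerator,
    epoch,
    completed_steps,
    train_loss,
):
    model.eval()
    losses = []

    eval_progress_bar = tqdm(dataloader, desc=f"Evaluating Epoch {epoch}", leave=False)
    for batch in eval_progress_bar:
        with torch.no_grad():
            # Forward pass
            outputs = model(**batch)
            loss = outputs.loss
            losses.append(accelerator.gather_for_metrics(loss.repeat(int(config["eval_batch_size"]))))
            del outputs

    losses = torch.cat(losses)
    try:
        eval_loss = torch.mean(losses)
        perplexity = math.exp(eval_loss)
    except OverflowError:
        perplexity = float("inf")

    print(f"epoch {epoch}: perplexity: {perplexity} eval_loss: {eval_loss}")
    base_log = {
        "perplexity": perplexity,
        "eval_loss": eval_loss.item(),
        "train_loss": train_loss.item(),
        "epoch": epoch,
        "step": completed_steps,
    }
    accelerator.log(base_log, step=completed_steps)
```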
import datetime
timeout = datetime.timedelta(seconds=100000000)
accelerator = Accelerator(
gradient_accumulation_steps=int(config["gradient_accumulation_steps"]),
mixed_precision="no",
| kwargs_handlers=[DistributedDataParallelKwargs(find_unused_parameters=False),
InitProcessGroupKwargs(timeout=timeout)],
)
device = accelerator.device
exp_save_dir = os.path.join(save_dir, get_exp_name(config))
| log_with="wandb",
|
kwargs_handlers=[DistributedDataParallelKwargs(find_unused_parameters=False),
InitProcessGroupKwargs(timeout=timeout)],
)
device = accelerator.device
exp_save_dir = os.path.join(save_dir, get_exp_name(config))
| tokenizer = AutoTokenizer.from_pretrained(base_model, cache_dir=path_to_cache)
model = AutoModelForCausalLM.from_pretrained(
base_model, attn_implementation="sdpa", torch_dtype=torch.bfloat16, cache_dir=path_to_cache
)
model.gradient_checkpointing_enable()
| os.makedirs(exp_save_dir, exist_ok=True)
|
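The `__main__` block builds the `Accelerator` with gradient accumulation, wandb logging, and a very long process-group timeout, then derives the experiment directory. Sketch reassembled from the rows; `config`, `save_dir`, and `get_exp_name` come from earlier in the script:

```python
import os
import datetime

from accelerate import Accelerator, DistributedDataParallelKwargs, InitProcessGroupKwargs

if __name__ == "__main__":
    # Generous timeout so long data preprocessing does not kill the process group
    timeout = datetime.timedelta(seconds=100000000)
    accelerator = Accelerator(
        gradient_accumulation_steps=int(config["gradient_accumulation_steps"]),
        mixed_precision="no",
        log_with="wandb",
        kwargs_handlers=[
            DistributedDataParallelKwargs(find_unused_parameters=False),
            InitProcessGroupKwargs(timeout=timeout),
        ],
    )
    device = accelerator.device

    exp_save_dir = os.path.join(save_dir, get_exp_name(config))
    os.makedirs(exp_save_dir, exist_ok=True)
```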
tokenizer = AutoTokenizer.from_pretrained(base_model, cache_dir=path_to_cache)
model = AutoModelForCausalLM.from_pretrained(
base_model, attn_implementation="sdpa", torch_dtype=torch.bfloat16, cache_dir=path_to_cache
)
model.gradient_checkpointing_enable()
| {"additional_special_tokens": [start_audio_token, end_audio_token]}
)
n_tokens = len(tokenizer)
print("Not audio tokens:", n_tokens)
start_audio_token_id = tokenizer(start_audio_token)["input_ids"][-1]
| tokenizer.add_special_tokens(
|
{"additional_special_tokens": [start_audio_token, end_audio_token]}
)
n_tokens = len(tokenizer)
print("Not audio tokens:", n_tokens)
start_audio_token_id = tokenizer(start_audio_token)["input_ids"][-1]
| tokens_config = get_start_tokens(config["quantizer"], n_tokens)
quantizer = AudioTokenizer(config["quantizer"], tokens_config)
codebook_size = config["quantizer"]["speech"]["n_new_tokens"] + config["quantizer"]["wav"]["n_new_tokens"]
train_dataset, val_dataset = load_data(data, tokenizer, quantizer, config)
model.resize_token_embeddings(n_tokens + codebook_size)
| end_audio_token_id = tokenizer(end_audio_token)["input_ids"][-1]
|
tokens_config = get_start_tokens(config["quantizer"], n_tokens)
quantizer = AudioTokenizer(config["quantizer"], tokens_config)
codebook_size = config["quantizer"]["speech"]["n_new_tokens"] + config["quantizer"]["wav"]["n_new_tokens"]
train_dataset, val_dataset = load_data(data, tokenizer, quantizer, config)
model.resize_token_embeddings(n_tokens + codebook_size)
| model = fix_checkpoint(model, checkpoint_path)
train_dataloader = DataLoader(
train_dataset,
shuffle=True,
collate_fn=default_data_collator,
| if checkpoint_path is not None:
|
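These rows load the base causal LM in bf16, register the `<soa>`/`<eoa>` markers as special tokens, build the audio quantizer, and grow the embedding table to make room for the audio codebooks. Reassembled sketch; `load_data`, `AudioTokenizer`, `get_start_tokens`, and `fix_checkpoint` are the project helpers imported from `src` in the first rows:

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained(base_model, cache_dir=path_to_cache)
model = AutoModelForCausalLM.from_pretrained(
    base_model, attn_implementation="sdpa", torch_dtype=torch.bfloat16, cache_dir=path_to_cache
)
model.gradient_checkpointing_enable()

# Register the start/end-of-audio markers as special tokens
tokenizer.add_special_tokens(
    {"additional_special_tokens": [start_audio_token, end_audio_token]}
)
n_tokens = len(tokenizer)
print("Not audio tokens:", n_tokens)  # i.e. the size of the text-only vocabulary

start_audio_token_id = tokenizer(start_audio_token)["input_ids"][-1]
end_audio_token_id = tokenizer(end_audio_token)["input_ids"][-1]

# Audio codebook tokens are appended after the text vocabulary
tokens_config = get_start_tokens(config["quantizer"], n_tokens)
quantizer = AudioTokenizer(config["quantizer"], tokens_config)
codebook_size = config["quantizer"]["speech"]["n_new_tokens"] + config["quantizer"]["wav"]["n_new_tokens"]

train_dataset, val_dataset = load_data(data, tokenizer, quantizer, config)
model.resize_token_embeddings(n_tokens + codebook_size)

if checkpoint_path is not None:
    model = fix_checkpoint(model, checkpoint_path)
```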
model = fix_checkpoint(model, checkpoint_path)
train_dataloader = DataLoader(
train_dataset,
shuffle=True,
collate_fn=default_data_collator,
| num_workers=16
)
eval_dataloader = DataLoader(
val_dataset,
collate_fn=default_data_collator,
| batch_size=int(config["train_batch_size"]),
|
num_workers=16
)
eval_dataloader = DataLoader(
val_dataset,
collate_fn=default_data_collator,
| num_workers=16
)
no_decay = ["bias", "layer_norm.weight"]
optimizer_grouped_parameters = [
{
| batch_size=int(config["eval_batch_size"]),
|
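The dataloader rows reassemble to two `DataLoader`s that share the default HF collator; only the train loader shuffles. Sketch:

```python
from torch.utils.data import DataLoader
from transformers import default_data_collator

train_dataloader = DataLoader(
    train_dataset,
    shuffle=True,
    collate_fn=default_data_collator,
    batch_size=int(config["train_batch_size"]),
    num_workers=16,
)
eval_dataloader = DataLoader(
    val_dataset,
    collate_fn=default_data_collator,
    batch_size=int(config["eval_batch_size"]),
    num_workers=16,
)
```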
num_workers=16
)
no_decay = ["bias", "layer_norm.weight"]
optimizer_grouped_parameters = [
{
| p
for n, p in model.named_parameters()
if not any(nd in n for nd in no_decay) and p.requires_grad
],
"weight_decay": float(config["weight_decay"]),
| "params": [
|
p
for n, p in model.named_parameters()
if not any(nd in n for nd in no_decay) and p.requires_grad
],
"weight_decay": float(config["weight_decay"]),
| {
"params": [
p
for n, p in model.named_parameters()
if any(nd in n for nd in no_decay) and p.requires_grad
| },
|
{
"params": [
p
for n, p in model.named_parameters()
if any(nd in n for nd in no_decay) and p.requires_grad
| "weight_decay": 0.0,
},
]
optimizer = torch.optim.AdamW(
optimizer_grouped_parameters, lr=float(config["learning_rate"]), # fused=True
| ],
|
"weight_decay": 0.0,
},
]
optimizer = torch.optim.AdamW(
optimizer_grouped_parameters, lr=float(config["learning_rate"]), # fused=True
| num_update_steps_per_epoch = math.ceil(
len(train_dataloader) / int(config["gradient_accumulation_steps"])
)
max_train_steps = int(config["num_train_epochs"]) * num_update_steps_per_epoch
lr_scheduler = get_scheduler(
| )
|
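The optimizer rows split trainable parameters into two groups so that biases and layer-norm weights are excluded from weight decay, then hand both groups to AdamW. Reassembled sketch:

```python
import torch

# Parameters whose names contain these substrings get no weight decay
no_decay = ["bias", "layer_norm.weight"]
optimizer_grouped_parameters = [
    {
        "params": [
            p
            for n, p in model.named_parameters()
            if not any(nd in n for nd in no_decay) and p.requires_grad
        ],
        "weight_decay": float(config["weight_decay"]),
    },
    {
        "params": [
            p
            for n, p in model.named_parameters()
            if any(nd in n for nd in no_decay) and p.requires_grad
        ],
        "weight_decay": 0.0,
    },
]
optimizer = torch.optim.AdamW(
    optimizer_grouped_parameters, lr=float(config["learning_rate"]),  # fused=True
)
```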
num_update_steps_per_epoch = math.ceil(
len(train_dataloader) / int(config["gradient_accumulation_steps"])
)
max_train_steps = int(config["num_train_epochs"]) * num_update_steps_per_epoch
lr_scheduler = get_scheduler(
| optimizer=optimizer,
num_warmup_steps=int(config["num_warmup_steps"]) * accelerator.num_processes,
num_training_steps=max_train_steps * accelerator.num_processes,
)
if checkpoint_path is not None:
| name=config["lr_scheduler_type"],
|
optimizer=optimizer,
num_warmup_steps=int(config["num_warmup_steps"]) * accelerator.num_processes,
num_training_steps=max_train_steps * accelerator.num_processes,
)
if checkpoint_path is not None:
scheduler_state = torch.load(os.path.join(checkpoint_path, "scheduler.pt"))
optimizer.load_state_dict(optim_state)
lr_scheduler.load_state_dict(scheduler_state)
# model = freeze(model, freeze_other=False, freeze_ff=True, freeze_ff_layers=[31])
(
| optim_state = torch.load(os.path.join(checkpoint_path, "optimizer.pt"))
|
scheduler_state = torch.load(os.path.join(checkpoint_path, "scheduler.pt"))
optimizer.load_state_dict(optim_state)
lr_scheduler.load_state_dict(scheduler_state)
# model = freeze(model, freeze_other=False, freeze_ff=True, freeze_ff_layers=[31])
(
| optimizer,
train_dataloader,
eval_dataloader,
lr_scheduler,
) = accelerator.prepare(
| model,
|
optimizer,
train_dataloader,
eval_dataloader,
lr_scheduler,
) = accelerator.prepare(
| )
num_update_steps_per_epoch = math.ceil(
len(train_dataloader) / int(config["gradient_accumulation_steps"])
)
max_train_steps = config["num_train_epochs"] * num_update_steps_per_epoch
| model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
|
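These rows build the LR scheduler, optionally restore optimizer/scheduler state from a checkpoint directory, wrap everything with `accelerator.prepare`, and recompute the step budget because the dataloader length can change after preparation. Reassembled sketch (the rows spell the variable `sceduler_state`; it is spelled `scheduler_state` here):

```python
import math
import os

import torch
from transformers import get_scheduler

num_update_steps_per_epoch = math.ceil(
    len(train_dataloader) / int(config["gradient_accumulation_steps"])
)
max_train_steps = int(config["num_train_epochs"]) * num_update_steps_per_epoch

lr_scheduler = get_scheduler(
    name=config["lr_scheduler_type"],
    optimizer=optimizer,
    num_warmup_steps=int(config["num_warmup_steps"]) * accelerator.num_processes,
    num_training_steps=max_train_steps * accelerator.num_processes,
)

# Resume optimizer/scheduler state when a checkpoint directory is given
if checkpoint_path is not None:
    optim_state = torch.load(os.path.join(checkpoint_path, "optimizer.pt"))
    scheduler_state = torch.load(os.path.join(checkpoint_path, "scheduler.pt"))
    optimizer.load_state_dict(optim_state)
    lr_scheduler.load_state_dict(scheduler_state)

(
    model,
    optimizer,
    train_dataloader,
    eval_dataloader,
    lr_scheduler,
) = accelerator.prepare(
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)

# Dataloader length may differ after prepare(), so recompute the step budget
num_update_steps_per_epoch = math.ceil(
    len(train_dataloader) / int(config["gradient_accumulation_steps"])
)
max_train_steps = config["num_train_epochs"] * num_update_steps_per_epoch
num_train_epochs = math.ceil(max_train_steps / num_update_steps_per_epoch)
```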
)
num_update_steps_per_epoch = math.ceil(
len(train_dataloader) / int(config["gradient_accumulation_steps"])
)
max_train_steps = config["num_train_epochs"] * num_update_steps_per_epoch
| accelerator.init_trackers(
config["wandb_project_name"], {"lr_scheduler_type": config["lr_scheduler_type"]}
)
total_batch_size = (
config["train_batch_size"]
| num_train_epochs = math.ceil(max_train_steps / num_update_steps_per_epoch)
|
accelerator.init_trackers(
config["wandb_project_name"], {"lr_scheduler_type": config["lr_scheduler_type"]}
)
total_batch_size = (
config["train_batch_size"]
| * int(config["gradient_accumulation_steps"])
)
print("***** Running training *****")
print(f" Num examples = {len(train_dataset)}")
print(f" Num Epochs = {num_train_epochs}")
| * accelerator.num_processes
|
* int(config["gradient_accumulation_steps"])
)
print("***** Running training *****")
print(f" Num examples = {len(train_dataset)}")
print(f" Num Epochs = {num_train_epochs}")
| print(
f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}"
)
print(f" Gradient Accumulation steps = {config['gradient_accumulation_steps']}")
print(f" Total optimization steps = {max_train_steps}")
| print(f" Instantaneous batch size per device = {config['train_batch_size']}")
|
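For reference, the total batch size printed above is the per-device batch size times the number of processes times the gradient-accumulation steps. A quick illustration with hypothetical numbers (the real values come from config.yaml and the launch setup, which are not shown here):

```python
# Hypothetical values for illustration only
train_batch_size = 8                 # config["train_batch_size"]
num_processes = 4                    # accelerator.num_processes
gradient_accumulation_steps = 4      # config["gradient_accumulation_steps"]

total_batch_size = train_batch_size * num_processes * gradient_accumulation_steps
print(total_batch_size)  # 128 samples per optimizer update
```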
print(
f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}"
)
print(f" Gradient Accumulation steps = {config['gradient_accumulation_steps']}")
print(f" Total optimization steps = {max_train_steps}")
| range(max_train_steps), disable=not accelerator.is_local_main_process
)
completed_steps = 0
starting_epoch = 0
for epoch in range(starting_epoch, num_train_epochs):
| progress_bar = tqdm(
|
range(max_train_steps), disable=not accelerator.is_local_main_process
)
completed_steps = 0
starting_epoch = 0
for epoch in range(starting_epoch, num_train_epochs):
| model,
train_dataloader,
accelerator,
optimizer,
lr_scheduler,
| train_loss, completed_steps = train(
|
model,
train_dataloader,
accelerator,
optimizer,
lr_scheduler,
| progress_bar,
max_train_steps,
exp_save_dir
)
print(f"EPOCH {epoch + 1} train loss:", train_loss)
| completed_steps,
|
progress_bar,
max_train_steps,
exp_save_dir
)
print(f"EPOCH {epoch + 1} train loss:", train_loss)
| model,
eval_dataloader,
accelerator,
epoch,
completed_steps,
| eval(
|
model,
eval_dataloader,
accelerator,
epoch,
completed_steps,
| )
| train_loss,
|
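The last training rows reassemble to the outer epoch loop: a shared progress bar over `max_train_steps`, a call to `train()` per epoch, and an evaluation pass on the held-out set. Sketch:

```python
from tqdm import tqdm

progress_bar = tqdm(
    range(max_train_steps), disable=not accelerator.is_local_main_process
)
completed_steps = 0
starting_epoch = 0

for epoch in range(starting_epoch, num_train_epochs):
    train_loss, completed_steps = train(
        model,
        train_dataloader,
        accelerator,
        optimizer,
        lr_scheduler,
        completed_steps,
        progress_bar,
        max_train_steps,
        exp_save_dir,
    )
    print(f"EPOCH {epoch + 1} train loss:", train_loss)
    eval(
        model,
        eval_dataloader,
        accelerator,
        epoch,
        completed_steps,
        train_loss,
    )
```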
import argparse
import torch
import torchaudio
import yaml
from transformers import AutoTokenizer, AutoModelForCausalLM
| start_audio_token = "<soa>"
end_audio_token = "<eoa>"
end_sequence_token = "<eos>"
device = "cuda"
parser = argparse.ArgumentParser(description="Train a model with configuration.")
| from src.tokenizer import get_start_tokens, AudioTokenizer
|
start_audio_token = "<soa>"
end_audio_token = "<eoa>"
end_sequence_token = "<eos>"
device = "cuda"
parser = argparse.ArgumentParser(description="Train a model with configuration.")
| "--config", type=str, help="Path to the config.yaml file", required=True
parser.add_argument(
"--asr", action="store_true", help="asr/tts",
parser.add_argument(
"--audio_path", type=str, help="Path to audio if asr is used",
| parser.add_argument(
|
"--config", type=str, help="Path to the config.yaml file", required=True
parser.add_argument(
"--asr", action="store_true", help="asr/tts",
parser.add_argument(
"--audio_path", type=str, help="Path to audio if asr is used",
| "--text", type=str, help="Text if tts is used",
args = parser.parse_args()
# Load config
with open(args.config, "r") as file:
config = yaml.safe_load(file)
| parser.add_argument(
|
"--text", type=str, help="Text if tts is used",
args = parser.parse_args()
# Load config
with open(args.config, "r") as file:
config = yaml.safe_load(file)
| text_tokenized = tokenizer(text, return_tensors="pt")
text_input_tokens = text_tokenized["input_ids"].to(device)
soa = tokenizer(start_audio_token, return_tensors="pt")["input_ids"][:, -1:].to(device)
eoa = tokenizer(end_audio_token, return_tensors="pt")["input_ids"][:, -1:].to(device)
text_tokens = torch.cat([text_input_tokens, soa], dim=1)
| def infer_text_to_audio(text, model, tokenizer, quantizer, max_seq_length=1024):
|
text_tokenized = tokenizer(text, return_tensors="pt")
text_input_tokens = text_tokenized["input_ids"].to(device)
soa = tokenizer(start_audio_token, return_tensors="pt")["input_ids"][:, -1:].to(device)
eoa = tokenizer(end_audio_token, return_tensors="pt")["input_ids"][:, -1:].to(device)
text_tokens = torch.cat([text_input_tokens, soa], dim=1)
| output_audio_tokens = model.generate(
text_tokens,
attention_mask=attention_mask,
max_new_tokens=max_seq_length,
repetition_penalty=1.1,
| attention_mask = torch.ones(text_tokens.size(), device=device)
|
output_audio_tokens = model.generate(
text_tokens,
attention_mask=attention_mask,
max_new_tokens=max_seq_length,
repetition_penalty=1.1,
| num_beams=5,
no_repeat_ngram_size=3,
)
audio_signal = decode_tts(output_audio_tokens[0], quantizer, 3, len(tokenizer), soa, eoa)
return audio_signal
| length_penalty=1.2,
|
num_beams=5,
no_repeat_ngram_size=3,
)
audio_signal = decode_tts(output_audio_tokens[0], quantizer, 3, len(tokenizer), soa, eoa)
return audio_signal
| audio_data, sample_rate = torchaudio.load(audio_path)
audio = audio_data.view(1, -1).float().to(device)
bandwidth_id = torch.tensor([0])
_, codes = quantizer.encode_infer(audio, bandwidth_id=bandwidth_id)
raw_audio_tokens = codes + len(tokenizer) + 1024
| def infer_audio_to_text(audio_path, model, tokenizer, quantizer, max_seq_length=1024, top_k=20):
|
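From here the rows switch to an inference script. The TTS path appends the `<soa>` marker to the tokenized text, beam-searches audio tokens, and decodes them back to a waveform. Reassembled sketch; `decode_tts` is called by the rows but its import is not shown there, so it is presumably another project helper:

```python
import torch

start_audio_token = "<soa>"
end_audio_token = "<eoa>"
device = "cuda"

def infer_text_to_audio(text, model, tokenizer, quantizer, max_seq_length=1024):
    # Text prompt followed by the start-of-audio marker
    text_tokenized = tokenizer(text, return_tensors="pt")
    text_input_tokens = text_tokenized["input_ids"].to(device)
    soa = tokenizer(start_audio_token, return_tensors="pt")["input_ids"][:, -1:].to(device)
    eoa = tokenizer(end_audio_token, return_tensors="pt")["input_ids"][:, -1:].to(device)
    text_tokens = torch.cat([text_input_tokens, soa], dim=1)
    attention_mask = torch.ones(text_tokens.size(), device=device)

    # Beam-search audio tokens, then decode them back to a waveform
    output_audio_tokens = model.generate(
        text_tokens,
        attention_mask=attention_mask,
        max_new_tokens=max_seq_length,
        repetition_penalty=1.1,
        length_penalty=1.2,
        num_beams=5,
        no_repeat_ngram_size=3,
    )
    # decode_tts is a project helper (import not shown in the rows)
    audio_signal = decode_tts(output_audio_tokens[0], quantizer, 3, len(tokenizer), soa, eoa)
    return audio_signal
```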
audio_data, sample_rate = torchaudio.load(audio_path)
audio = audio_data.view(1, -1).float().to(device)
bandwidth_id = torch.tensor([0])
_, codes = quantizer.encode_infer(audio, bandwidth_id=bandwidth_id)
raw_audio_tokens = codes + len(tokenizer) + 1024
| eoa = tokenizer(end_audio_token, return_tensors="pt")["input_ids"][:, -1:].to(device)
audio_tokens = torch.cat([soa, raw_audio_tokens.view(1, -1), eoa], dim=1)
tokens = torch.cat([audio_tokens], dim=1)
attention_mask = torch.ones(tokens.size(), device=device)
output_text_tokens = model.generate(
| soa = tokenizer(start_audio_token, return_tensors="pt")["input_ids"][:, -1:].to(device)
|
eoa = tokenizer(end_audio_token, return_tensors="pt")["input_ids"][:, -1:].to(device)
audio_tokens = torch.cat([soa, raw_audio_tokens.view(1, -1), eoa], dim=1)
tokens = torch.cat([audio_tokens], dim=1)
attention_mask = torch.ones(tokens.size(), device=device)
output_text_tokens = model.generate(
| attention_mask=attention_mask,
max_new_tokens=max_seq_length,
do_sample=False,
num_beams=5, no_repeat_ngram_size=4,
length_penalty=2.0,
| tokens,
|
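The final rows start the ASR path: the waveform is encoded with the quantizer, the codes are offset into the audio region of the vocabulary (`len(tokenizer) + 1024` in the rows), wrapped in `<soa>`/`<eoa>`, and passed to `model.generate`. The table cuts off inside that call, so the sketch below stops where the rows do; decoding the generated text tokens back to a string is not shown in the fragments and is left out here:

```python
import torch
import torchaudio

def infer_audio_to_text(audio_path, model, tokenizer, quantizer, max_seq_length=1024, top_k=20):
    # Encode the waveform into discrete codes and shift them into the audio vocabulary range
    audio_data, sample_rate = torchaudio.load(audio_path)
    audio = audio_data.view(1, -1).float().to(device)
    bandwidth_id = torch.tensor([0])
    _, codes = quantizer.encode_infer(audio, bandwidth_id=bandwidth_id)
    raw_audio_tokens = codes + len(tokenizer) + 1024

    soa = tokenizer(start_audio_token, return_tensors="pt")["input_ids"][:, -1:].to(device)
    eoa = tokenizer(end_audio_token, return_tensors="pt")["input_ids"][:, -1:].to(device)
    audio_tokens = torch.cat([soa, raw_audio_tokens.view(1, -1), eoa], dim=1)
    tokens = torch.cat([audio_tokens], dim=1)
    attention_mask = torch.ones(tokens.size(), device=device)

    output_text_tokens = model.generate(
        tokens,
        attention_mask=attention_mask,
        max_new_tokens=max_seq_length,
        do_sample=False,
        num_beams=5,
        no_repeat_ngram_size=4,
        length_penalty=2.0,
    )
    # The rows end mid-call; text decoding of output_text_tokens is not shown there
    return output_text_tokens
```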