import os

import numpy as np

REPO_PATH = '/'.join(os.path.abspath(__file__).split('/')[:-3]) + '/'
AUDIO_PATH = REPO_PATH + 'data/music/audio/'
MIDI_PATH = REPO_PATH + 'data/music/midi/'
MUSIC_PATH = REPO_PATH + 'data/music/'
PROCESSED_PATH = REPO_PATH + 'data/music/processed/'
ENCODED_PATH = REPO_PATH + 'data/music/encoded/'
HANDCODED_REP_PATH = MUSIC_PATH + 'handcoded_reps/'
DATASET_PATH = REPO_PATH + 'data/music/encoded_new_structured/diverse_piano/'
SYNTH_RECORDED_AUDIO_PATH = AUDIO_PATH + 'synth_audio_recorded/'
SYNTH_RECORDED_MIDI_PATH = MIDI_PATH + 'synth_midi_recorded/'
CHECKPOINTS_PATH = REPO_PATH + 'checkpoints/'
EXPERIMENT_PATH = REPO_PATH + 'experiments/'
SEED = 0

# params for data download
ALL_URL_PATH = REPO_PATH + 'data/music/audio/all_urls.pickle'
ALL_FAILED_URL_PATH = REPO_PATH + 'data/music/audio/all_failed_urls.pickle'
RATE_AUDIO_SAVE = 16000
FROM_URL_PATH = AUDIO_PATH + 'from_url/'

# params transcription
CHKPT_PATH_TRANSCRIPTION = REPO_PATH + 'checkpoints/piano_transcription/note_F1=0.9677_pedal_F1=0.9186.pth'  # transcriptor chkpt path
FPS = 16000
RANDOM_CROP = False  # whether to use random crops in case of cropped audio
CROP_LEN = 26 * 60

# params midi scrubbing and processing
MAX_DEPTH = 5  # max depth when searching in folders for audio files
MAX_GAP_IN_SONG = 10  # in secs
MIN_LEN = 20  # actual min len could go down to MIN_LEN - 2 * (REMOVE_FIRST_AND_LAST / 5)
MAX_LEN = 25 * 60  # maximum audio len for playlist downloads, and maximum audio length for transcription (in sec)
MIN_NB_NOTES = 80  # min nb of notes per minute of recording
REMOVE_FIRST_AND_LAST = 10  # will be divided by 5 if cutting this makes the song fall below min len

# parameters encoding
NOISE_INJECTED = True
AUGMENTATION = True
NB_AUG = 4 if AUGMENTATION else 0
RANGE_NOTE_ON = 128
RANGE_NOTE_OFF = 128
RANGE_VEL = 32
RANGE_TIME_SHIFT = 100
MAX_EMBEDDING = RANGE_VEL + RANGE_NOTE_OFF + RANGE_TIME_SHIFT + RANGE_NOTE_ON
MAX_TEST_SIZE = 1000
CHECKSUM_PATH = REPO_PATH + 'data/music/midi/checksum.pickle'
CHUNK_SIZE = 512

# pitch-shift augmentations (in semitones)
ALL_AUGMENTATIONS = []
for p in [-3, -2, -1, 1, 2, 3]:
    ALL_AUGMENTATIONS.append(p)
ALL_AUGMENTATIONS = np.array(ALL_AUGMENTATIONS)

# injected noise combinations (shift, pitch), excluding the identity (0, 0)
ALL_NOISE = []
for s in [-5, -2.5, 0, 2.5, 5]:
    for p in np.arange(-6, 7):
        if not ((s == 0) and (p == 0)):
            ALL_NOISE.append((s, p))
ALL_NOISE = np.array(ALL_NOISE)

# music transformer params
REP_MODEL_NAME = REPO_PATH + "checkpoints/music_representation/sentence_embedding/smallbert_b256_r128_1/best_model"
MUSIC_REP_PATH = REPO_PATH + "checkpoints/b256_r128_represented/"
MUSIC_NN_PATH = REPO_PATH + "checkpoints/music_representation/b256_r128_represented/nn_model.pickle"
TRANSLATION_VAE_CHKP_PATH = REPO_PATH + "checkpoints/music2cocktails/music2flavor/b256_r128_classif001_ld40_meanstd_regground2.5_egg_bubbles/"

# piano solo evaluation
# META_DATA_PIANO_EVAL_PATH = REPO_PATH + 'data/music/audio/is_piano.csv'
# CHKPT_PATH_PIANO_EVAL = REPO_PATH + 'data/checkpoints/piano_detection/piano_solo_model_32k.pth'
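
# Illustrative usage sketch (an assumption, not part of the original pipeline): running
# this module directly creates the derived data and checkpoint directories defined above,
# so downstream scripts can write into them without failing on missing folders.
if __name__ == '__main__':
    for _path in [AUDIO_PATH, MIDI_PATH, PROCESSED_PATH, ENCODED_PATH, HANDCODED_REP_PATH,
                  SYNTH_RECORDED_AUDIO_PATH, SYNTH_RECORDED_MIDI_PATH, FROM_URL_PATH,
                  CHECKPOINTS_PATH, EXPERIMENT_PATH]:
        os.makedirs(_path, exist_ok=True)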