# Youtube crawl dataset
We crawled Malaysian and Singaporean YouTube channels, collecting about 60k audio files totalling 18.7k hours.
- URLs data at https://github.com/mesolitica/malaya-speech/tree/master/data/youtube/data
- Notebooks at https://github.com/mesolitica/malaya-speech/tree/master/data/youtube
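
To fetch the parquet shards and the global indices file locally before loading, `huggingface_hub` can snapshot the repository. A minimal sketch; the repo id `mesolitica/crawl-youtube` and the file patterns are assumptions based on this card, so adjust them to the actual repository layout:

```python
# Sketch only: the repo id and file patterns below are assumptions, not taken from this card.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(
    repo_id='mesolitica/crawl-youtube',      # assumed dataset repo id
    repo_type='dataset',
    allow_patterns=['*.parquet', '*.json'],  # parquet shards plus the global indices file
)
print(local_dir)
```

The `Train` dataset below maps a flat global index to one 30-second slice of decoded audio, with small FIFO caches for the parquet dataframes and decoded waveforms: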
```python
import pandas as pd
import json
from datasets import Audio
from torch.utils.data import DataLoader, Dataset

chunks = 30  # chunk length in seconds
sr = 16000   # sampling rate

class Train(Dataset):
    def __init__(self, indices, maxlen_cache_df=5, maxlen_cache_audio=50):
        # Flatten the global indices: every global chunk offset maps to the
        # metadata of the parquet row that owns it.
        self.indices = {}
        for k, v in indices.items():
            for i in range(int(k), v['start'] + v['end'], 1):
                self.indices[i] = v
        self.max_index = len(self.indices)
        # Small FIFO caches so consecutive chunks reuse the same parquet
        # dataframe and decoded audio instead of re-reading them.
        self.cache_df = {}
        self.cache_audio = {}
        self.maxlen_cache_df = maxlen_cache_df
        self.maxlen_cache_audio = maxlen_cache_audio
        self.audio = Audio(sampling_rate=sr)

    def __len__(self):
        return self.max_index

    def __getitem__(self, item):
        if item < 0:
            item = self.max_index + item
        v = self.indices[item]
        key_row = f"{v['filename']}-{v['i']}"
        chunk_index = item - v['start']
        if key_row not in self.cache_audio:
            if v['filename'] not in self.cache_df:
                df = pd.read_parquet(v['filename'])
                if len(self.cache_df) >= self.maxlen_cache_df:
                    keys = list(self.cache_df.keys())
                    self.cache_df.pop(sorted(keys)[0], None)
                self.cache_df[v['filename']] = df
            else:
                df = self.cache_df[v['filename']]
            row = df.iloc[int(v['i'])]
            # Decode the audio file into a float array at 16 kHz.
            audio = self.audio.decode_example(self.audio.encode_example(row['filename']))
            if len(self.cache_audio) >= self.maxlen_cache_audio:
                keys = list(self.cache_audio.keys())
                self.cache_audio.pop(sorted(keys)[0], None)
            self.cache_audio[key_row] = audio
        else:
            audio = self.cache_audio[key_row]
        # Slice out the 30-second window this global index points at.
        return {
            'array': audio['array'][(chunks * sr) * chunk_index: (chunks * sr) * (chunk_index + 1)]
        }
```
```python
with open('crawl-youtube-global-indices.json') as fopen:
    global_indices = json.load(fopen)

train = Train(global_indices)
train[0]
```

```
{'array': array([ 0.        ,  0.        ,  0.        , ..., -0.00845753,
         0.00168016, -0.00606468])}
```
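
Since `DataLoader` is already imported, the dataset can be iterated in order; a minimal sketch, using `batch_size=1` because the trailing chunk of each audio file can be shorter than 30 seconds and would not stack with full chunks under the default collate:

```python
# Sequential loading; shuffling would defeat the FIFO caches, since
# neighbouring indices usually share the same parquet row.
loader = DataLoader(train, batch_size=1, shuffle=False)
batch = next(iter(loader))
print(batch['array'].shape)  # torch.Size([1, 480000]) for a full chunk: 30 s * 16000 Hz
```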
These are the global hashing indices assuming the audio is chunked into 30-second segments; read more at https://github.com/mesolitica/malaysian-dataset/tree/master/speech-to-text-semisupervised/pseudolabel-whisper
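
For orientation, a hypothetical entry consistent with how `Train.__init__` consumes this file; the field meanings are inferred from the code (the key and `start` are the global chunk offset, `end` the number of chunks the row contributes, `filename` the parquet shard, `i` the row index) and are not verified against the actual JSON:

```python
# Hypothetical structure, inferred from Train.__init__ above.
global_indices = {
    '0': {'filename': 'part-00000.parquet', 'i': 0, 'start': 0, 'end': 12},
    '12': {'filename': 'part-00000.parquet', 'i': 1, 'start': 12, 'end': 7},
}
```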
All the videos, songs, images, and graphics used in the videos belong to their respective owners, and we do not claim any rights over them.
Copyright disclaimer: under Section 107 of the Copyright Act of 1976, allowance is made for "fair use" for purposes such as criticism, comment, news reporting, teaching, scholarship, education, and research. Fair use is a use permitted by copyright statute that might otherwise be infringing.