brando committed on
Commit
94472d9
1 Parent(s): e330ab1

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +69 -0
README.md CHANGED
@@ -25,3 +25,72 @@ configs:
25
  - split: test
26
  path: data/test-*
27
  ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
  - split: test
26
  path: data/test-*
27
  ---
28
+
29
+ The script that created this dataset:
30
+
31
+ ```python
32
import itertools
import os
from concurrent.futures import ThreadPoolExecutor

from datasets import Dataset, DatasetDict, load_dataset
from huggingface_hub import login
36
+
37
# Function to load the Hugging Face API token from a file
def load_token(file_path: str) -> str:
    """Read and return the API token stored at *file_path*.

    Args:
        file_path: Path to the token file; a leading ``~`` is expanded
            to the user's home directory.

    Returns:
        The file's contents with surrounding whitespace stripped.

    Raises:
        OSError: If the file cannot be opened or read.
    """
    # Explicit encoding so the read does not depend on the platform locale.
    with open(os.path.expanduser(file_path), encoding="utf-8") as f:
        return f.read().strip()
42
+
43
# Function to log in to Hugging Face using a token
def login_to_huggingface(token: str) -> None:
    """Log this process in to the Hugging Face Hub using *token*.

    Args:
        token: A Hugging Face API token with write access.
    """
    # Delegate authentication to huggingface_hub, then confirm on stdout.
    login(token=token)
    print("Login successful")
48
+
49
# Function to sample a specific number of examples from a dataset split
def sample_from_split(split_name: str, num_samples: int) -> list:
    """Collect the first *num_samples* "text" fields from a streamed C4 split.

    Args:
        split_name: Dataset split to stream (e.g. "train", "validation").
        num_samples: Number of examples to take from the head of the stream.

    Returns:
        A list of up to ``num_samples`` document strings; empty when
        ``num_samples <= 0`` or shorter if the split is exhausted first.
    """
    # Streaming avoids downloading the full (very large) C4 split up front.
    c4_split = load_dataset("allenai/c4", "en", split=split_name, streaming=True)
    # islice stops after num_samples items — no manual counter/break needed.
    # max(0, ...) keeps the original "empty list" behavior for negative counts.
    return [example["text"] for example in itertools.islice(c4_split, max(0, num_samples))]
59
+
60
# Main function to create a smaller C4 dataset with three subsets and upload it
def main() -> None:
    """Build a small C4 subset (train/validation/test) and push it to the Hub.

    Both the validation and test subsets are carved from C4's "validation"
    split: validation gets the first ``num_samples`` documents, test gets
    the following ``num_samples``.
    """
    # Step 1: Load token and log in
    key_file_path: str = "/lfs/skampere1/0/brando9/keys/brandos_hf_token.txt"
    token: str = load_token(key_file_path)
    login_to_huggingface(token)

    # Step 2: Define sampling parameters
    num_samples = 10000

    # Step 3: Sample the source splits concurrently. Streaming "validation"
    # once for 2*num_samples (instead of 10k + 20k in two overlapping passes)
    # halves the validation download while producing identical subsets:
    # the first half becomes "validation", the second half becomes "test".
    with ThreadPoolExecutor(max_workers=2) as executor:
        future_train = executor.submit(sample_from_split, "train", num_samples)
        future_val_test = executor.submit(sample_from_split, "validation", num_samples * 2)

        train_samples = future_train.result()
        val_and_test = future_val_test.result()
        val_samples = val_and_test[:num_samples]
        test_samples = val_and_test[num_samples:]  # Second 10k from validation for test

    # Step 4: Create DatasetDict
    small_c4_dataset = DatasetDict({
        "train": Dataset.from_dict({"text": train_samples}),
        "validation": Dataset.from_dict({"text": val_samples}),
        "test": Dataset.from_dict({"text": test_samples})
    })

    # Step 5: Upload to Hugging Face Hub
    dataset_name_c4: str = "brando/small-c4-dataset"
    small_c4_dataset.push_to_hub(dataset_name_c4)
    print(f"Small C4 dataset uploaded to https://huggingface.co/datasets/{dataset_name_c4}")
91
+
92
# Run main() only when executed as a script (not when imported as a module)
if __name__ == "__main__":
    main()
95
+
96
+ ```