2024-06-01 14:49 - Cuda check
2024-06-01 14:49 - True
2024-06-01 14:49 - 2
2024-06-01 14:49 - Configure Model and tokenizer
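
The three logged values suggest a CUDA availability check followed by a device count on a two-GPU node, then the model/tokenizer setup. A minimal sketch of this step, assuming a Hugging Face causal LM; the model name is a placeholder, not something the log records:

import logging

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

logging.basicConfig(format="%(asctime)s - %(message)s",
                    datefmt="%Y-%m-%d %H:%M", level=logging.INFO)
log = logging.getLogger(__name__)

log.info("Cuda check")
log.info(torch.cuda.is_available())  # logged "True"
log.info(torch.cuda.device_count())  # logged "2"

log.info("Configure Model and tokenizer")
model_name = "meta-llama/Llama-2-7b-hf"  # placeholder; the log does not name the model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
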
2024-06-01 14:49 - Memory usage: 0.00 GB
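
The memory line is plausibly torch.cuda.memory_allocated() converted to gigabytes; it reads 0.00 GB because nothing has been moved onto the GPU yet. A sketch, continuing the logger above:

def log_gpu_memory() -> None:
    # memory_allocated() returns bytes currently occupied by tensors on the GPU
    gb = torch.cuda.memory_allocated() / 1024 ** 3
    log.info("Memory usage: %.2f GB", gb)

log_gpu_memory()  # logged "Memory usage: 0.00 GB"
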
2024-06-01 14:49 - Dataset loaded successfully:
                   train - Jingmei/Pandemic_Wiki
                   test  - Jingmei/Pandemic
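
The two Hub dataset names come straight from the log; using Jingmei/Pandemic as the held-out set is inferred from the "test" label, and the split names are assumptions. A sketch of the loading step:

from datasets import DatasetDict, load_dataset

data = DatasetDict({
    "train": load_dataset("Jingmei/Pandemic_Wiki", split="train"),
    "test": load_dataset("Jingmei/Pandemic", split="train"),
})
log.info("Dataset loaded successfully:\n train - Jingmei/Pandemic_Wiki\n test  - Jingmei/Pandemic")
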
2024-06-01 14:49 - Tokenize data: DatasetDict({
train: Dataset({
features: ['input_ids', 'attention_mask'],
num_rows: 2152
})
test: Dataset({
features: ['input_ids', 'attention_mask'],
num_rows: 8264
})
})
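
The tokenized DatasetDict keeps only input_ids and attention_mask, which points to a batched map that drops the raw columns; row counts (2152 train / 8264 test) are unchanged from the raw data. A sketch, assuming the text lives in a column named "text":

def tokenize(batch):
    # No truncation here; long documents are cut into fixed-size blocks later.
    return tokenizer(batch["text"])

tokenized = data.map(tokenize, batched=True,
                     remove_columns=data["train"].column_names)
log.info("Tokenize data: %s", tokenized)
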
2024-06-01 14:49 - Split data into chunks: DatasetDict({
train: Dataset({
features: ['input_ids', 'attention_mask'],
num_rows: 24863
})
test: Dataset({
features: ['input_ids', 'attention_mask'],
num_rows: 198964
})
})
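
Row counts grow roughly tenfold here (2,152 -> 24,863 train and 8,264 -> 198,964 test), consistent with the standard causal-LM recipe of concatenating token streams and re-splitting them into fixed-length blocks. A sketch; the block size of 512 is a guess, since the log records only the resulting row counts:

block_size = 512  # assumed; not recorded in the log

def group_texts(examples):
    # Concatenate every sequence in the batch, then cut into equal blocks,
    # dropping the tail that does not fill a whole block.
    concat = {k: sum(examples[k], []) for k in examples}
    total = (len(concat["input_ids"]) // block_size) * block_size
    return {k: [v[i:i + block_size] for i in range(0, total, block_size)]
            for k, v in concat.items()}

chunked = tokenized.map(group_texts, batched=True)
log.info("Split data into chunks: %s", chunked)
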
2024-06-01 14:49 - Setup PEFT
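
"Setup PEFT" presumably wraps the base model with parameter-efficient adapters. A sketch assuming LoRA via the peft library, with illustrative hyperparameters the log does not confirm:

from peft import LoraConfig, get_peft_model

peft_config = LoraConfig(task_type="CAUSAL_LM", r=16,
                         lora_alpha=32, lora_dropout=0.05)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()  # only the adapter weights remain trainable
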
2024-06-01 14:49 - Setup optimizer
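
A sketch of the optimizer step: AdamW over the trainable (adapter) parameters only; the learning rate is an assumed value:

from torch.optim import AdamW

optimizer = AdamW((p for p in model.parameters() if p.requires_grad), lr=2e-4)
log.info("Setup optimizer")
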
2024-06-01 14:49 - Start training!!
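
How training proceeds is not recorded beyond this line; a sketch of a plain PyTorch loop over the chunked train split, using the standard causal-LM collator to pad batches and add labels:

from torch.utils.data import DataLoader
from transformers import DataCollatorForLanguageModeling

collator = DataCollatorForLanguageModeling(tokenizer, mlm=False)  # copies input_ids to labels
loader = DataLoader(chunked["train"], batch_size=8, shuffle=True, collate_fn=collator)

log.info("Start training!!")
model.train()
for batch in loader:
    batch = {k: v.to(model.device) for k, v in batch.items()}
    loss = model(**batch).loss
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
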
2024-06-01 14:51 - Cuda check
2024-06-01 14:51 - True
2024-06-01 14:51 - 2
2024-06-01 14:51 - Configure Model and tokenizer
2024-06-01 14:51 - Memory usage: 0.00 GB
2024-06-01 14:51 - Dataset loaded successfully:
                   train - Jingmei/Pandemic_Wiki
                   test  - Jingmei/Pandemic
2024-06-01 14:51 - Tokenize data: DatasetDict({
train: Dataset({
features: ['input_ids', 'attention_mask'],
num_rows: 2152
})
test: Dataset({
features: ['input_ids', 'attention_mask'],
num_rows: 8264
})
})
2024-06-01 14:51 - Split data into chunks: DatasetDict({
train: Dataset({
features: ['input_ids', 'attention_mask'],
num_rows: 24863
})
test: Dataset({
features: ['input_ids', 'attention_mask'],
num_rows: 198964
})
})
2024-06-01 14:51 - Setup PEFT
2024-06-01 14:51 - Setup optimizer
2024-06-01 14:51 - Start training!!