Yong99 committed
Commit e8f737b · 1 Parent(s): 8c56117

Update README.md

Files changed (1)
  1. README.md +1 -87
README.md CHANGED
@@ -103,93 +103,7 @@ UTSD is constructed with hierarchical capacities, namely **UTSD-1G, UTSD-2G, UTS
 
 ## Usage
 
-You can load UTSD in the style of [Time-Series-Library](https://github.com/thuml/Time-Series-Library) based on the following dataset code:
-
-```python
-import datasets
-import numpy as np
-from torch.utils.data import Dataset
-from sklearn.preprocessing import StandardScaler
-from tqdm import tqdm
-
-class UTSDDataset(Dataset):
-    def __init__(self, remote=True, root_path=r'UTSD-1G', flag='train', input_len=None, pred_len=None, scale=True,
-                 stride=1, split=0.9):
-        self.input_len = input_len
-        self.pred_len = pred_len
-        self.seq_len = input_len + pred_len
-        assert flag in ['train', 'val']
-        assert split >= 0 and split <= 1.0
-        type_map = {'train': 0, 'val': 1, 'test': 2}
-        self.set_type = type_map[flag]
-        self.flag = flag
-        self.scale = scale
-        self.split = split
-        self.stride = stride
-        self.remote = remote
-
-        self.data_list = []
-        self.n_window_list = []
-
-        self.root_path = root_path
-        self.__read_data__()
-
-    def __read_data__(self):
-        if self.remote:
-            dataset = datasets.load_dataset("thuml/UTSD", "UTSD-1G")['train']
-        else:
-            dataset = datasets.load_from_disk(self.root_path)
-
-        print(dataset)
-        for item in tqdm(dataset):
-            self.scaler = StandardScaler()
-            data = item['target']
-            data = np.array(data).reshape(-1, 1)
-            num_train = int(len(data) * self.split)
-            border1s = [0, num_train - self.seq_len]
-            border2s = [num_train, len(data)]
-
-            border1 = border1s[self.set_type]
-            border2 = border2s[self.set_type]
-
-            if self.scale:
-                train_data = data[border1s[0]:border2s[0]]
-                self.scaler.fit(train_data)
-                data = self.scaler.transform(data)
-
-            data = data[border1:border2]
-            n_window = (len(data) - self.seq_len) // self.stride + 1
-            if n_window < 1:
-                continue
-
-            self.data_list.append(data)
-            self.n_window_list.append(n_window if len(self.n_window_list) == 0 else self.n_window_list[-1] + n_window)
-
-    def __getitem__(self, index):
-        dataset_index = 0
-        while index >= self.n_window_list[dataset_index]:
-            dataset_index += 1
-
-        index = index - self.n_window_list[dataset_index - 1] if dataset_index > 0 else index
-        n_timepoint = (len(self.data_list[dataset_index]) - self.seq_len) // self.stride + 1
-
-        s_begin = index % n_timepoint
-        s_begin = self.stride * s_begin
-        s_end = s_begin + self.seq_len
-        p_begin = s_end
-        p_end = p_begin + self.pred_len
-        seq_x = self.data_list[dataset_index][s_begin:s_end, :]
-        seq_y = self.data_list[dataset_index][p_begin:p_end, :]
-
-        return seq_x, seq_y
-
-    def __len__(self):
-        return self.n_window_list[-1]
-
-dataset = UTSDDataset(input_len=1440, pred_len=96)
-print(len(dataset))
-```
+You can access and load UTSD based on [the following code](https://github.com/thuml/Large-Time-Series-Model/tree/main/scripts/UTSD) in our repo.
 
 It should be noted that due to the construction of our dataset with diverse lengths, the sequence lengths of different samples vary. You can construct the data organization logic according to your own needs.
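
Outside the diff, for orientation: the Usage text now points to the loading code kept in the repo rather than inlining it. Below is a minimal sketch of pulling the corpus directly with the Hugging Face `datasets` library; the `thuml/UTSD` repo id, the `UTSD-1G` subset, the `train` split, and the `target` field are all taken from the removed snippet above, and this is a sketch rather than the linked repo script:

```python
# Minimal sketch (not the linked repo script): load the UTSD-1G subset directly
# with the `datasets` library, the same call the removed loader uses internally.
import datasets

utsd = datasets.load_dataset("thuml/UTSD", "UTSD-1G")["train"]
print(utsd)                     # schema and number of series

sample = utsd[0]
series = sample["target"]       # one univariate series; lengths differ per sample
print(len(series), series[:5])
```

If the arrow files are already on disk, `datasets.load_from_disk(<local path>)` covers the same ground as the removed loader's `remote=False` branch.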
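
On the closing note about diverse lengths: because every `target` series has its own length, a windowed loader has to check which series can supply at least one context-plus-prediction window, similar in spirit to the removed loader's `n_window < 1: continue` guard. A rough, illustrative sketch of that bookkeeping, reusing the 1440/96 window sizes from the removed example (not part of the dataset or the repo script):

```python
# Rough sketch: gather per-series lengths for UTSD-1G and count how many series
# can supply at least one (input_len + pred_len) window. Window sizes follow the
# removed example; iterating the full subset is fine for 1G, slower for larger ones.
import numpy as np
import datasets

INPUT_LEN, PRED_LEN = 1440, 96
MIN_LEN = INPUT_LEN + PRED_LEN              # shortest series yielding one window

utsd = datasets.load_dataset("thuml/UTSD", "UTSD-1G")["train"]
lengths = np.array([len(item["target"]) for item in utsd])

print(f"series: {len(lengths)}  "
      f"min/median/max length: {lengths.min()}/{int(np.median(lengths))}/{lengths.max()}")
print(f"series with at least {MIN_LEN} points: {int((lengths >= MIN_LEN).sum())}")
```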