Datasets:

Tasks:
Other
Modalities:
Text
Languages:
Swedish
Libraries:
Datasets
License:
Robin Kurtz committed on
Commit
71fa4c6
1 Parent(s): d7f2bd0

seems to work

Browse files
dummy/original_cased/1.0.2/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9215c2bdb43078f28883d4c450b19e6c41d907b634361859c9ba1435a763ccb4
3
+ size 2874
dummy/original_lower/1.0.2/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:552874213dd1e39a2e0bc5915c5a37f4cbac1954d0211bfdd51deff1b21e5bac
3
+ size 2873
dummy/original_lower_mix/1.0.2/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b53de0a8045101cdaf954ba324bc4510913b401e8e51c6821a795fe1a33ab31c
3
+ size 2959
dummy/simple_cased/1.0.2/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9bdb4172cb599d00eb4752ca619c2e5ab2e62797937f4b6fe770b27156807117
3
+ size 2857
dummy/simple_lower/1.0.2/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bfe6e55f7cc6a071cbe798148e733f45a88df141c39b80113ec42857480dbca9
3
+ size 2858
dummy/simple_lower_mix/1.0.2/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c0c17e4530edb13b371046c4f25f4e8621fb0faf44c2ee620c84ee65dfc11312
3
+ size 2948
suc3_1.py CHANGED
@@ -146,8 +146,8 @@ class SUC3(datasets.GeneratorBasedBuilder):
146
  def _info(self):
147
  features = {"id": datasets.Value("string"),
148
  "tokens": datasets.features.Sequence(datasets.Value("string")),
149
- "pos_tags": datasets.features.Sequence(_POS_LABEL_NAMES),
150
- "ner_tags": datasets.features.Sequence(self.config.ner_label_names)}
151
 
152
  return datasets.DatasetInfo(
153
  description=_DESCRIPTION + self.config.description,
@@ -160,7 +160,6 @@ class SUC3(datasets.GeneratorBasedBuilder):
160
  def _split_generators(self, dl_manager):
161
  dl_dir = dl_manager.download_and_extract(_URL + self.config.data_url)
162
  dl_dir = os.path.join(dl_dir, self.config.data_url.split("/")[-1].split(".")[0])
163
- print("hello", dl_dir)
164
  return [
165
  datasets.SplitGenerator(
166
  name=datasets.Split.TRAIN,
@@ -184,6 +183,6 @@ class SUC3(datasets.GeneratorBasedBuilder):
184
 
185
  def _generate_examples(self, data_file):
186
  with open(data_file, encoding="utf-8") as f:
187
- for line in f:
188
  row = json.loads(line)
189
- yield row
 
146
  def _info(self):
147
  features = {"id": datasets.Value("string"),
148
  "tokens": datasets.features.Sequence(datasets.Value("string")),
149
+ "pos_tags": datasets.features.Sequence(datasets.Value("string")),
150
+ "ner_tags": datasets.features.Sequence(datasets.Value("string"))}
151
 
152
  return datasets.DatasetInfo(
153
  description=_DESCRIPTION + self.config.description,
 
160
  def _split_generators(self, dl_manager):
161
  dl_dir = dl_manager.download_and_extract(_URL + self.config.data_url)
162
  dl_dir = os.path.join(dl_dir, self.config.data_url.split("/")[-1].split(".")[0])
 
163
  return [
164
  datasets.SplitGenerator(
165
  name=datasets.Split.TRAIN,
 
183
 
184
  def _generate_examples(self, data_file):
185
  with open(data_file, encoding="utf-8") as f:
186
+ for i, line in enumerate(f):
187
  row = json.loads(line)
188
+ yield str(i), row