mlenjoyneer committed
Commit
f7d2c61
1 Parent(s): 954320e

Upload 4 files

Files changed (5)
  1. .gitattributes +2 -0
  2. README.md +82 -0
  3. rutextsegwiki.txt +71 -0
  4. test.jsonl +3 -0
  5. train.jsonl +3 -0
.gitattributes CHANGED
@@ -53,3 +53,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.jpg filter=lfs diff=lfs merge=lfs -text
 *.jpeg filter=lfs diff=lfs merge=lfs -text
 *.webp filter=lfs diff=lfs merge=lfs -text
+ test.jsonl filter=lfs diff=lfs merge=lfs -text
+ train.jsonl filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,82 @@
+ ---
+ annotations_creators:
+ - machine-generated
+ language_creators:
+ - found
+ language:
+ - ru
+ size_categories:
+ - 10K<n<100K
+ license:
+ - unknown
+ multilinguality:
+ - monolingual
+ source_datasets:
+ - original
+ ---
+
+ # Dataset Card
+
+ ## Table of Contents
+ - [Table of Contents](#table-of-contents)
+ - [Dataset Description](#dataset-description)
+   - [Dataset Summary](#dataset-summary)
+   - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
+   - [Languages](#languages)
+   - [Usage](#usage)
+   - [Other Datasets](#other-datasets)
+ - [Dataset Structure](#dataset-structure)
+   - [Data Instances](#data-instances)
+   - [Data Splits](#data-splits)
+ - [Additional Information](#additional-information)
+   - [Licensing Information](#licensing-information)
+   - [Citation Information](#citation-information)
+
+ ## Dataset Description
+
+ ### Dataset Summary
+
+ A dataset for automatic text segmentation of Russian Wikipedia. The corpus is built from the May 2023 Wikipedia dump. Markup was generated automatically using two methods: taking texts that already have a division into paragraphs, and randomly joining parts of different texts.
+
+ ### Supported Tasks and Leaderboards
+
+ The dataset is designed for the text segmentation task.
+
+ ### Languages
+
+ The dataset is in Russian.
+
+ ### Usage
+
+ ```python
+ from datasets import load_dataset
+ dataset = load_dataset('mlenjoyneer/RuTextSegWiki')
+ ```
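+
+ A quick sanity check on one example (a minimal sketch; the field names follow the schema described in [Data Instances](#data-instances)):
+
+ ```python
+ sample = dataset['train'][0]
+ print(sample['sentences'][:2])  # list of sentence strings
+ print(sample['labels'][:2])     # 1 = a new topic starts here, 0 = continuation
+ print(sample['method'])         # 'base' or 'random_joining'
+ ```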
+
+ ### Other Datasets
+
+ mlenjoyneer/RuTextSegNews: a similar dataset built from a news corpus.
+
+ ## Dataset Structure
+
+ ### Data Instances
+
+ Each instance contains a list of strings holding the text's sentences, a list of ints holding the labels (1 marks the start of a new topic, 0 marks a continuation of the current topic), and a string naming the sample generation method (`base` or `random_joining`).
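+
+ As an illustration, the labels can be folded back into contiguous topic segments. A minimal sketch (the helper `to_segments` is ours, not part of the dataset):
+
+ ```python
+ def to_segments(sentences, labels):
+     """Group sentences into topic segments: each label 1 opens a new segment."""
+     segments = []
+     for sentence, label in zip(sentences, labels):
+         if label == 1 or not segments:
+             segments.append([sentence])
+         else:
+             segments[-1].append(sentence)
+     return segments
+ ```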
+
+ ### Data Splits
+
+ | Dataset Split | Number of Instances in Split |
+ |:---------|:---------|
+ | Train | 20000 |
+ | Test | 4000 |
+
+ ## Additional Information
+
+ ### Licensing Information
+
+ In progress
+
+ ### Citation Information
+
+ ```bibtex
+ In progress
+ ```
rutextsegwiki.txt ADDED
@@ -0,0 +1,71 @@
+ # coding=utf-8
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """RuTextSegWiki: Dataset for automatic text semantic segmentation of Russian wiki"""
+
+
+ import json
+ import os
+
+ import datasets
+
+
+ _CITATION = """
+ In progress
+ """
+
+ _DESCRIPTION = "Dataset for automatic text semantic segmentation of Russian wiki"
+ _URLS = {
+     "train": "train.jsonl",
+     "test": "test.jsonl"
+ }
+
+
+ class RuTextSegWikiDataset(datasets.GeneratorBasedBuilder):
+     """RuTextSegWiki Dataset"""
+
+     VERSION = datasets.Version("1.0.0")
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="default", version=VERSION, description=""),
+     ]
+
+     DEFAULT_CONFIG_NAME = "default"
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "sentences": [datasets.Value("string")],
+                 "labels": [datasets.Value("int8")],
+                 "method": datasets.Value("string")
+             }
+         )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         downloaded_files = dl_manager.download_and_extract(_URLS)
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
+         ]
+
+     def _generate_examples(self, filepath):
+         with open(filepath, encoding="utf-8") as f:
+             for id_, row in enumerate(f):
+                 data = json.loads(row)
+                 yield id_, data
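
Each line of train.jsonl / test.jsonl is a single JSON object matching the features declared in `_info` above. A minimal sketch of parsing one row directly (the row content here is invented for illustration):

```python
import json

# Hypothetical row; real rows contain full Russian Wikipedia texts.
row = '{"sentences": ["Пример.", "Ещё пример."], "labels": [1, 0], "method": "base"}'
data = json.loads(row)
assert set(data) == {"sentences", "labels", "method"}
```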
test.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:256d26f1ba96d0a01fe078ff13a130f8bbdf5513ed8461775b41c5dbd15e79eb
+ size 60033505
train.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:32fc8871c27d17cef2aad0fc3f88712ac3debbb76d8396e61559e5b69c8b894d
+ size 295861108