Datasets: shunk031/CAMERA
Languages: Japanese
License: cc-by-nc-sa-4.0

shunk031 committed
Commit fb189c1
Parent: 8dcde32

Initialize (#1)

* add README.md

* add script

* add poetry files

* add scripts for tests

* add settings for CI

* add .gitignore

* update README.md

* add settings for CI

* update README

.github/workflows/ci.yaml ADDED
@@ -0,0 +1,45 @@
+name: CI
+
+on:
+  push:
+    branches: [main]
+  pull_request:
+    branches: [main]
+    paths-ignore:
+      - 'README.md'
+
+jobs:
+  test:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        python-version: ['3.8', '3.9', '3.10']
+
+    steps:
+      - uses: actions/checkout@v2
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v2
+        with:
+          python-version: ${{ matrix.python-version }}
+
+      - name: Install dependencies
+        run: |
+          pip install -U pip setuptools wheel poetry
+          poetry install
+      - name: Format
+        run: |
+          poetry run black --check .
+      - name: Lint
+        run: |
+          poetry run flake8 . --ignore=E501,W503,E203
+      - name: Type check
+        run: |
+          poetry run mypy . \
+            --ignore-missing-imports \
+            --no-strict-optional \
+            --no-site-packages \
+            --cache-dir=/dev/null
+
+      - name: Run tests
+        run: |
+          poetry run pytest --color=yes -rf
.github/workflows/push_to_hub.yaml ADDED
@@ -0,0 +1,26 @@
+name: Sync to Hugging Face Hub
+
+on:
+  workflow_run:
+    workflows:
+      - CI
+    branches:
+      - main
+    types:
+      - completed
+
+jobs:
+  push_to_hub:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v2
+
+      - name: Push to Huggingface hub
+        env:
+          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+          HF_USERNAME: ${{ secrets.HF_USERNAME }}
+        run: |
+          git fetch --unshallow
+          git push --force https://${HF_USERNAME}:${HF_TOKEN}@huggingface.co/datasets/${HF_USERNAME}/CAMERA main
.gitignore ADDED
@@ -0,0 +1,176 @@
+# Created by https://www.toptal.com/developers/gitignore/api/python
+# Edit at https://www.toptal.com/developers/gitignore?templates=python
+
+### Python ###
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+.python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/#use-with-ide
+.pdm.toml
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
+
+### Python Patch ###
+# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
+poetry.toml
+
+# ruff
+.ruff_cache/
+
+# LSP config files
+pyrightconfig.json
+
+# End of https://www.toptal.com/developers/gitignore/api/python
CAMERA.py ADDED
@@ -0,0 +1,147 @@
+import ast
+import os
+from typing import Optional
+
+import datasets as ds
+import pandas as pd
+
+_CITATION = """\
+@inproceedings{mita-et-al:nlp2023,
+    author = "三田 雅人 and 村上 聡一朗 and 張 培楠",
+    title = "広告文生成タスクの規定とベンチマーク構築",
+    booktitle = "言語処理学会 第29回年次大会",
+    year = 2023,
+}
+"""
+
+_DESCRIPTION = """\
+CAMERA (CyberAgent Multimodal Evaluation for Ad Text GeneRAtion) is the Japanese ad text generation dataset.
+"""
+
+_HOMEPAGE = "https://github.com/CyberAgentAILab/camera"
+
+_LICENSE = """\
+This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License.
+"""
+
+_URLS = {
+    "without-lp-images": "https://storage.googleapis.com/camera-public/camera-v1-minimal.tar.gz",
+    "with-lp-images": "https://storage.googleapis.com/camera-public/camera-v1.tar.gz",
+}
+
+
+class CameraDataset(ds.GeneratorBasedBuilder):
+    VERSION = ds.Version("1.0.0")
+    BUILDER_CONFIGS = [
+        ds.BuilderConfig(
+            name="without-lp-images",
+            version=VERSION,
+            description="The CAMERA dataset w/o LP images (ver.1.0.0 | 126.2 MiB)",
+        ),
+        ds.BuilderConfig(
+            name="with-lp-images",
+            version=VERSION,
+            description="The CAMERA dataset w/ LP images (ver.1.0.0 | 61.5 GiB)",
+        ),
+    ]
+
+    def _info(self) -> ds.DatasetInfo:
+        features = ds.Features(
+            {
+                "asset_id": ds.Value("int64"),
+                "kw": ds.Value("string"),
+                "lp_meta_description": ds.Value("string"),
+                "title_org": ds.Value("string"),
+                "title_ne1": ds.Value("string"),
+                "title_ne2": ds.Value("string"),
+                "title_ne3": ds.Value("string"),
+                "domain": ds.Value("string"),
+                "parsed_full_text_annotation": ds.Sequence(
+                    {
+                        "text": ds.Value("string"),
+                        "xmax": ds.Value("int64"),
+                        "xmin": ds.Value("int64"),
+                        "ymax": ds.Value("int64"),
+                        "ymin": ds.Value("int64"),
+                    }
+                ),
+            }
+        )
+
+        if self.config.name == "with-lp-images":
+            features["lp_image"] = ds.Image()
+
+        return ds.DatasetInfo(
+            description=_DESCRIPTION,
+            citation=_CITATION,
+            homepage=_HOMEPAGE,
+            license=_LICENSE,
+            features=features,
+        )
+
+    def _split_generators(self, dl_manager: ds.DownloadManager):
+        base_dir = dl_manager.download_and_extract(_URLS[self.config.name])
+        lp_image_dir: Optional[str] = None
+
+        if self.config.name == "without-lp-images":
+            camera_dir_name = f"camera-v{self.VERSION.major}-minimal"
+        elif self.config.name == "with-lp-images":
+            camera_dir_name = f"camera-v{self.VERSION.major}"
+            lp_image_dir = os.path.join(base_dir, camera_dir_name, "lp-screenshot")
+        else:
+            raise ValueError(f"Invalid config name: {self.config.name}")
+
+        tng_path = os.path.join(base_dir, camera_dir_name, "train.csv")
+        dev_path = os.path.join(base_dir, camera_dir_name, "dev.csv")
+        tst_path = os.path.join(base_dir, camera_dir_name, "test.csv")
+
+        return [
+            ds.SplitGenerator(
+                name=ds.Split.TRAIN,
+                gen_kwargs={"file_path": tng_path, "lp_image_dir": lp_image_dir},
+            ),
+            ds.SplitGenerator(
+                name=ds.Split.VALIDATION,
+                gen_kwargs={"file_path": dev_path, "lp_image_dir": lp_image_dir},
+            ),
+            ds.SplitGenerator(
+                name=ds.Split.TEST,
+                gen_kwargs={"file_path": tst_path, "lp_image_dir": lp_image_dir},
+            ),
+        ]
+
+    def _generate_examples(self, file_path: str, lp_image_dir: Optional[str] = None):
+        df = pd.read_csv(file_path)
+        for i in range(len(df)):
+            data_dict = df.iloc[i].to_dict()
+
+            asset_id = data_dict["asset_id"]
+            keywords = data_dict["kw"]
+            lp_meta_description = data_dict["lp_meta_description"]
+            domain = data_dict.get("domain", "")
+            text_anns = ast.literal_eval(data_dict["parsed_full_text_annotation"])
+
+            title_org = data_dict["title_org"]
+            title_ne1 = data_dict.get("title_ne1", "")
+            title_ne2 = data_dict.get("title_ne2", "")
+            title_ne3 = data_dict.get("title_ne3", "")
+
+            example_dict = {
+                "asset_id": asset_id,
+                "kw": keywords,
+                "lp_meta_description": lp_meta_description,
+                "title_org": title_org,
+                "title_ne1": title_ne1,
+                "title_ne2": title_ne2,
+                "title_ne3": title_ne3,
+                "domain": domain,
+                "parsed_full_text_annotation": text_anns,
+            }
+
+            if self.config.name == "with-lp-images" and lp_image_dir is not None:
+                lp_image_file_name = f"screen-1200-{asset_id}.png"
+                example_dict["lp_image"] = os.path.join(
+                    lp_image_dir, lp_image_file_name
+                )
+
+            yield i, example_dict
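
Note: `_generate_examples` reads the OCR boxes as stringified Python literals from the CSV and deserializes them with `ast.literal_eval`, which, unlike `eval`, accepts only literal expressions. A minimal standalone sketch of that round-trip (the sample cell value below is illustrative, not taken from the dataset):

```python
import ast

# Hypothetical CSV cell: a stringified list of OCR box dicts matching
# the `parsed_full_text_annotation` schema declared in `_info()`.
raw_cell = "[{'text': 'trivago', 'xmax': 653, 'xmin': 547, 'ymax': 47, 'ymin': 18}]"

# ast.literal_eval parses Python literals only, so arbitrary code in a
# malformed CSV cell raises an error instead of being executed.
text_anns = ast.literal_eval(raw_cell)
assert text_anns[0]["text"] == "trivago"
```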
README.md ADDED
@@ -0,0 +1,347 @@
+---
+annotations_creators:
+- crowdsourced
+language:
+- ja-JP
+language_creators:
+- found
+license:
+- cc-by-nc-sa-4.0
+multilinguality:
+- monolingual
+pretty_name: CAMERA
+size_categories: []
+source_datasets:
+- original
+tags: []
+task_categories:
+- text-generation
+task_ids: []
+---
+
+# Dataset Card for CAMERA 📷
+
+[![CI](https://github.com/shunk031/huggingface-datasets_CAMERA/actions/workflows/ci.yaml/badge.svg)](https://github.com/shunk031/huggingface-datasets_CAMERA/actions/workflows/ci.yaml)
+
+## Table of Contents
+- [Table of Contents](#table-of-contents)
+- [Dataset Description](#dataset-description)
+  - [Dataset Summary](#dataset-summary)
+  - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
+  - [Languages](#languages)
+- [Dataset Structure](#dataset-structure)
+  - [Data Instances](#data-instances)
+  - [Data Fields](#data-fields)
+  - [Data Splits](#data-splits)
+- [Dataset Creation](#dataset-creation)
+  - [Curation Rationale](#curation-rationale)
+  - [Source Data](#source-data)
+  - [Annotations](#annotations)
+  - [Personal and Sensitive Information](#personal-and-sensitive-information)
+- [Considerations for Using the Data](#considerations-for-using-the-data)
+  - [Social Impact of Dataset](#social-impact-of-dataset)
+  - [Discussion of Biases](#discussion-of-biases)
+  - [Other Known Limitations](#other-known-limitations)
+- [Additional Information](#additional-information)
+  - [Dataset Curators](#dataset-curators)
+  - [Licensing Information](#licensing-information)
+  - [Citation Information](#citation-information)
+  - [Contributions](#contributions)
+
+## Dataset Description
+
+- **Homepage:** https://github.com/CyberAgentAILab/camera
+- **Repository:** https://github.com/shunk031/huggingface-datasets_CAMERA
+
+### Dataset Summary
+
+From [the official README.md](https://github.com/CyberAgentAILab/camera#camera-dataset):
+
+> CAMERA (CyberAgent Multimodal Evaluation for Ad Text GeneRAtion) is the Japanese ad text generation dataset. We hope that our dataset will be useful in research for realizing more advanced ad text generation models.
+
+### Supported Tasks and Leaderboards
+
+[More Information Needed]
+
+#### Supported Tasks
+
+[More Information Needed]
+
+#### Leaderboard
+
+[More Information Needed]
+
+### Languages
+
+The language data in CAMERA is in Japanese ([BCP-47 ja-JP](https://www.rfc-editor.org/info/bcp47)).
+
+## Dataset Structure
+
+### Data Instances
+
+To load a specific configuration, users must pass its name to `load_dataset`:
+
+#### without-lp-images
+
+```python
+from datasets import load_dataset
+
+dataset = load_dataset("shunk031/CAMERA", name="without-lp-images")
+
+print(dataset)
+# DatasetDict({
+#     train: Dataset({
+#         features: ['asset_id', 'kw', 'lp_meta_description', 'title_org', 'title_ne1', 'title_ne2', 'title_ne3', 'domain', 'parsed_full_text_annotation'],
+#         num_rows: 12395
+#     })
+#     validation: Dataset({
+#         features: ['asset_id', 'kw', 'lp_meta_description', 'title_org', 'title_ne1', 'title_ne2', 'title_ne3', 'domain', 'parsed_full_text_annotation'],
+#         num_rows: 3098
+#     })
+#     test: Dataset({
+#         features: ['asset_id', 'kw', 'lp_meta_description', 'title_org', 'title_ne1', 'title_ne2', 'title_ne3', 'domain', 'parsed_full_text_annotation'],
+#         num_rows: 872
+#     })
+# })
+```
+
+An example of the CAMERA (w/o LP images) dataset looks as follows:
+
+```json
+{
+    "asset_id": 13861,
+    "kw": "仙台 ホテル",
+    "lp_meta_description": "仙台のホテルや旅館をお探しなら楽天トラベルへ!楽天ポイントが使えて、貯まって、とってもお得な宿泊予約サイトです。さらに割引クーポンも使える!国内ツアー・航空券・レンタカー・バス予約も!",
+    "title_org": "仙台市のホテル",
+    "title_ne1": "",
+    "title_ne2": "",
+    "title_ne3": "",
+    "domain": "",
+    "parsed_full_text_annotation": {
+        "text": [
+            "trivago",
+            "Oops...AccessDenied 可",
+            "Youarenotallowedtoviewthispage!Ifyouthinkthisisanerror,pleasecontacttrivago.",
+            "Errorcode:0.3c99e86e.1672026945.25ba640YourIP:240d:1a:4d8:2800:b9b0:ea86:2087:d141AffectedURL:https://www.trivago.jp/ja/odr/%E8%BB%92", "%E4%BB%99%E5%8F%B0-%E5%9B%BD%E5%86%85?search=20072325",
+            "Backtotrivago"
+        ],
+        "xmax": [653, 838, 765, 773, 815, 649],
+        "xmin": [547, 357, 433, 420, 378, 550],
+        "ymax": [47, 390, 475, 558, 598, 663],
+        "ymin": [18, 198, 439, 504, 566, 651]
+    }
+}
+```
+
+#### with-lp-images
+
+```python
+from datasets import load_dataset
+
+dataset = load_dataset("shunk031/CAMERA", name="with-lp-images")
+
+print(dataset)
+# DatasetDict({
+#     train: Dataset({
+#         features: ['asset_id', 'kw', 'lp_meta_description', 'title_org', 'title_ne1', 'title_ne2', 'title_ne3', 'domain', 'parsed_full_text_annotation', 'lp_image'],
+#         num_rows: 12395
+#     })
+#     validation: Dataset({
+#         features: ['asset_id', 'kw', 'lp_meta_description', 'title_org', 'title_ne1', 'title_ne2', 'title_ne3', 'domain', 'parsed_full_text_annotation', 'lp_image'],
+#         num_rows: 3098
+#     })
+#     test: Dataset({
+#         features: ['asset_id', 'kw', 'lp_meta_description', 'title_org', 'title_ne1', 'title_ne2', 'title_ne3', 'domain', 'parsed_full_text_annotation', 'lp_image'],
+#         num_rows: 872
+#     })
+# })
+```
+
+An example of the CAMERA (w/ LP images) dataset looks as follows:
+
+```json
+{
+    "asset_id": 13861,
+    "kw": "仙台 ホテル",
+    "lp_meta_description": "仙台のホテルや旅館をお探しなら楽天トラベルへ!楽天ポイントが使えて、貯まって、とってもお得な宿泊予約サイトです。さらに割引クーポンも使える!国内ツアー・航空券・レンタカー・バス予約も!",
+    "title_org": "仙台市のホテル",
+    "title_ne1": "",
+    "title_ne2": "",
+    "title_ne3": "",
+    "domain": "",
+    "parsed_full_text_annotation": {
+        "text": [
+            "trivago",
+            "Oops...AccessDenied 可",
+            "Youarenotallowedtoviewthispage!Ifyouthinkthisisanerror,pleasecontacttrivago.",
+            "Errorcode:0.3c99e86e.1672026945.25ba640YourIP:240d:1a:4d8:2800:b9b0:ea86:2087:d141AffectedURL:https://www.trivago.jp/ja/odr/%E8%BB%92", "%E4%BB%99%E5%8F%B0-%E5%9B%BD%E5%86%85?search=20072325",
+            "Backtotrivago"
+        ],
+        "xmax": [653, 838, 765, 773, 815, 649],
+        "xmin": [547, 357, 433, 420, 378, 550],
+        "ymax": [47, 390, 475, 558, 598, 663],
+        "ymin": [18, 198, 439, 504, 566, 651]
+    },
+    "lp_image": <PIL.PngImagePlugin.PngImageFile image mode=RGBA size=1200x680 at 0x7F8513446B20>
+}
+```
+
+### Data Fields
+
+#### without-lp-images
+
+- `asset_id`: IDs (associated with LP images)
+- `kw`: search keyword
+- `lp_meta_description`: meta description extracted from LP (i.e., LP Text)
+- `title_org`: ad text (original gold reference)
+- `title_ne{1-3}`: ad text (additional gold references for multi-reference evaluation)
+- `domain`: industry domain (HR, EC, Fin, Edu) for industry-wise evaluation
+- `parsed_full_text_annotation`: OCR results for LP images
+
+#### with-lp-images
+
+- `asset_id`: IDs (associated with LP images)
+- `kw`: search keyword
+- `lp_meta_description`: meta description extracted from LP (i.e., LP Text)
+- `title_org`: ad text (original gold reference)
+- `title_ne{1-3}`: ad text (additional gold references for multi-reference evaluation)
+- `domain`: industry domain (HR, EC, Fin, Edu) for industry-wise evaluation
+- `parsed_full_text_annotation`: OCR results for LP images
+- `lp_image`: landing page (LP) image
+
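+The `parsed_full_text_annotation` field is declared as a `Sequence` of box dicts, which the `datasets` library exposes as one dict of parallel lists (as in the examples above). A minimal sketch of regrouping it into per-box records (the variable names are illustrative, not part of the loader):
+
+```python
+from datasets import load_dataset
+
+dataset = load_dataset("shunk031/CAMERA", name="without-lp-images", split="train")
+anns = dataset[0]["parsed_full_text_annotation"]
+
+# Zip the parallel lists back into one dict per OCR box.
+boxes = [
+    {"text": t, "xmin": x0, "ymin": y0, "xmax": x1, "ymax": y1}
+    for t, x0, y0, x1, y1 in zip(
+        anns["text"], anns["xmin"], anns["ymin"], anns["xmax"], anns["ymax"]
+    )
+]
+```
+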
+### Data Splits
+
+From [the official paper](https://www.anlp.jp/proceedings/annual_meeting/2023/pdf_dir/H11-4.pdf):
+
+| Split | # of data | # of reference ad text | industry domain label |
+|-------|----------:|-----------------------:|:---------------------:|
+| Train | 12,395 | 1 | - |
+| Valid | 3,098 | 1 | - |
+| Test | 869 | 4 | ✔ |
+
+Note that this loader yields 872 rows for the test split (as shown under [Data Instances](#data-instances)); 869 is the count reported in the paper.
+
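+Only the test split carries the three additional references (`title_ne1`–`title_ne3`). A hedged sketch of collecting all gold references per test example for multi-reference evaluation (dropping empty strings is an assumption of this sketch, not an official convention):
+
+```python
+from datasets import load_dataset
+
+test = load_dataset("shunk031/CAMERA", name="without-lp-images", split="test")
+
+references = [
+    # title_org is the original gold reference; title_ne1..3 are the
+    # additional ones. Empty strings mark missing references (assumption).
+    [t for t in (ex["title_org"], ex["title_ne1"], ex["title_ne2"], ex["title_ne3"]) if t]
+    for ex in test
+]
+```
+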
+## Dataset Creation
+
+### Curation Rationale
+
+[More Information Needed]
+
+### Source Data
+
+#### Initial Data Collection and Normalization
+
+[More Information Needed]
+
+#### Who are the source language producers?
+
+[More Information Needed]
+
+### Annotations
+
+#### Annotation process
+
+[More Information Needed]
+
+#### Who are the annotators?
+
+[More Information Needed]
+
+### Personal and Sensitive Information
+
+[More Information Needed]
+
+## Considerations for Using the Data
+
+### Social Impact of Dataset
+
+[More Information Needed]
+
+### Discussion of Biases
+
+[More Information Needed]
+
+### Other Known Limitations
+
+[More Information Needed]
+
+## Additional Information
+
+[More Information Needed]
+
+### Dataset Curators
+
+[More Information Needed]
+
+### Licensing Information
+
+> This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License.
+
+### Citation Information
+
+```bibtex
+@inproceedings{mita-et-al:nlp2023,
+    author = "三田 雅人 and 村上 聡一朗 and 張 培楠",
+    title = "広告文生成タスクの規定とベンチマーク構築",
+    booktitle = "言語処理学会 第29回年次大会",
+    year = 2023,
+}
+```
+
+### Contributions
+
+Thanks to [Masato Mita](https://github.com/chemicaltree), [Soichiro Murakami](https://github.com/ichiroex), and [Peinan Zhang](https://github.com/peinan) for creating this dataset.
poetry.lock ADDED
The diff for this file is too large to render. See raw diff
 
pyproject.toml ADDED
@@ -0,0 +1,24 @@
+[tool.poetry]
+name = "huggingface-datasets-camera"
+version = "0.1.0"
+description = ""
+authors = ["Shunsuke KITADA <[email protected]>"]
+readme = "README.md"
+packages = []
+
+[tool.poetry.dependencies]
+python = "^3.8"
+datasets = "^2.10.1"
+pillow = "^9.4.0"
+
+
+[tool.poetry.group.dev.dependencies]
+black = "^23.1.0"
+isort = "^5.12.0"
+flake8 = "^6.0.0"
+mypy = "^1.1.1"
+pytest = "^7.2.2"
+
+[build-system]
+requires = ["poetry-core"]
+build-backend = "poetry.core.masonry.api"
tests/CAMERA_test.py ADDED
@@ -0,0 +1,41 @@
+import os
+
+import datasets as ds
+import pytest
+
+
+@pytest.fixture
+def dataset_path() -> str:
+    return "CAMERA.py"
+
+
+def test_load_dataset_without_lp_images(
+    dataset_path: str,
+    expected_train_num_rows: int = 12395,
+    expected_val_num_rows: int = 3098,
+    expected_test_num_rows: int = 872,
+):
+    dataset = ds.load_dataset(path=dataset_path, name="without-lp-images")
+
+    assert dataset["train"].num_rows == expected_train_num_rows  # type: ignore
+    assert dataset["validation"].num_rows == expected_val_num_rows  # type: ignore
+    assert dataset["test"].num_rows == expected_test_num_rows  # type: ignore
+
+
+@pytest.mark.skipif(
+    bool(os.environ.get("CI", False)),
+    reason="Because this test downloads a large data set, we will skip running it on CI.",
+)
+def test_load_dataset_with_lp_images(
+    dataset_path: str,
+    expected_train_num_rows: int = 12395,
+    expected_val_num_rows: int = 3098,
+    expected_test_num_rows: int = 872,
+):
+    dataset = ds.load_dataset(path=dataset_path, name="with-lp-images")
+
+    assert dataset["train"].num_rows == expected_train_num_rows  # type: ignore
+    assert dataset["validation"].num_rows == expected_val_num_rows  # type: ignore
+    assert dataset["test"].num_rows == expected_test_num_rows  # type: ignore
+
+    assert "lp_image" in dataset["train"].column_names
tests/__Init__.py ADDED
File without changes