Dataset: ajgt_twitter_ar (Arabic Jordanian General Tweets)
Tasks: Text Classification
Modalities: Text
Formats: parquet
Sub-tasks: sentiment-classification
Languages: Arabic
Size: 1K - 10K
License: unspecified
Commit af3f2fa
Parent(s): 8016dcb

Convert dataset to Parquet (#2)

- Convert dataset to Parquet (b759b14f8e3f64ea0471b38fad3943f12c59b27c)
- Delete loading script (97632ecc766c612b0e4ce16c191527a55962ea02)
- Delete legacy dataset_infos.json (2a5e63e8936fd767d3d9a6d30599868cad6bbc11)

Files changed:
- README.md +10 -4
- ajgt_twitter_ar.py +0 -105
- dataset_infos.json +0 -1
- plain_text/train-00000-of-00001.parquet +3 -0
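With the Parquet shard in place and a configs mapping in the README, consumers no longer need the loading script. A minimal sketch of loading the converted dataset, assuming the hub id ajgt_twitter_ar (taken from the builder name recorded in the deleted dataset_infos.json):

from datasets import load_dataset

# Loads the single "plain_text" config directly from the Parquet shard.
ds = load_dataset("ajgt_twitter_ar", "plain_text", split="train")
print(ds)      # expected: 1,800 rows with "text" and "label" columns
print(ds[0])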
README.md CHANGED
@@ -19,6 +19,7 @@ task_ids:
   - sentiment-classification
 pretty_name: Arabic Jordanian General Tweets
 dataset_info:
+  config_name: plain_text
   features:
   - name: text
     dtype: string
@@ -28,13 +29,18 @@ dataset_info:
         names:
           '0': Negative
           '1': Positive
-  config_name: plain_text
   splits:
   - name: train
-    num_bytes: 175424
+    num_bytes: 175420
     num_examples: 1800
-  download_size: 107395
-  dataset_size: 175424
+  download_size: 91857
+  dataset_size: 175420
+configs:
+- config_name: plain_text
+  data_files:
+  - split: train
+    path: plain_text/train-*
+  default: true
 ---
 
 # Dataset Card for Arabic Jordanian General Tweets
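The new shard can also be inspected outside the datasets library. A minimal sketch, assuming the file has been fetched locally under the path added in this commit:

import pandas as pd

# Read the single Parquet shard produced by the conversion.
df = pd.read_parquet("plain_text/train-00000-of-00001.parquet")
print(df.shape)                     # expected: (1800, 2)
print(df["label"].value_counts())   # 0 = Negative, 1 = Positive per the ClassLabel names above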
ajgt_twitter_ar.py DELETED
@@ -1,105 +0,0 @@
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""Arabic Jordanian General Tweets."""


import os

import openpyxl  # noqa: requires this pandas optional dependency for reading xlsx files
import pandas as pd

import datasets
from datasets.tasks import TextClassification


_DESCRIPTION = """\
Arabic Jordanian General Tweets (AJGT) Corpus consisted of 1,800 tweets \
annotated as positive and negative. Modern Standard Arabic (MSA) or Jordanian dialect.
"""

_CITATION = """\
@inproceedings{alomari2017arabic,
  title={Arabic tweets sentimental analysis using machine learning},
  author={Alomari, Khaled Mohammad and ElSherif, Hatem M and Shaalan, Khaled},
  booktitle={International Conference on Industrial, Engineering and Other Applications of Applied Intelligent Systems},
  pages={602--610},
  year={2017},
  organization={Springer}
}
"""

_URL = "https://raw.githubusercontent.com/komari6/Arabic-twitter-corpus-AJGT/master/"


class AjgtConfig(datasets.BuilderConfig):
    """BuilderConfig for Ajgt."""

    def __init__(self, **kwargs):
        """BuilderConfig for Ajgt.

        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super(AjgtConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)


class AjgtTwitterAr(datasets.GeneratorBasedBuilder):
    """Ajgt dataset."""

    BUILDER_CONFIGS = [
        AjgtConfig(
            name="plain_text",
            description="Plain text",
        )
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "label": datasets.features.ClassLabel(
                        names=[
                            "Negative",
                            "Positive",
                        ]
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/komari6/Arabic-twitter-corpus-AJGT",
            citation=_CITATION,
            task_templates=[TextClassification(text_column="text", label_column="label")],
        )

    def _split_generators(self, dl_manager):
        urls_to_download = {
            "train": os.path.join(_URL, "AJGT.xlsx"),
        }
        downloaded_files = dl_manager.download(urls_to_download)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
        ]

    def _generate_examples(self, filepath):
        """Generate examples."""
        with open(filepath, "rb") as f:
            df = pd.read_excel(f, engine="openpyxl")
            for id_, record in df.iterrows():
                tweet, sentiment = record["Feed"], record["Sentiment"]
                yield str(id_), {"text": tweet, "label": sentiment}
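For reference, the conversion this commit performs can be reproduced by hand from the same source file the deleted loader downloaded. This is a hedged sketch, not the script the maintainers ran: the Feed and Sentiment column names come from the deleted loader, the 0/1 encoding mirrors its ClassLabel(names=["Negative", "Positive"]), and the output path matches the shard added in this commit.

import pandas as pd

SOURCE_XLSX = "https://raw.githubusercontent.com/komari6/Arabic-twitter-corpus-AJGT/master/AJGT.xlsx"

# Read the original spreadsheet (requires the openpyxl extra for xlsx support).
raw = pd.read_excel(SOURCE_XLSX, engine="openpyxl")

# The Sentiment column may hold either "Negative"/"Positive" strings or 0/1 ints;
# normalize to the integer ClassLabel encoding either way (assumption, not verified).
labels = raw["Sentiment"]
if labels.dtype == object:
    labels = labels.map({"Negative": 0, "Positive": 1})

converted = pd.DataFrame({"text": raw["Feed"], "label": labels.astype("int64")})
converted.to_parquet("plain_text/train-00000-of-00001.parquet", index=False)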
dataset_infos.json DELETED
@@ -1 +0,0 @@
{"plain_text": {"description": "Arabic Jordanian General Tweets (AJGT) Corpus consisted of 1,800 tweets annotated as positive and negative. Modern Standard Arabic (MSA) or Jordanian dialect.\n", "citation": "@inproceedings{alomari2017arabic,\n title={Arabic tweets sentimental analysis using machine learning},\n author={Alomari, Khaled Mohammad and ElSherif, Hatem M and Shaalan, Khaled},\n booktitle={International Conference on Industrial, Engineering and Other Applications of Applied Intelligent Systems},\n pages={602--610},\n year={2017},\n organization={Springer}\n}\n", "homepage": "https://github.com/komari6/Arabic-twitter-corpus-AJGT", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["Negative", "Positive"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "task_templates": [{"task": "text-classification", "text_column": "text", "label_column": "label", "labels": ["Negative", "Positive"]}], "builder_name": "ajgt_twitter_ar", "config_name": "plain_text", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 175424, "num_examples": 1800, "dataset_name": "ajgt_twitter_ar"}}, "download_checksums": {"https://raw.githubusercontent.com/komari6/Arabic-twitter-corpus-AJGT/master/AJGT.xlsx": {"num_bytes": 107395, "checksum": "966c52213872b6b8a3ced5fb7c60aee2abf47ca673c7d2c2eeb064a60bc9ed51"}}, "download_size": 107395, "post_processing_size": null, "dataset_size": 175424, "size_in_bytes": 282819}}
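The deleted metadata still records the SHA-256 checksum of the upstream spreadsheet, which makes it straightforward to confirm that the GitHub source has not changed since the conversion. A small sketch, assuming network access to the raw URL recorded above:

import hashlib
import urllib.request

URL = "https://raw.githubusercontent.com/komari6/Arabic-twitter-corpus-AJGT/master/AJGT.xlsx"
EXPECTED = "966c52213872b6b8a3ced5fb7c60aee2abf47ca673c7d2c2eeb064a60bc9ed51"

# Download the spreadsheet and compare its digest with the recorded checksum.
digest = hashlib.sha256(urllib.request.urlopen(URL).read()).hexdigest()
print("match" if digest == EXPECTED else "mismatch", digest)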
plain_text/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d6f35180ab9678918a07d4faf67c4b2d6c2ae29e7b2c467a990b5ec395deee9e
size 91857
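The three lines above are a Git LFS pointer, not the Parquet data itself; the actual 91,857-byte shard lives in LFS storage. A hedged sketch for fetching the real file programmatically, assuming the dataset is hosted on the Hugging Face Hub under the id ajgt_twitter_ar:

from huggingface_hub import hf_hub_download

# Resolves the LFS pointer and downloads the underlying Parquet shard.
local_path = hf_hub_download(
    repo_id="ajgt_twitter_ar",          # assumed hub id for this dataset
    repo_type="dataset",
    filename="plain_text/train-00000-of-00001.parquet",
)
print(local_path)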