Dataset: mteb/tweet_sentiment_extraction
Modalities: Text · Formats: json · Languages: English · Libraries: Datasets, pandas
Commit 6600f38 (1 parent: 6214644), committed by parquet-converter

Update parquet files

.gitattributes DELETED
@@ -1,37 +0,0 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zstandard filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
- # Audio files - uncompressed
- *.pcm filter=lfs diff=lfs merge=lfs -text
- *.sam filter=lfs diff=lfs merge=lfs -text
- *.raw filter=lfs diff=lfs merge=lfs -text
- # Audio files - compressed
- *.aac filter=lfs diff=lfs merge=lfs -text
- *.flac filter=lfs diff=lfs merge=lfs -text
- *.mp3 filter=lfs diff=lfs merge=lfs -text
- *.ogg filter=lfs diff=lfs merge=lfs -text
- *.wav filter=lfs diff=lfs merge=lfs -text

README.md DELETED
@@ -1,4 +0,0 @@
- ---
- language:
- - en
- ---

create_data.py DELETED
@@ -1,30 +0,0 @@
- from datasets import DatasetDict, load_dataset
- import csv
- import json
-
- def main():
-     label2id = {"positive": 2, "neutral": 1, "negative": 0}
-
-     for split in ["train", "test"]:
-         input_file = csv.DictReader(open(f"raw_data/{split}_csv"))
-
-         with open(f'{split}.jsonl', 'w') as fOut:
-             for row in input_file:
-                 fOut.write(json.dumps({'id': row['textID'], 'text': row['text'], 'label': label2id[row['sentiment']], 'label_text': row['sentiment']})+"\n")
-
-
-     """
-     train_dset = load_dataset("csv", data_files="raw_data/train_csv", split="train")
-     train_dset = train_dset.remove_columns(["selected_text"])
-     test_dset = load_dataset("csv", data_files="raw_data/train_csv", split="train")
-     raw_dset = DatasetDict()
-     raw_dset["train"] = train_dset
-     raw_dset["test"] = test_dset
-     for split, dset in raw_dset.items():
-         dset = dset.rename_column("sentiment", "label_text")
-         dset = dset.map(lambda x: {"label": label2id[x["label_text"]]}, num_proc=8)
-         dset.to_json(f"{split}.jsonl")
-     """
-
- if __name__ == "__main__":
-     main()

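For reference, the deleted create_data.py wrote one JSON object per line with id, text, label, and label_text fields. A minimal sketch of loading those JSONL files back with the datasets JSON loader (assuming train.jsonl and test.jsonl sit in the working directory):

from datasets import load_dataset

# Load the JSONL files produced by create_data.py (hypothetical local paths).
dset = load_dataset("json", data_files={"train": "train.jsonl", "test": "test.jsonl"})
print(dset["train"][0])  # e.g. {'id': ..., 'text': ..., 'label': 1, 'label_text': 'neutral'}
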
mteb--tweet_sentiment_extraction/json-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1e5a47fefad96594d0d05474a630162fa5e639c45709630bdbd57f889cee864f
+ size 243818
mteb--tweet_sentiment_extraction/json-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3fb7d181e977a86bfb671dd714e892a063aad36d509e9e878072df4d3ad28558
+ size 1893963
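
With the converted parquet files in place, the dataset should load directly from the Hub. A minimal sketch, assuming the repository id mteb/tweet_sentiment_extraction (inferred from the parquet paths above):

from datasets import load_dataset

# Load the parquet-backed dataset from the Hugging Face Hub (repository id assumed).
dset = load_dataset("mteb/tweet_sentiment_extraction")
print(dset)             # DatasetDict with train and test splits
print(dset["test"][0])  # one labeled tweet: id, text, label, label_text
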
test.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
train.jsonl DELETED
The diff for this file is too large to render. See raw diff