"""TODO: Add a description here.""" |
|
import csv |
|
import json |
|
import os |
|
from typing import Sequence |
|
import pandas as pd |
|
import datasets |
|
|
|
|
|
_CITATION = """\
@inproceedings{liguori-etal-2021-shellcode,
    title = "{S}hellcode{\_}{IA}32: A Dataset for Automatic Shellcode Generation",
    author = "Liguori, Pietro and
      Al-Hossami, Erfan and
      Cotroneo, Domenico and
      Natella, Roberto and
      Cukic, Bojan and
      Shaikh, Samira",
    booktitle = "Proceedings of the 1st Workshop on Natural Language Processing for Programming (NLP4Prog 2021)",
    month = aug,
    year = "2021",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.nlp4prog-1.7",
    doi = "10.18653/v1/2021.nlp4prog-1.7",
    pages = "58--64",
    abstract = "We take the first step to address the task of automatically generating shellcodes, i.e., small pieces of code used as a payload in the exploitation of a software vulnerability, starting from natural language comments. We assemble and release a novel dataset (Shellcode{\_}IA32), consisting of challenging but common assembly instructions with their natural language descriptions. We experiment with standard methods in neural machine translation (NMT) to establish baseline performance levels on this task.",
}
"""

_DESCRIPTION = """\
Shellcode_IA32 is a dataset for shellcode generation from English intents. The shellcodes are compilable on Intel Architecture 32-bit (IA-32).
"""
|
|
|
_HOMEPAGE = "https://github.com/dessertlab/Shellcode_IA32"

_LICENSE = "GNU GENERAL PUBLIC LICENSE"

_URLs = {
    "default": "https://raw.githubusercontent.com/dessertlab/Shellcode_IA32/main/Shellcode_IA32.tsv",
}
|
|
|
class ShellcodeIA32(datasets.GeneratorBasedBuilder):
    """Shellcode_IA32: a dataset for shellcode generation."""

    VERSION = datasets.Version("1.1.0")

    DEFAULT_CONFIG_NAME = "default"
|
    def _info(self):
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "title": datasets.Value("string"),
                "context": datasets.Value("string"),
                "question": datasets.Value("string"),
                "answers": {
                    "text": datasets.Sequence(datasets.Value("string")),
                    "answer_start": datasets.Sequence(datasets.Value("int32")),
                },
            }
        )
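        # For reference, a single example under this schema looks like the
        # following (field values are illustrative, not taken from the real
        # data file):
        # {
        #     "id": "0",
        #     "title": "",
        #     "context": "Some passage of text...",
        #     "question": "A question about the passage?",
        #     "answers": {"text": ["an answer span"], "answer_start": [0]},
        # }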
|
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
|
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Note: _URLs points at the original Shellcode_IA32 TSV and is not
        # used here; this loader reads a SQuAD-style JSON file instead. The
        # URL uses /resolve/ rather than /blob/ so that the raw file is
        # downloaded instead of an HTML page.
        data_path = dl_manager.download_and_extract(
            "https://huggingface.co/datasets/Serhii/Custom_SQuAD/resolve/main/Dataset.json"
        )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": data_path, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": data_path, "split": "test"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": data_path, "split": "dev"},
            ),
        ]
|
    def _generate_examples(self, filepath, split):
        """Yields examples as (key, example) tuples."""
        df = pd.read_json(filepath, lines=True)
        # Deterministic 80/10/10 train/dev/test partition; random_state is
        # fixed so every call reproduces the same split.
        train = df.sample(frac=0.8, random_state=0)
        test = df.drop(train.index)
        dev = test.sample(frac=0.5, random_state=0)
        test = test.drop(dev.index)
        if split == "train":
            data = train
        elif split == "dev":
            data = dev
        elif split == "test":
            data = test
        for idx, row in data.iterrows():
            yield idx, {
                "id": row["id"],
                "title": "",
                "context": row["context"],
                "question": row["question"],
                "answers": {
                    "text": row["answers"]["text"],
                    # answer_start is fixed at 0 in this loader rather than
                    # read from the file.
                    "answer_start": [0],
                },
            }
|
|
|
""" |
|
for idx, row in data.iterrows(): |
|
yield idx, { |
|
"id": row["id"], |
|
"title": row["title"], |
|
"context": row["context"], |
|
"question": row["question"], |
|
"answers": { |
|
"text": row["answers"]["text"], |
|
"answer_start": row["answers"]["answer_start"] |
|
} |
|
} |
|
""" |
|
|
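# Minimal usage sketch (not part of the loading script itself). It assumes
# this file is saved locally as "custom_squad.py" (a hypothetical filename)
# and that network access to huggingface.co is available; depending on your
# version of the datasets library, trust_remote_code=True may be required
# when loading a local script.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("custom_squad.py")  # hypothetical local script path
    print(ds)  # DatasetDict with train, test, and validation splits
    print(ds["train"][0])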