""" |
|
CodeClippy dataset - opensource code from Github. Scrapped July 7 2021. |
|
More to add here. |
|
""" |
|
|
|
import io
from typing import List

import jsonlines
import requests
import zstandard as zstd
from bs4 import BeautifulSoup

import datasets

_CITATION = """\
@misc{cooper-2021-code-clippy-data,
    author = {Nathan Cooper and Artashes Arutiunian and Santiago Hincapié-Potes and Ben Trevett and Arun Raja and Erfan Hossami and Mrinal Mathur and contributors},
    title = {{Code Clippy Data: A large dataset of code data from Github for research into code language models}},
    month = jul,
    year = 2021,
    version = {1.0},
    publisher = {GitHub},
    url = {https://github.com/ncoop57/gpt-code-clippy}
}
"""

_DESCRIPTION = """
This dataset was generated by selecting GitHub repositories from a large collection of repositories. These repositories were collected from https://seart-ghs.si.usi.ch/ and the GitHub portion of [The Pile](https://github.com/EleutherAI/github-downloader) (performed on July 7th, 2021). The goal of this dataset is to provide a training set for pretraining large language models on code, helping software engineering researchers better understand the impact of such models on software-related tasks such as code autocompletion. The dataset is split into train, validation, and test splits. There is a version containing duplicates (209GB compressed) and one where exact duplicates (132GB compressed) have been removed. The dataset contains mostly JavaScript and Python code, but other programming languages are included as well to varying degrees.
"""

_HOMEPAGE = "https://github.com/CodedotAl/gpt-code-clippy/wiki"

_LICENSE = "GPL-3.0"

dataset_names = ["code_clippy_dedup_data", "code_clippy_dup_data"]
splits = ["train", "validation", "test"]

BASE_URL = "https://the-eye.eu/public/AI/training_data/code_clippy_data/"

# Collect the data file URLs for every configuration and split by scraping the
# directory listings hosted at BASE_URL. Note that this runs at import time,
# so loading this script requires network access.
_URLs = {}
for dataset in dataset_names:
    _URLs[dataset] = {}
    for split in splits:
        _URLs[dataset][split] = []
        url = BASE_URL + dataset + "/" + split
        r = requests.get(url)
        soup = BeautifulSoup(r.content, "html.parser")
        # The directory index lists its files as links inside a <pre> element.
        results = soup.find("pre")
        url_elements = results.find_all("a")
        for url_element in url_elements:
            # Skip the parent-directory link.
            if url_element.text == "../":
                continue
            # BASE_URL already ends in a slash, so don't insert another one.
            _URLs[dataset][split].append(f"{BASE_URL}{dataset}/{split}/{url_element.text}")

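# For illustration (the shard filename below is a placeholder, not an actual
# file name), the loop above produces per-split URL lists shaped like:
#   _URLs["code_clippy_dedup_data"]["train"] == [
#       "https://the-eye.eu/public/AI/training_data/code_clippy_data/code_clippy_dedup_data/train/<shard>",
#       ...,
#   ]
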
class CodeClippy(datasets.GeneratorBasedBuilder):
    """CodeClippy dataset: open-source code scraped from GitHub on July 7, 2021."""

    VERSION = datasets.Version("0.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="code_clippy_dedup_data",
            version=VERSION,
            description="A deduplicated version of the files scraped from GitHub, including non-code files such as .txt files.",
        ),
        datasets.BuilderConfig(
            name="code_clippy_dup_data",
            version=VERSION,
            description="The files scraped from GitHub, including non-code files such as .txt files. This version contains duplicates.",
        ),
    ]

    DEFAULT_CONFIG_NAME = "code_clippy_dedup_data"

    def _info(self):
        features = datasets.Features(
            {
                "id": datasets.Value("int64"),
                "text": datasets.Value("string"),
                "repo_name": datasets.Value("string"),
                "stars": datasets.Value("string"),
                "repo_language": datasets.Value("string"),
                "file_name": datasets.Value("string"),
                "mime_type": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        downloaded_files = dl_manager.download(_URLs[self.config.name])
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepaths": downloaded_files["test"]}),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": downloaded_files["validation"]}
            ),
        ]

    def _generate_examples(self, filepaths: List):
        """Yields examples as (key, example) tuples."""
        id_ = 0
        dctx = zstd.ZstdDecompressor()
        for filepath in filepaths:
            with open(filepath, "rb") as f:
                # Each shard is a zstandard-compressed JSON Lines file;
                # stream-decompress it and decode the bytes as UTF-8 text.
                stream = io.TextIOWrapper(dctx.stream_reader(f), encoding="utf-8")
                for line in jsonlines.Reader(stream):
                    # Each record stores the file contents under "text" and the
                    # repository metadata (repo_name, stars, ...) under "meta".
                    yield id_, {"id": id_, "text": line["text"], **line["meta"]}
                    id_ += 1
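
if __name__ == "__main__":
    # Usage sketch (an illustrative assumption, not part of the original
    # loading script): load the default deduplicated config in streaming mode
    # and print the metadata of a single example. Streaming avoids downloading
    # all shards up front, but support for script-based datasets depends on the
    # installed `datasets` version; the `zstandard` and `jsonlines` packages
    # and network access are required either way.
    ds = datasets.load_dataset(__file__, "code_clippy_dedup_data", split="train", streaming=True)
    for example in ds:
        print(example["repo_name"], example["file_name"], example["repo_language"])
        break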