import re

import requests
import datasets
from datasets import BuilderConfig

_DESCRIPTION = """\
United States government agencies often open proposed regulations to public comment.
Proposed regulations are organized into "dockets". This dataset uses the Regulations.gov
public API to aggregate and clean public comments for dockets that mention opioid use.

Each example consists of one docket and includes metadata such as the docket id and title.
Each docket entry also includes the top 10 comments on that docket, with comment metadata
and comment text.
"""

_HOMEPAGE = "https://www.regulations.gov/"

_CITATION = """@misc{ro_huang_regulatory_2023-1,
    author = {{Ro Huang}},
    date = {2023-03-19},
    publisher = {Hugging Face},
    title = {Regulatory Comments {API} Call},
    url = {https://huggingface.co/datasets/ro-h/regulatory_comments_api},
    version = {1.1.4},
    bdsk-url-1 = {https://huggingface.co/datasets/ro-h/regulatory_comments_api}}
"""


class RegulationsDataFetcher:
    """Fetches docket metadata and comments from the Regulations.gov v4 API."""

    BASE_COMMENT_URL = 'https://api.regulations.gov/v4/comments'
    BASE_DOCKET_URL = 'https://api.regulations.gov/v4/dockets/'

    def __init__(self, docket_id, api_key):
        self.docket_id = docket_id
        self.api_key = api_key
        self.docket_url = self.BASE_DOCKET_URL + docket_id
        # Every request must carry the key in the X-Api-Key header.
        self.headers = {
            'X-Api-Key': self.api_key,
            'Content-Type': 'application/json'
        }

    def fetch_comments(self):
        """Fetch a single page of up to 25 comments for this docket."""
        url = f'{self.BASE_COMMENT_URL}?filter[docketId]={self.docket_id}&page[number]=1&page[size]=25'
        response = requests.get(url, headers=self.headers)

        if response.status_code == 200:
            return response.json()
        elif response.status_code == 429:
            print('API rate limit reached.')
            return None
        else:
            print(f'Failed to retrieve comments: {response.status_code}')
            return None

    def get_docket_info(self):
        """Get docket metadata: agency, title, modify date, docket type, keywords."""
        response = requests.get(self.docket_url, headers=self.headers)

        if response.status_code == 200:
            attributes = response.json()['data']['attributes']
            return (attributes['agencyId'],
                    attributes['title'],
                    attributes['modifyDate'],
                    attributes['docketType'],
                    attributes['keywords'])
        elif response.status_code == 429:
            print('API rate limit reached.')
            return None
        else:
            print(f'Failed to retrieve docket info: {response.status_code}')
            return None

    def fetch_comment_details(self, comment_url):
        """Fetch the full details of a single comment."""
        response = requests.get(comment_url, headers=self.headers)

        if response.status_code == 200:
            return response.json()
        else:
            print(f'Failed to retrieve comment details: {response.status_code}')
            return None

    def collect_data(self):
        """Collect docket metadata and comments, reshaped into a nested dictionary."""
        data = self.fetch_comments()
        if not data:
            return None

        docket_info = self.get_docket_info()
        if not docket_info:
            return None

        nested_data = {
            "id": self.docket_id,
            "agency": self.docket_id.split('-')[0],
            "title": docket_info[1],
            "update_date": docket_info[2].split('T')[0] if docket_info[2] else "Unknown Update Date",
            "update_time": docket_info[2].split('T')[1].strip('Z') if docket_info[2] and 'T' in docket_info[2] else "Unknown Update Time",
            "purpose": docket_info[3],
            "keywords": docket_info[4],
            "comments": []
        }

        if 'data' in data:
            for comment in data['data']:
                # Keep at most the first 10 usable comments per docket.
                if len(nested_data["comments"]) >= 10:
                    break

                comment_details = self.fetch_comment_details(comment['links']['self'])
                # fetch_comment_details() returns None on failure; skip those comments.
                if not comment_details:
                    continue
                if 'data' in comment_details and 'attributes' in comment_details['data']:
                    comment_data = comment_details['data']['attributes']

                    # Strip HTML line breaks, spacer spans, and character entities.
                    comment_text = (comment_data.get('comment', '') or '').strip()
                    comment_text = comment_text.replace("<br/>", "").replace("<span style='padding-left: 30px'></span>", "")
                    comment_text = re.sub(r'&[^;]+;', '', comment_text)

                    # Skip empty comments, "N/A" placeholders, and comments that
                    # merely point to an attachment.
                    if (comment_text
                            and "attached" not in comment_text.lower()
                            and "attachment" not in comment_text.lower()
                            and comment_text.lower() != "n/a"):
                        nested_comment = {
                            "text": comment_text,
                            "comment_id": comment['id'],
                            "comment_url": comment['links']['self'],
                            "comment_date": comment['attributes']['postedDate'].split('T')[0],
                            "comment_time": comment['attributes']['postedDate'].split('T')[1].strip('Z'),
                            "commenter_fname": ((comment_data.get('firstName') or 'Anonymous').split(',')[0]).capitalize(),
                            "commenter_lname": ((comment_data.get('lastName') or 'Anonymous').split(',')[0]).capitalize(),
                            "comment_length": len(comment_text)
                        }
                        nested_data["comments"].append(nested_comment)

        return nested_data
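
# For reference, collect_data() returns a nested dict shaped like the sketch
# below (all values are hypothetical):
#
#   {
#       "id": "FDA-2021-N-0270",
#       "agency": "FDA",
#       "title": "...",
#       "update_date": "2023-01-01",
#       "update_time": "12:00:00",
#       "purpose": "Nonrulemaking",
#       "keywords": ["opioids", "..."],
#       "comments": [{"text": "...", "comment_id": "...", ...}]   # at most 10
#   }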


class RegCommentsAPIConfig(BuilderConfig):
    """BuilderConfig carrying the API key and docket ids used for live collection."""

    def __init__(self, api_key=None, docket_ids=None, **kwargs):
        super(RegCommentsAPIConfig, self).__init__(**kwargs)
        self.api_key = api_key
        self.docket_ids = docket_ids


class RegCommentsAPI(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        RegCommentsAPIConfig(
            name="default",
            version=datasets.Version("1.0.0"),
            description="Dataset of regulatory comments"
        )
    ]
    BUILDER_CONFIG_CLASS = RegCommentsAPIConfig

    def _info(self):
        # The feature schema must mirror the keys emitted by collect_data().
        features = datasets.Features({
            "id": datasets.Value("string"),
            "agency": datasets.Value("string"),
            "title": datasets.Value("string"),
            "update_date": datasets.Value("string"),
            "update_time": datasets.Value("string"),
            "purpose": datasets.Value("string"),
            "keywords": datasets.Sequence(datasets.Value("string")),
            "comments": datasets.Sequence({
                "text": datasets.Value("string"),
                "comment_id": datasets.Value("string"),
                "comment_url": datasets.Value("string"),
                "comment_date": datasets.Value("string"),
                "comment_time": datasets.Value("string"),
                "commenter_fname": datasets.Value("string"),
                "commenter_lname": datasets.Value("string"),
                "comment_length": datasets.Value("int32")
            })
        })

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION
        )

    def _split_generators(self, dl_manager):
        # Nothing to download: data is fetched live from the API, so dl_manager
        # is unused and everything goes into a single train split.
        api_key = self.config.api_key
        docket_ids = self.config.docket_ids

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "api_key": api_key,
                    "docket_ids": docket_ids
                },
            ),
        ]

    def _generate_examples(self, api_key, docket_ids):
        for docket_id in docket_ids:
            fetcher = RegulationsDataFetcher(docket_id, api_key)
            docket_data = fetcher.collect_data()

            # collect_data() returns None on an API error or rate limit; stop
            # collection entirely rather than keep hitting the API.
            if docket_data is None:
                print("Stopping data collection.")
                break

            # Only yield dockets that produced at least one usable comment.
            if len(docket_data["comments"]) != 0:
                yield docket_id, docket_data
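

# A hedged smoke test: it only runs when this script is executed directly, never
# when the datasets library imports the module. The REGS_API_KEY variable and
# docket id are assumptions for illustration, not part of the original dataset.
if __name__ == "__main__":
    import os

    demo_key = os.environ.get("REGS_API_KEY")  # hypothetical env variable
    if demo_key:
        demo_fetcher = RegulationsDataFetcher("FDA-2021-N-0270", demo_key)  # hypothetical docket id
        demo_docket = demo_fetcher.collect_data()
        if demo_docket:
            print(demo_docket["title"], len(demo_docket["comments"]))
        else:
            print("No data collected (bad key, unknown docket, or rate limit).")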