yes-man-today committed on
Commit
7f1176d
1 Parent(s): a19296d

Adding in comments

Browse files
Files changed (1) hide show
  1. regulatory_comments.py +18 -13
regulatory_comments.py CHANGED
@@ -11,36 +11,36 @@
11
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
  # See the License for the specific language governing permissions and
13
  # limitations under the License.
14
- """TODO: Add a description here."""
15
-
16
 
17
  import json
18
  import datasets
19
 
20
-
21
  _DESCRIPTION = """\
22
  United States governmental agencies often make proposed regulations open to the public for comment.
23
- This project will use Regulation.gov public API to aggregate and clean public comments for dockets
24
- related to Medication Assisted Treatment for Opioid Use Disorders.
25
 
26
- The dataset will contain docket metadata, docket text-content, comment metadata, and comment text-content.
 
 
27
  """
28
 
 
29
  _HOMEPAGE = "https://www.regulations.gov/"
30
 
31
-
32
- # TODO: Add link to the official dataset URLs here
33
- # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
34
- # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
35
  _URLS = {"url": "https://huggingface.co/datasets/ro-h/regulatory_comments/raw/main/docket_comments_all.json"}
36
 
37
-
38
- # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
39
  class RegComments(datasets.GeneratorBasedBuilder):
40
 
 
41
  VERSION = datasets.Version("1.1.0")
42
 
 
43
  def _info(self):
 
44
  features = datasets.Features({
45
  "id": datasets.Value("string"),
46
  "title": datasets.Value("string"),
@@ -59,18 +59,20 @@ class RegComments(datasets.GeneratorBasedBuilder):
59
  })
60
  })
61
 
 
62
  return datasets.DatasetInfo(
63
  description=_DESCRIPTION,
64
  features=features,
65
  homepage=_HOMEPAGE
66
  )
67
 
 
68
  def _split_generators(self, dl_manager):
69
  print("split generators called")
70
- # URLS should point to where your dataset is located
71
  urls = _URLS["url"]
72
  data_dir = dl_manager.download_and_extract(urls)
73
  print("urls accessed")
 
74
  return [
75
  datasets.SplitGenerator(
76
  name=datasets.Split.TRAIN,
@@ -80,6 +82,7 @@ class RegComments(datasets.GeneratorBasedBuilder):
80
  ),
81
  ]
82
 
 
83
  def _generate_examples(self, filepath):
84
  """This function returns the examples in the raw (text) form."""
85
  print("enter generate")
@@ -87,6 +90,7 @@ class RegComments(datasets.GeneratorBasedBuilder):
87
  with open(filepath, 'r', encoding='utf-8') as f:
88
  data = json.load(f)
89
  for docket in data:
 
90
  docket_id = docket["id"]
91
  docket_title = docket["title"]
92
  docket_context = docket["context"]
@@ -94,6 +98,7 @@ class RegComments(datasets.GeneratorBasedBuilder):
94
  docket_keywords = docket.get("keywords", [])
95
  comments = docket["comments"]
96
 
 
97
  yield key, {
98
  "id": docket_id,
99
  "title": docket_title,
 
11
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
  # See the License for the specific language governing permissions and
13
  # limitations under the License.
 
 
14
 
15
  import json
16
  import datasets
17
 
18
+ # Description of the dataset
19
  _DESCRIPTION = """\
20
  United States governmental agencies often make proposed regulations open to the public for comment.
21
+ Proposed regulations are organized into "dockets". This project will use Regulation.gov public API
22
+ to aggregate and clean public comments for dockets that mention opioid use.
23
 
24
+ Each example will consist of one docket, and include metadata such as docket id, docket title, etc.
25
+ Each docket entry will also include information about the top 10 comments, including comment metadata
26
+ and comment text.
27
  """
28
 
29
+ # Homepage URL of the dataset
30
  _HOMEPAGE = "https://www.regulations.gov/"
31
 
32
+ # URL to download the dataset
 
 
 
33
  _URLS = {"url": "https://huggingface.co/datasets/ro-h/regulatory_comments/raw/main/docket_comments_all.json"}
34
 
35
+ # Class definition for handling the dataset
 
36
  class RegComments(datasets.GeneratorBasedBuilder):
37
 
38
+ # Version of the dataset
39
  VERSION = datasets.Version("1.1.0")
40
 
41
+ # Method to define the structure of the dataset
42
  def _info(self):
43
+ # Defining the structure of the dataset
44
  features = datasets.Features({
45
  "id": datasets.Value("string"),
46
  "title": datasets.Value("string"),
 
59
  })
60
  })
61
 
62
+ # Returning the dataset structure
63
  return datasets.DatasetInfo(
64
  description=_DESCRIPTION,
65
  features=features,
66
  homepage=_HOMEPAGE
67
  )
68
 
69
+ # Method to handle dataset splitting (e.g., train/test)
70
  def _split_generators(self, dl_manager):
71
  print("split generators called")
 
72
  urls = _URLS["url"]
73
  data_dir = dl_manager.download_and_extract(urls)
74
  print("urls accessed")
75
+ # Defining the split (here, only train split is defined)
76
  return [
77
  datasets.SplitGenerator(
78
  name=datasets.Split.TRAIN,
 
82
  ),
83
  ]
84
 
85
+ # Method to generate examples from the dataset
86
  def _generate_examples(self, filepath):
87
  """This function returns the examples in the raw (text) form."""
88
  print("enter generate")
 
90
  with open(filepath, 'r', encoding='utf-8') as f:
91
  data = json.load(f)
92
  for docket in data:
93
+ # Extracting data fields from each docket
94
  docket_id = docket["id"]
95
  docket_title = docket["title"]
96
  docket_context = docket["context"]
 
98
  docket_keywords = docket.get("keywords", [])
99
  comments = docket["comments"]
100
 
101
+ # Yielding each docket with its information
102
  yield key, {
103
  "id": docket_id,
104
  "title": docket_title,