ro-h committed
Commit 7fd10e2
1 Parent(s): 457e1ea

Modify to be nested

Files changed (2)
  1. docket_comments.json +0 -0
  2. regulatory_comments.py +58 -54
docket_comments.json ADDED
The diff for this file is too large to render. See raw diff
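Although the JSON diff itself is not rendered, the shape the new loader expects can be read off regulatory_comments.py below: _generate_examples calls json.load on this file and indexes the keys shown in the sketch that follows. The field names are taken from the script; the values are invented placeholders, not real records.

# Hypothetical shape of one entry in docket_comments.json, inferred from the
# keys that _generate_examples reads; all values here are placeholders.
[
    {
        "id": "EXAMPLE-DOCKET-0001",
        "title": "Example docket title",
        "context": "Docket-level metadata such as agency and date",
        "comments": [
            {
                "text": "Example comment body",
                "comment_id": "EXAMPLE-COMMENT-0001",
                "comment_url": "https://www.regulations.gov/comment/EXAMPLE-COMMENT-0001",
                "comment_date": "2024-01-01",
                "comment_title": "Example comment title",
                "commenter_fname": "Jane",
                "commenter_lname": "Doe",
                "comment_length": 20
            }
        ]
    }
]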
 
regulatory_comments.py CHANGED
@@ -35,7 +35,7 @@ _HOMEPAGE = "https://www.regulations.gov/"
 # TODO: Add link to the official dataset URLs here
 # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
-_URLS = {"url": "https://huggingface.co/datasets/ro-h/regulatory_comments/raw/main/temp.csv"
+_URLS = {"url": "https://huggingface.co/datasets/ro-h/regulatory_comments/raw/main/docket_comments.json"
 
 }
 
@@ -62,71 +62,75 @@ class RegComments(datasets.GeneratorBasedBuilder):
     # comment_text = comment_data['data']['attributes']['comment'],
     # commenter_name = comment_data['data']['attributes'].get('firstName', '') + " " + comment_data['data']['attributes'].get('lastName', '')
     # ) #use pandas
-
+
     def _info(self):
-        print("info called")
-        features = datasets.Features(
-            {"docket_agency": datasets.Value("string"),
-            "docket_title": datasets.Value("string"),
-            "docket_date": datasets.Value("string"),
-            "comment_id": datasets.Value("string"),
-            "comment_date": datasets.Value("string"),
-            "comment_url": datasets.Value("string"),
-            "comment_title": datasets.Value("string"),
-            "commenter_name": datasets.Value("string"),
-            "comment_length": datasets.Value("int64"),
-            "comment_text": datasets.Value("string"),
-            }
-        )
-
-
+        features = datasets.Features({
+            "id": datasets.Value("string"),
+            "title": datasets.Value("string"),
+            "context": datasets.Value("string"),
+            "comments": datasets.Sequence({
+                "text": datasets.Value("string"),
+                "comment_id": datasets.Value("string"),
+                "comment_url": datasets.Value("string"),
+                "comment_date": datasets.Value("string"),
+                "comment_title": datasets.Value("string"),
+                "commenter_fname": datasets.Value("string"),
+                "commenter_lname": datasets.Value("string"),
+                "comment_length": datasets.Value("int32")
+            })
+        })
+
         return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
             description=_DESCRIPTION,
-            # This defines the different columns of the dataset and their types
-            features=features, # Here we define them above because they are different between the two configurations
-            # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
-            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
-            # supervised_keys=("sentence", "label"),
-            # Homepage of the dataset for documentation
+            features=features,
             homepage=_HOMEPAGE
         )
 
     def _split_generators(self, dl_manager):
         print("split generators called")
-        # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
-        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
-
-        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
-        # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
-        # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
+        # URLS should point to where your dataset is located
        urls = _URLS["url"]
-        data_dir = dl_manager.download_and_extract(urls)
+        data_dir = dl_manager.download_and_extract(urls)
         print("urls accessed")
         print(data_dir)
-        #print("File path:", os.path.join(data_dir, "train.csv"))
-        return [datasets.SplitGenerator(name=datasets.Split.TRAIN,
-            # These kwargs will be passed to _generate_examples
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "filepath": data_dir,
-                    #"split": "train",
-                },),]
+                    "filepath": os.path.join(data_dir, 'docket_comments.json'),
+                },
+            ),
+        ]
 
-    #method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
     def _generate_examples(self, filepath):
-        print("generate examples called")
-        with open(filepath, encoding="utf-8") as f:
-            reader = csv.DictReader(f)
-            for key, row in enumerate(reader):
+        """This function returns the examples in the raw (text) form."""
+        logger.info("generating examples from = %s", filepath)
+        key = 0
+        with open(filepath, 'r', encoding='utf-8') as f:
+            data = json.load(f)
+            for docket in data:
+                docket_id = docket["id"]
+                docket_title = docket["title"]
+                docket_context = docket["context"]
+                comments = []
+                for comment in docket["comments"]:
+                    comment_data = {
+                        "text": comment["text"],
+                        "comment_id": comment["comment_id"],
+                        "comment_url": comment["comment_url"],
+                        "comment_date": comment["comment_date"],
+                        "comment_title": comment["comment_title"],
+                        "commenter_fname": comment["commenter_fname"],
+                        "commenter_lname": comment["commenter_lname"],
+                        "comment_length": comment["comment_length"]
+                    }
+                    comments.append(comment_data)
+
                 yield key, {
-                    "docket_agency": row["docket_agency"],
-                    "docket_title": row["docket_title"],
-                    "docket_date": row["docket_date"],
-                    "comment_id": row["comment_id"],
-                    "comment_date": row["comment_date"],
-                    "comment_url": row["comment_url"],
-                    "comment_title": row["comment_title"],
-                    "commenter_name": row["commenter_name"],
-                    "comment_length": int(row["comment_length"]),
-                    "comment_text": row["comment_text"],
-                }
+                    "id": docket_id,
+                    "title": docket_title,
+                    "context": docket_context,
+                    "comments": comments
+                }
+                key += 1
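
For downstream users, a minimal usage sketch of the nested layout follows. It assumes the script builds under the ro-h/regulatory_comments repo id that _URLS points at, and that os, json, and a module-level logger are imported or defined earlier in the file, outside this diff.

# Usage sketch, not part of the commit.
from datasets import load_dataset

# Recent `datasets` releases require trust_remote_code=True for script-backed datasets.
ds = load_dataset("ro-h/regulatory_comments", split="train", trust_remote_code=True)

example = ds[0]
print(example["id"], example["title"])

# A `datasets.Sequence` over a dict is stored as a dict of lists, so the nested
# comments come back "column-first": one dict of parallel lists, not the list of
# per-comment dicts that _generate_examples yields.
comments = example["comments"]
for url, text in zip(comments["comment_url"], comments["text"]):
    print(url, len(text))

One caveat worth flagging: for a single raw .json URL like the one in _URLS, dl_manager.download_and_extract typically returns the path of the downloaded file itself rather than a directory, in which case the os.path.join(data_dir, 'docket_comments.json') in _split_generators would point at a path that does not exist; passing data_dir through as filepath unchanged would be the safer choice.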