ro-h committed on
Commit
4e9ea54
1 Parent(s): 14fc47f

Upload regulatory_comments_api.py

Browse files
Files changed (1) hide show
  1. regulatory_comments_api.py +215 -0
regulatory_comments_api.py ADDED
@@ -0,0 +1,215 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
import requests
import datasets

# Human-readable description shown on the dataset card.
_DESCRIPTION = """\
United States governmental agencies often make proposed regulations open to the public for comment.
Proposed regulations are organized into "dockets". This dataset will use Regulation.gov public API
to aggregate and clean public comments for dockets that mention opioid use.

Each example will consist of one docket, and include metadata such as docket id, docket title, etc.
Each docket entry will also include information about the top 10 comments, including comment metadata
and comment text.
"""

# Homepage URL of the dataset
_HOMEPAGE = "https://www.regulations.gov/"

# URL to download the dataset
# NOTE(review): _URLS is defined but never referenced by the builder below —
# data appears to be fetched live from the API rather than downloaded from
# this JSON file. Confirm before removing.
_URLS = {"url": "https://huggingface.co/datasets/ro-h/regulatory_comments/raw/main/docket_comments_all.json"}
35
class RegulationsDataFetcher:
    """Fetch docket metadata and public comments from the Regulations.gov v4 API.

    Collects up to 10 detailed comments for a single docket and reshapes them
    into the nested dictionary format expected by the dataset builder.
    """

    # SECURITY NOTE(review): a live API key is hard-coded in source. It should
    # be rotated and supplied by callers via the `api_key` parameter instead.
    API_KEY = "IsH1c1CAB0CR8spovnnx2INbLz8gQlVkbmXYII2z" #'4T29l93SvmnyNCVFZUFzSfUqTq6k7S0Wqn93sLcH'
    BASE_COMMENT_URL = 'https://api.regulations.gov/v4/comments'
    BASE_DOCKET_URL = 'https://api.regulations.gov/v4/dockets/'
    HEADERS = {
        'X-Api-Key': API_KEY,
        'Content-Type': 'application/json'
    }

    def __init__(self, docket_id, api_key=None):
        """Set up URLs and request headers for one docket.

        Args:
            docket_id: Regulations.gov docket identifier.
            api_key: optional caller-supplied API key. BUG FIX: the original
                __init__ accepted only `docket_id`, yet the builder calls
                RegulationsDataFetcher(docket_id, api_key) — that call raised
                TypeError. The new parameter is optional, so single-argument
                callers keep working.
        """
        self.docket_id = docket_id
        self.docket_url = self.BASE_DOCKET_URL + docket_id
        self.dataset = []
        if api_key is not None:
            # Shadow the class-level HEADERS with an instance copy carrying
            # the caller's key; methods read self.HEADERS, so they pick this
            # up transparently.
            self.HEADERS = {
                'X-Api-Key': api_key,
                'Content-Type': 'application/json'
            }

    def fetch_comments(self):
        """Fetch a single page of 25 comments; return parsed JSON or None on HTTP error."""
        url = f'{self.BASE_COMMENT_URL}?filter[docketId]={self.docket_id}&page[number]=1&page[size]=25'
        response = requests.get(url, headers=self.HEADERS)

        if response.status_code == 200:
            return response.json()
        else:
            print(f'Failed to retrieve comments: {response.status_code}')
            return None

    def get_docket_info(self):
        """Return (agencyId, title, modifyDate, docketType, keywords) or None on HTTP error."""
        response = requests.get(self.docket_url, headers=self.HEADERS)

        if response.status_code == 200:
            docket_data = response.json()
            attributes = docket_data['data']['attributes']
            return (attributes['agencyId'],
                    attributes['title'],
                    attributes['modifyDate'],
                    attributes['docketType'],
                    attributes['keywords'])
        else:
            print(f'Failed to retrieve docket info: {response.status_code}')
            return None

    def fetch_comment_details(self, comment_url):
        """Fetch detailed information of a single comment; return JSON or None on HTTP error."""
        response = requests.get(comment_url, headers=self.HEADERS)
        if response.status_code == 200:
            return response.json()
        else:
            print(f'Failed to retrieve comment details: {response.status_code}')
            return None

    def collect_data(self):
        """Collect docket info plus up to 10 comments, reshaped into a nested dict."""
        data = self.fetch_comments()
        docket_info = self.get_docket_info()

        # BUG FIX: the original guarded a None docket_info only for title and
        # context; indexing docket_info[3] and [4] raised TypeError whenever
        # the docket lookup failed. Guard all fields uniformly.
        nested_data = {
            "id": self.docket_id,
            "title": docket_info[1] if docket_info else "Unknown Title",
            "context": docket_info[2] if docket_info else "Unknown Context",
            "purpose": docket_info[3] if docket_info else "Unknown Purpose",
            "keywords": docket_info[4] if docket_info else [],
            "comments": []
        }

        if data and 'data' in data:
            for comment in data['data']:
                comment_details = self.fetch_comment_details(comment['links']['self'])

                if comment_details and 'data' in comment_details and 'attributes' in comment_details['data']:
                    comment_data = comment_details['data']['attributes']
                    # The API may return an explicit null for 'comment'.
                    comment_text = comment_data.get('comment', '')
                    nested_comment = {
                        "text": comment_text,
                        "comment_id": comment['id'],
                        "comment_url": comment['links']['self'],
                        "comment_date": comment['attributes']['postedDate'],
                        "comment_title": comment['attributes']['title'],
                        "commenter_fname": comment_data.get('firstName', ''),
                        "commenter_lname": comment_data.get('lastName', ''),
                        "comment_length": len(comment_text) if comment_text is not None else 0
                    }
                    nested_data["comments"].append(nested_comment)

                # Keep only the first 10 usable comments per docket.
                if len(nested_data["comments"]) >= 10:
                    break

        return nested_data
121
+
122
class RegComments(datasets.GeneratorBasedBuilder):
    """Dataset builder: one example per opioid-related docket, with nested comments."""

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        """Describe the dataset schema (docket metadata plus a sequence of comments)."""
        comment_schema = {
            "text": datasets.Value("string"),
            "comment_id": datasets.Value("string"),
            "comment_url": datasets.Value("string"),
            "comment_date": datasets.Value("string"),
            "comment_title": datasets.Value("string"),
            "commenter_fname": datasets.Value("string"),
            "commenter_lname": datasets.Value("string"),
            "comment_length": datasets.Value("int32")
        }
        docket_features = datasets.Features({
            "id": datasets.Value("string"),
            "title": datasets.Value("string"),
            "context": datasets.Value("string"),
            "purpose": datasets.Value("string"),
            "keywords": datasets.Sequence(datasets.Value("string")),
            "comments": datasets.Sequence(comment_schema)
        })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=docket_features,
            homepage=_HOMEPAGE
        )

    def _split_generators(self, dl_manager):
        """Produce a single TRAIN split; the user supplies an API key via the config."""
        train_split = datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            gen_kwargs={
                "search_terms": opioid_related_terms,
                # NOTE(review): assumes the builder config carries an
                # `api_key` attribute supplied by the user — confirm a custom
                # BuilderConfig defines it, as the default one does not.
                "api_key": self.config.api_key,
            },
        )
        return [train_split]

    def _generate_examples(self, search_terms, api_key):
        """Yield (docket_id, docket_data) for every docket matching any search term."""
        for search_term in search_terms:
            for docket_id in get_docket_ids(search_term, api_key):
                docket_data = RegulationsDataFetcher(docket_id, api_key).collect_data()
                # Skip dockets that yielded no usable comments.
                if len(docket_data["comments"]) != 0:
                    yield docket_id, docket_data
174
+
175
def get_docket_ids(search_term, api_key):
    """Return the list of docket IDs whose text matches `search_term`.

    Args:
        search_term: free-text query passed as filter[searchTerm].
        api_key: Regulations.gov API key, sent as a query parameter.

    Returns:
        A list of docket id strings; an empty list when the request fails.
    """
    url = "https://api.regulations.gov/v4/dockets"
    params = {
        'filter[searchTerm]': search_term,
        'api_key': api_key
    }
    response = requests.get(url, params=params)
    if response.status_code == 200:
        data = response.json()
        return [docket['id'] for docket in data['data']]
    else:
        # BUG FIX: the original returned f"Error: {code}" — a string — which
        # the caller then iterated character-by-character. Report the failure
        # and return an empty list so iteration is safe.
        print(f'Failed to retrieve docket IDs: {response.status_code}')
        return []
190
+
191
# Search terms used to find opioid-related dockets on Regulations.gov.
opioid_related_terms = [
    # Types of Opioids
    "opioids",
    "heroin",
    "morphine",
    "fentanyl",
    "methadone",
    "oxycodone",
    "hydrocodone",
    "codeine",
    "tramadol",
    "prescription opioids",
    # Withdrawal Support
    "lofexidine",
    "buprenorphine",
    "naloxone",
    # Related Phrases
    "opioid epidemic",
    "opioid abuse",
    "opioid crisis",
    "opioid overdose",  # BUG FIX: missing comma here fused this entry with the next
    "opioid tolerance",
    "opioid treatment program",
    "medication assisted treatment",
]