# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import re

import requests

import datasets
from datasets import BuilderConfig

_DESCRIPTION = """\
United States governmental agencies often make proposed regulations open to the public for comment. 
Proposed regulations are organized into "dockets". This dataset will use Regulation.gov public API 
to aggregate and clean public comments for dockets that mention opioid use. 

Each example will consist of one docket, and include metadata such as docket id, docket title, etc. 
Each docket entry will also include information about the top 10 comments, including comment metadata
and comment text. 
"""

# Homepage URL of the dataset
_HOMEPAGE = "https://www.regulations.gov/"
_CITATION = """@misc{ro_huang_regulatory_2023-1,
	author = {{Ro Huang}},
	date = {2023-03-19},
	publisher = {Hugging Face},
	title = {Regulatory Comments {API} Call},
	url = {https://huggingface.co/datasets/ro-h/regulatory_comments_api},
	version = {1.1.4},
	bdsk-url-1 = {https://huggingface.co/datasets/ro-h/regulatory_comments_api}}
"""

class RegulationsDataFetcher:
    """Thin wrapper around the Regulations.gov v4 API for a single docket."""

    BASE_COMMENT_URL = 'https://api.regulations.gov/v4/comments'
    BASE_DOCKET_URL = 'https://api.regulations.gov/v4/dockets/'

    def __init__(self, docket_id, api_key):
        self.docket_id = docket_id
        self.api_key = api_key
        self.docket_url = self.BASE_DOCKET_URL + docket_id
        self.headers = {
            'X-Api-Key': self.api_key,
            'Content-Type': 'application/json'
        }

    def fetch_comments(self):
        """Fetch the first page of up to 25 comments for this docket."""
        url = f'{self.BASE_COMMENT_URL}?filter[docketId]={self.docket_id}&page[number]=1&page[size]=25'
        response = requests.get(url, headers=self.headers)

        if response.status_code == 200:
            return response.json()
        elif response.status_code == 429:
            print('API rate limit reached.')
            return None
        else:
            print(f'Failed to retrieve comments: {response.status_code}')
            return None

    def get_docket_info(self):
        """Fetch docket metadata and return selected attributes as a tuple."""
        response = requests.get(self.docket_url, headers=self.headers)

        if response.status_code == 200:
            attributes = response.json()['data']['attributes']
            return (attributes['agencyId'],
                    attributes['title'],
                    attributes['modifyDate'],
                    attributes['docketType'],
                    attributes['keywords'])
        elif response.status_code == 429:
            print('API rate limit reached.')
            return None
        else:
            print(f'Failed to retrieve docket info: {response.status_code}')
            return None
        
    def fetch_comment_details(self, comment_url):
        """Fetch detailed information of a comment."""
        response = requests.get(comment_url, headers=self.headers)
    
        if response.status_code == 200:
            return response.json()
        else:
            print(f'Failed to retrieve comment details: {response.status_code}')
            return None
        
    def collect_data(self):
        """Collect docket metadata and comments, reshaped into a nested dictionary."""
        data = self.fetch_comments()
        if not data:
            return None

        docket_info = self.get_docket_info()
        if not docket_info:
            return None

        # Start with docket-level information
        modify_date = docket_info[2]
        nested_data = {
            "id": self.docket_id,
            "agency": self.docket_id.split('-')[0],
            "title": docket_info[1],
            "update_date": modify_date.split('T')[0] if modify_date else "Unknown Update Date",
            "update_time": modify_date.split('T')[1].strip('Z') if modify_date and 'T' in modify_date else "Unknown Update Time",
            "purpose": docket_info[3],
            "keywords": docket_info[4],
            "comments": []
        }

        # Go through the docket's comments, keeping at most the first 10 usable ones
        if 'data' in data:
            for comment in data['data']:
                if len(nested_data["comments"]) >= 10:
                    break

                comment_details = self.fetch_comment_details(comment['links']['self'])
                # fetch_comment_details() returns None on failure, so guard before indexing
                if comment_details and 'data' in comment_details and 'attributes' in comment_details['data']:
                    comment_data = comment_details['data']['attributes']

                    # Basic comment text cleaning: strip markup remnants and HTML entities
                    comment_text = (comment_data.get('comment', '') or '').strip()
                    comment_text = comment_text.replace("<br/>", "").replace("<span style='padding-left: 30px'></span>", "")
                    comment_text = re.sub(r'&[^;]+;', '', comment_text)

                    # Record detailed comment information, skipping attachment-only and empty comments
                    if (comment_text
                            and "attached" not in comment_text.lower()
                            and "attachment" not in comment_text.lower()
                            and comment_text.lower() != "n/a"):
                        posted_date = comment['attributes']['postedDate']
                        nested_comment = {
                            "text": comment_text,
                            "comment_id": comment['id'],
                            "comment_url": comment['links']['self'],
                            "comment_date": posted_date.split('T')[0],
                            "comment_time": posted_date.split('T')[1].strip('Z'),
                            "commenter_fname": ((comment_data.get('firstName') or 'Anonymous').split(',')[0]).capitalize(),
                            "commenter_lname": ((comment_data.get('lastName') or 'Anonymous').split(',')[0]).capitalize(),
                            "comment_length": len(comment_text)
                        }
                        nested_data["comments"].append(nested_comment)

        return nested_data
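

# Illustrative standalone use of RegulationsDataFetcher (a sketch; it assumes a
# valid Regulations.gov API key, and the docket ID below is a placeholder):
#
#     fetcher = RegulationsDataFetcher("FDA-2023-N-0001", "YOUR_API_KEY")
#     docket = fetcher.collect_data()
#     if docket:
#         print(docket["title"], len(docket["comments"]))

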
class RegCommentsAPIConfig(BuilderConfig):
    """BuilderConfig that carries the Regulations.gov API key and docket IDs."""

    def __init__(self, api_key=None, docket_ids=None, **kwargs):
        super().__init__(**kwargs)
        self.api_key = api_key
        self.docket_ids = docket_ids
        

class RegCommentsAPI(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        RegCommentsAPIConfig(
            name="default",
            version=datasets.Version("1.0.0"),
            description="Dataset of regulatory comments"
        )
    ]
    BUILDER_CONFIG_CLASS = RegCommentsAPIConfig

    # Method to define the structure of the dataset
    def _info(self):
        # Feature keys must match the dictionaries yielded by _generate_examples
        features = datasets.Features({
            "id": datasets.Value("string"),
            "agency": datasets.Value("string"),
            "title": datasets.Value("string"),
            "update_date": datasets.Value("string"),
            "update_time": datasets.Value("string"),
            "purpose": datasets.Value("string"),
            "keywords": datasets.Sequence(datasets.Value("string")),
            "comments": datasets.Sequence({
                "text": datasets.Value("string"),
                "comment_id": datasets.Value("string"),
                "comment_url": datasets.Value("string"),
                "comment_date": datasets.Value("string"),
                "comment_time": datasets.Value("string"),
                "commenter_fname": datasets.Value("string"),
                "commenter_lname": datasets.Value("string"),
                "comment_length": datasets.Value("int32")
            })
        })

        # Returning the dataset structure
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION
        )
    
    def _split_generators(self, dl_manager):
        # Retrieve the API key from the builder's config
        api_key = self.config.api_key
        docket_ids = self.config.docket_ids

        # Define the dataset's splits; this dataset exposes only a training split
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "api_key": api_key,  # Pass the API key to the generator function
                    "docket_ids": docket_ids
                },
            ),
        ]

    def _generate_examples(self, api_key, docket_ids):
        # Iterate over each requested docket ID and fetch its data
        for docket_id in docket_ids:
            fetcher = RegulationsDataFetcher(docket_id, api_key)  # Initialize with the API key
            docket_data = fetcher.collect_data()

            # collect_data() returns None on failure (e.g., a rate limit), so stop early
            if docket_data is None:
                print("Stopping data collection.")
                break

            # Only yield dockets that have at least one usable comment
            if docket_data["comments"]:
                yield docket_id, docket_data
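

# Minimal loading sketch, assuming a valid Regulations.gov API key and network
# access; the API key and docket ID below are placeholders, not real values.
if __name__ == "__main__":
    dataset = datasets.load_dataset(
        __file__,                        # load the builder defined in this script
        api_key="YOUR_API_KEY",          # replace with a real Regulations.gov API key
        docket_ids=["FDA-2023-N-0001"],  # placeholder docket ID
        split="train",
    )
    print(dataset)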