import boto3
import numpy as np
import pandas as pd
from botocore import UNSIGNED
from botocore.client import Config
from uszipcode import SearchEngine
# Initialize the S3 client
s3 = boto3.client('s3', config=Config(signature_version=UNSIGNED))
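# UNSIGNED disables request signing, so the public bucket can be read without AWS credentials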
# Download the three JSON files from the S3 bucket into the working directory
bucket_name = 'sta663project1'
file_keys = ['NYC_collisions_data.json', 'NYC_borough_data.json', 'NYC_weather_data.json']
local_file_names = ['NYC_collisions_data.json', 'NYC_borough_data.json', 'NYC_weather_data.json']
for file_key, local_file_name in zip(file_keys, local_file_names):
    s3.download_file(bucket_name, file_key, local_file_name)
# Load each file into a DataFrame: df is the NYC collisions data, df2 the NYC borough data, df3 the NYC weather data
df = pd.read_json(local_file_names[0])
df2 = pd.read_json(local_file_names[1])
df3 = pd.read_json(local_file_names[2])
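# pd.read_json infers the JSON layout; the files are assumed to be record-oriented (one object per row)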
# Convert 'CRASH TIME' to datetime so the hour can be extracted below
df['CRASH TIME'] = pd.to_datetime(df['CRASH TIME'], format='%H:%M')
# In the NYC collisions data, derive 'CRASH TIME PERIOD', 'CONTRIBUTING FACTOR VEHICLES', 'VEHICLE TYPES', 'STREET NAME', and 'STREET TYPE' for each row
for index, row in df.iterrows():
    # Bucket the crash hour into a 3-hour window, e.g. 14:05 falls in '12:00-14:59'
    hour = row['CRASH TIME'].hour
    period_start = (hour // 3) * 3
    period_end = period_start + 2
    df.at[index, 'CRASH TIME PERIOD'] = f"{period_start:02d}:00-{period_end:02d}:59"
    # Collect non-missing contributing factors and vehicle types; pd.notna guards
    # against NaN entries, which would otherwise break the string join
    factors = [row[f'CONTRIBUTING FACTOR VEHICLE {i}'] for i in range(1, 6)
               if pd.notna(row.get(f'CONTRIBUTING FACTOR VEHICLE {i}')) and row.get(f'CONTRIBUTING FACTOR VEHICLE {i}') != '']
    df.at[index, 'CONTRIBUTING FACTOR VEHICLES'] = ', '.join(factors)
    vehicle_types = [row[f'VEHICLE TYPE CODE {i}'] for i in range(1, 6)
                     if pd.notna(row.get(f'VEHICLE TYPE CODE {i}')) and row.get(f'VEHICLE TYPE CODE {i}') != '']
    df.at[index, 'VEHICLE TYPES'] = ', '.join(vehicle_types)
    street_names = []
    street_types = []
    # Check and append 'ON STREET NAME'
    if pd.notna(row['ON STREET NAME']) and row['ON STREET NAME'] != '':
        street_names.append(row['ON STREET NAME'])
        street_types.append('ON STREET')
    # Check and append 'CROSS STREET NAME'
    if pd.notna(row['CROSS STREET NAME']) and row['CROSS STREET NAME'] != '':
        street_names.append(row['CROSS STREET NAME'])
        street_types.append('CROSS STREET')
    # Check and append 'OFF STREET NAME'
    if pd.notna(row['OFF STREET NAME']) and row['OFF STREET NAME'] != '':
        street_names.append(row['OFF STREET NAME'])
        street_types.append('OFF STREET')
    # Join the names and types with a comma
    df.at[index, 'STREET NAME'] = ', '.join(street_names)
    df.at[index, 'STREET TYPE'] = ', '.join(street_types)
# Convert the injured and killed count columns to numeric
numeric_columns = ['NUMBER OF PERSONS INJURED', 'NUMBER OF PEDESTRIANS INJURED', 'NUMBER OF CYCLIST INJURED', 'NUMBER OF MOTORIST INJURED',
'NUMBER OF PERSONS KILLED', 'NUMBER OF PEDESTRIANS KILLED', 'NUMBER OF CYCLIST KILLED', 'NUMBER OF MOTORIST KILLED']
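# to_numeric(errors='coerce') maps unparseable values to NaN; fillna(0) then zeroes them before the integer cast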
for column in numeric_columns:
    df[column] = pd.to_numeric(df[column], errors='coerce').fillna(0).astype(int)
# Add new columns 'NUMBER OF INJURIES' and 'NUMBER OF DEATHS' as totals across the four count columns
df['NUMBER OF INJURIES'] = df['NUMBER OF PERSONS INJURED'] + df['NUMBER OF PEDESTRIANS INJURED'] + df['NUMBER OF CYCLIST INJURED'] + df['NUMBER OF MOTORIST INJURED']
df['NUMBER OF DEATHS'] = df['NUMBER OF PERSONS KILLED'] + df['NUMBER OF PEDESTRIANS KILLED'] + df['NUMBER OF CYCLIST KILLED'] + df['NUMBER OF MOTORIST KILLED']
# Keep only the columns needed for the merges with df2 and df3
columns_to_keep = [
'CRASH DATE', 'BOROUGH', 'ZIP CODE', 'LATITUDE', 'LONGITUDE', 'COLLISION_ID',
'CRASH TIME PERIOD', 'CONTRIBUTING FACTOR VEHICLES', 'VEHICLE TYPES',
'NUMBER OF INJURIES', 'NUMBER OF DEATHS', 'STREET NAME', 'STREET TYPE'
]
df = df[columns_to_keep]
# Create a SearchEngine object
search = SearchEngine()
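# Note: on first use, SearchEngine downloads a local SQLite database of US zip codes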
# Convert 'LATITUDE' and 'LONGITUDE' in NYC collisions data to floats
df['LATITUDE'] = pd.to_numeric(df['LATITUDE'], errors='coerce')
df['LONGITUDE'] = pd.to_numeric(df['LONGITUDE'], errors='coerce')
# Fill in a missing 'ZIP CODE' when the row has valid 'LATITUDE' and 'LONGITUDE'
for index, row in df.iterrows():
    # Treat both NaN and empty-string zip codes as missing, and skip rows with NaN or (0, 0) coordinates
    if (pd.isna(row['ZIP CODE']) or row['ZIP CODE'] == '') and \
            not (pd.isna(row['LATITUDE']) or row['LATITUDE'] == 0) and \
            not (pd.isna(row['LONGITUDE']) or row['LONGITUDE'] == 0):
        result = search.by_coordinates(lat=row['LATITUDE'], lng=row['LONGITUDE'], returns=1)
        if result:
            # Set 'ZIP CODE' to the nearest match returned by uszipcode
            df.at[index, 'ZIP CODE'] = result[0].zipcode
# Convert the 'Borough' column in NYC borough data to uppercase
df2['Borough'] = df2['Borough'].str.upper()
# Create a mapping dictionary from ZIP Code to Borough
zip_to_borough = df2.set_index('ZIP Code')['Borough'].to_dict()
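# The mapping looks like, e.g., {10001: 'MANHATTAN', 11201: 'BROOKLYN', ...}; its keys keep the
# dtype of df2's 'ZIP Code' column, which must match df['ZIP CODE'] for the lookup below to hit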
# update_borough fills a missing 'BOROUGH' from the zip-to-borough mapping, leaving existing values untouched
def update_borough(row):
    if pd.isna(row['BOROUGH']) or row['BOROUGH'] == '':
        return zip_to_borough.get(row['ZIP CODE'], row['BOROUGH'])
    else:
        return row['BOROUGH']
# Apply the function to each row in the NYC collisions data
df['BOROUGH'] = df.apply(update_borough, axis=1)
# Keep only the specified columns in NYC weather data
df3 = df3[['datetime', 'description', 'precip', 'preciptype', 'tempmax', 'tempmin']]
# Rename the columns in NYC weather data
df3.rename(columns={
    'description': 'WEATHER DESCRIPTION',
    'precip': 'PRECIPITATION',
    'preciptype': 'PRECIPITATION TYPE',
    'tempmax': 'TEMPMAX',
    'tempmin': 'TEMPMIN'
}, inplace=True)
# Convert 'CRASH DATE' to datetime and remove the time component
df['CRASH DATE'] = pd.to_datetime(df['CRASH DATE']).dt.date
# Convert 'datetime' in NYC weather data to datetime and remove the time component
df3['datetime'] = pd.to_datetime(df3['datetime']).dt.date
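# Both keys are now plain datetime.date objects, so equal dates match in the merge below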
# Merge the NYC collisions data with the NYC weather data on 'CRASH DATE' and 'datetime' respectively, using a left join
merged_df = pd.merge(left=df, right=df3, how='left', left_on='CRASH DATE', right_on='datetime')
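# With how='left', every collision row is kept; dates absent from the weather data yield NaN weather columns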
# Drop the redundant 'datetime' column from merged_df, since 'CRASH DATE' carries the same date
merged_df.drop(columns=['datetime'], inplace=True)
# Convert 'CRASH DATE' column to string to avoid messy date columns and conversion issues in Hugging Face
merged_df['CRASH DATE'] = merged_df['CRASH DATE'].astype(str)
# Replace empty strings with NaN
merged_df = merged_df.replace('', np.nan)
# Print the first row of merged_df
print(merged_df.iloc[0])
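# A minimal sketch of persisting the result, assuming a local CSV output is acceptable
# (the file name 'merged_data.csv' is illustrative, not part of the original pipeline):
# merged_df.to_csv('merged_data.csv', index=False)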