path: string (lengths 13–17)
screenshot_names: sequence (lengths 1–873)
code: string (lengths 0–40.4k)
cell_type: string (1 class)
122247715/cell_22
[ "text_plain_output_1.png" ]
import seaborn as sns

titanic = sns.load_dataset('titanic')
titanic.isnull().sum()
titanic.shape[0]
titanic.isnull().sum() / titanic.shape[0]
titanic['age'].isnull().sum()
code
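The cell above only measures how many 'age' values are missing; a minimal hypothetical follow-up (the imputation cell itself is not part of this dump) would fill them with the median:

# Hypothetical follow-up, not from the original notebook: median-impute 'age'.
titanic['age'] = titanic['age'].fillna(titanic['age'].median())
assert titanic['age'].isnull().sum() == 0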
122247715/cell_27
[ "text_plain_output_1.png" ]
import seaborn as sns

titanic = sns.load_dataset('titanic')
titanic.isnull().sum()
titanic.shape[0]
titanic.isnull().sum() / titanic.shape[0]
# Fill missing embarkation towns with the most frequent value (the mode).
titanic['embark_town'] = titanic['embark_town'].fillna(titanic['embark_town'].mode()[0])
titanic['embark_town'].isnull().sum()
code
122247715/cell_36
[ "text_plain_output_1.png" ]
import seaborn as sns

titanic = sns.load_dataset('titanic')
titanic.isnull().sum()
titanic.shape[0]
titanic.isnull().sum() / titanic.shape[0]
titanic.isnull().sum()
# 'deck' is mostly missing, so drop the column entirely.
titanic.drop('deck', axis=1, inplace=True)
titanic.isnull().sum()
titanic['adult_male'].value_counts()
code
88092005/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import numpy as np
import random
import geocoder
import geopy
import plotly.express as px
code
2005328/cell_13
[ "text_html_output_1.png" ]
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import string

messages = pd.read_csv('../input/spam.csv', encoding='latin-1')
messages.drop(['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], axis=1, inplace=True)
messages = messages.rename(columns={'v1': 'class', 'v2': 'text'})

def process_text(text):
    nopunc = [char for char in text if char not in string.punctuation]
    nopunc = ''.join(nopunc)
    clean_words = [w for w in nopunc.split() if w.lower() not in stopwords.words('english')]
    return clean_words

messages['text'].apply(process_text).head()

# NOTE: X_train/y_train are not defined in this cell dump; a train/test
# split is assumed here so the cell runs standalone.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(messages['text'], messages['class'], test_size=0.2, random_state=0)

pipeline = Pipeline([('bow', CountVectorizer(analyzer=process_text)),
                     ('tfidf', TfidfTransformer()),
                     ('classifier', MultinomialNB())])
pipeline.fit(X_train, y_train)
code
2005328/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

messages = pd.read_csv('../input/spam.csv', encoding='latin-1')
messages.drop(['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], axis=1, inplace=True)
messages = messages.rename(columns={'v1': 'class', 'v2': 'text'})

# NOTE: 'words_len' and 'char_len' are not created anywhere in this dump;
# they are assumed to be derived from the message text as below.
messages['words_len'] = messages['text'].apply(lambda t: len(t.split()))
messages['char_len'] = messages['text'].apply(len)

f, ax = plt.subplots(figsize=(12, 8))
sns.stripplot(x='class', y='words_len', data=messages)
plt.title('Number of words per class')

f, ax = plt.subplots(figsize=(12, 8))
sns.stripplot(x='class', y='char_len', data=messages)
plt.title('Number of characters per class')
code
2005328/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

messages = pd.read_csv('../input/spam.csv', encoding='latin-1')
messages.drop(['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], axis=1, inplace=True)
messages = messages.rename(columns={'v1': 'class', 'v2': 'text'})
messages.head()
code
2005328/cell_19
[ "text_plain_output_1.png" ]
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.manifold import TSNE
import hypertools as hyp
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

messages = pd.read_csv('../input/spam.csv', encoding='latin-1')
messages.drop(['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], axis=1, inplace=True)
messages = messages.rename(columns={'v1': 'class', 'v2': 'text'})

vectors = TfidfVectorizer().fit_transform(messages.text)
X_reduced = TruncatedSVD(n_components=100, random_state=0).fit_transform(vectors)
tsne = TSNE(n_components=2, perplexity=110, verbose=2).fit_transform(X_reduced)
hyp.plot(tsne, 'o', group=messages['class'], legend=list({'ham': 0, 'spam': 1}))
code
2005328/cell_18
[ "text_plain_output_1.png" ]
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.manifold import TSNE
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

messages = pd.read_csv('../input/spam.csv', encoding='latin-1')
messages.drop(['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], axis=1, inplace=True)
messages = messages.rename(columns={'v1': 'class', 'v2': 'text'})

vectors = TfidfVectorizer().fit_transform(messages.text)
X_reduced = TruncatedSVD(n_components=100, random_state=0).fit_transform(vectors)
tsne = TSNE(n_components=2, perplexity=110, verbose=2).fit_transform(X_reduced)
code
2005328/cell_8
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

messages = pd.read_csv('../input/spam.csv', encoding='latin-1')
messages.drop(['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], axis=1, inplace=True)
messages = messages.rename(columns={'v1': 'class', 'v2': 'text'})

# NOTE: 'words_len' is not created anywhere in this dump; it is assumed
# to be the per-message word count.
messages['words_len'] = messages['text'].apply(lambda t: len(t.split()))

f, ax = plt.subplots(figsize=(12, 8))
sns.stripplot(x='class', y='words_len', data=messages)
plt.title('Number of words per class')
code
2005328/cell_15
[ "text_plain_output_1.png", "image_output_1.png" ]
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import string

messages = pd.read_csv('../input/spam.csv', encoding='latin-1')
messages.drop(['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], axis=1, inplace=True)
messages = messages.rename(columns={'v1': 'class', 'v2': 'text'})

def process_text(text):
    nopunc = [char for char in text if char not in string.punctuation]
    nopunc = ''.join(nopunc)
    clean_words = [w for w in nopunc.split() if w.lower() not in stopwords.words('english')]
    return clean_words

messages['text'].apply(process_text).head()

# NOTE: the train/test split is not part of this cell dump; it is assumed
# here so the cell runs standalone.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(messages['text'], messages['class'], test_size=0.2, random_state=0)

pipeline = Pipeline([('bow', CountVectorizer(analyzer=process_text)),
                     ('tfidf', TfidfTransformer()),
                     ('classifier', MultinomialNB())])
pipeline.fit(X_train, y_train)
predictions = pipeline.predict(X_test)
print(classification_report(y_test, predictions))
code
2005328/cell_16
[ "text_plain_output_1.png", "image_output_1.png" ]
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import string

messages = pd.read_csv('../input/spam.csv', encoding='latin-1')
messages.drop(['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], axis=1, inplace=True)
messages = messages.rename(columns={'v1': 'class', 'v2': 'text'})

# NOTE: 'words_len'/'char_len' and the train/test split are not part of
# this cell dump; both are assumed below so the cell runs standalone.
messages['words_len'] = messages['text'].apply(lambda t: len(t.split()))
messages['char_len'] = messages['text'].apply(len)

f, ax = plt.subplots(figsize=(12, 8))
sns.stripplot(x='class', y='words_len', data=messages)
plt.title('Number of words per class')

f, ax = plt.subplots(figsize=(12, 8))
sns.stripplot(x='class', y='char_len', data=messages)
plt.title('Number of characters per class')

def process_text(text):
    nopunc = [char for char in text if char not in string.punctuation]
    nopunc = ''.join(nopunc)
    clean_words = [w for w in nopunc.split() if w.lower() not in stopwords.words('english')]
    return clean_words

messages['text'].apply(process_text).head()

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(messages['text'], messages['class'], test_size=0.2, random_state=0)

pipeline = Pipeline([('bow', CountVectorizer(analyzer=process_text)),
                     ('tfidf', TfidfTransformer()),
                     ('classifier', MultinomialNB())])
pipeline.fit(X_train, y_train)
predictions = pipeline.predict(X_test)
sns.heatmap(confusion_matrix(y_test, predictions), annot=True)
code
2005328/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

messages = pd.read_csv('../input/spam.csv', encoding='latin-1')
messages.head()
code
2005328/cell_10
[ "text_html_output_1.png" ]
from nltk.corpus import stopwords
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import string

messages = pd.read_csv('../input/spam.csv', encoding='latin-1')
messages.drop(['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], axis=1, inplace=True)
messages = messages.rename(columns={'v1': 'class', 'v2': 'text'})

def process_text(text):
    nopunc = [char for char in text if char not in string.punctuation]
    nopunc = ''.join(nopunc)
    clean_words = [w for w in nopunc.split() if w.lower() not in stopwords.words('english')]
    return clean_words

messages['text'].apply(process_text).head()
code
128029591/cell_21
[ "image_output_1.png" ]
0.101 * 1141 + 16.57  # hand-computed CO2 prediction for Mass = 1141, using the printed Mass-model coefficients
code
128029591/cell_13
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns

cars_data = pd.read_csv('../input/aqalds/AQA-large-data-set.csv')
cars_data
cleaned_cars_data = cars_data[(cars_data['EngineSize'] > 0) & (cars_data['Mass'] > 0) & (cars_data['CO2'] > 0)].copy()
cleaned_cars_data['PropulsionType'] = cars_data['PropulsionTypeId'].replace({1: 'Petrol', 2: 'Diesel', 3: 'Electric', 7: 'Gas/Petrol', 8: 'Electric/Petrol'})
cleaned_cars_data
sns.relplot(data=cleaned_cars_data, x='EngineSize', y='CO2', hue='PropulsionType', aspect=2)
code
128029591/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns

cars_data = pd.read_csv('../input/aqalds/AQA-large-data-set.csv')
cars_data
cleaned_cars_data = cars_data[(cars_data['EngineSize'] > 0) & (cars_data['Mass'] > 0) & (cars_data['CO2'] > 0)].copy()
cleaned_cars_data['PropulsionType'] = cars_data['PropulsionTypeId'].replace({1: 'Petrol', 2: 'Diesel', 3: 'Electric', 7: 'Gas/Petrol', 8: 'Electric/Petrol'})
cleaned_cars_data
sns.relplot(data=cleaned_cars_data, x='EngineSize', y='CO2', aspect=2)
code
128029591/cell_34
[ "text_plain_output_1.png" ]
import pandas as pd

cars_data = pd.read_csv('../input/aqalds/AQA-large-data-set.csv')
cars_data
cleaned_cars_data = cars_data[(cars_data['EngineSize'] > 0) & (cars_data['Mass'] > 0) & (cars_data['CO2'] > 0)].copy()
cleaned_cars_data['PropulsionType'] = cars_data['PropulsionTypeId'].replace({1: 'Petrol', 2: 'Diesel', 3: 'Electric', 7: 'Gas/Petrol', 8: 'Electric/Petrol'})
cleaned_cars_data
diesel_data = cleaned_cars_data[cleaned_cars_data['PropulsionType'] == 'Diesel'].copy()
diesel_data
code
128029591/cell_20
[ "image_output_1.png" ]
0.101 * 1355 + 16.57  # hand-computed CO2 prediction for Mass = 1355, using the printed Mass-model coefficients
code
128029591/cell_29
[ "text_plain_output_1.png" ]
0.0604 * 998 + 57.087  # hand-computed CO2 prediction for EngineSize = 998, using the printed EngineSize-model coefficients
code
128029591/cell_11
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns

cars_data = pd.read_csv('../input/aqalds/AQA-large-data-set.csv')
cars_data
cleaned_cars_data = cars_data[(cars_data['EngineSize'] > 0) & (cars_data['Mass'] > 0) & (cars_data['CO2'] > 0)].copy()
cleaned_cars_data['PropulsionType'] = cars_data['PropulsionTypeId'].replace({1: 'Petrol', 2: 'Diesel', 3: 'Electric', 7: 'Gas/Petrol', 8: 'Electric/Petrol'})
cleaned_cars_data
sns.relplot(data=cleaned_cars_data, x='Mass', y='CO2', hue='PropulsionType', aspect=2)
code
128029591/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns

cars_data = pd.read_csv('../input/aqalds/AQA-large-data-set.csv')
cars_data
cleaned_cars_data = cars_data[(cars_data['EngineSize'] > 0) & (cars_data['Mass'] > 0) & (cars_data['CO2'] > 0)].copy()
cleaned_cars_data['PropulsionType'] = cars_data['PropulsionTypeId'].replace({1: 'Petrol', 2: 'Diesel', 3: 'Electric', 7: 'Gas/Petrol', 8: 'Electric/Petrol'})
cleaned_cars_data
sns.relplot(data=cleaned_cars_data, x='Mass', y='CO2', aspect=2)
code
128029591/cell_18
[ "image_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
import pandas as pd

cars_data = pd.read_csv('../input/aqalds/AQA-large-data-set.csv')
cars_data
cleaned_cars_data = cars_data[(cars_data['EngineSize'] > 0) & (cars_data['Mass'] > 0) & (cars_data['CO2'] > 0)].copy()
cleaned_cars_data['PropulsionType'] = cars_data['PropulsionTypeId'].replace({1: 'Petrol', 2: 'Diesel', 3: 'Electric', 7: 'Gas/Petrol', 8: 'Electric/Petrol'})
cleaned_cars_data
petrol_data = cleaned_cars_data[cleaned_cars_data['PropulsionType'] == 'Petrol'].copy()
petrol_data

# Fit CO2 ~ Mass for petrol cars on a 75/25 train/test split.
input_features = ['Mass']
input_data = petrol_data[input_features]
target_data = petrol_data['CO2']
input_train, input_test, target_train, target_test = train_test_split(input_data, target_data, train_size=0.75, random_state=1)
linear_model = LinearRegression().fit(input_train, target_train)
print('Coefficients: ', linear_model.coef_.round(3))
print('Intercept: ', linear_model.intercept_.round(3))
target_pred = linear_model.predict(input_test)
print('R²: ', r2_score(target_test, target_pred).round(3))
code
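The hand-computed predictions in nearby cells (e.g. 0.101 * 1355 + 16.57) plug example masses into the printed coefficients; the same numbers can be obtained from the fitted model directly. A small sketch, reusing linear_model from the cell above (the 1355 kg input is just an example):

# Predict CO2 for an example 1355 kg petrol car with the fitted model;
# equivalent to coef * 1355 + intercept.
example = pd.DataFrame({'Mass': [1355]})
print(linear_model.predict(example))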
128029591/cell_28
[ "text_plain_output_1.png" ]
0.0604 * 1398 + 57.087  # hand-computed CO2 prediction for EngineSize = 1398, using the printed EngineSize-model coefficients
code
128029591/cell_16
[ "image_output_1.png" ]
import pandas as pd

cars_data = pd.read_csv('../input/aqalds/AQA-large-data-set.csv')
cars_data
cleaned_cars_data = cars_data[(cars_data['EngineSize'] > 0) & (cars_data['Mass'] > 0) & (cars_data['CO2'] > 0)].copy()
cleaned_cars_data['PropulsionType'] = cars_data['PropulsionTypeId'].replace({1: 'Petrol', 2: 'Diesel', 3: 'Electric', 7: 'Gas/Petrol', 8: 'Electric/Petrol'})
cleaned_cars_data
petrol_data = cleaned_cars_data[cleaned_cars_data['PropulsionType'] == 'Petrol'].copy()
petrol_data
code
128029591/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd

cars_data = pd.read_csv('../input/aqalds/AQA-large-data-set.csv')
cars_data
code
128029591/cell_31
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
import pandas as pd

cars_data = pd.read_csv('../input/aqalds/AQA-large-data-set.csv')
cars_data
cleaned_cars_data = cars_data[(cars_data['EngineSize'] > 0) & (cars_data['Mass'] > 0) & (cars_data['CO2'] > 0)].copy()
cleaned_cars_data['PropulsionType'] = cars_data['PropulsionTypeId'].replace({1: 'Petrol', 2: 'Diesel', 3: 'Electric', 7: 'Gas/Petrol', 8: 'Electric/Petrol'})
cleaned_cars_data
petrol_data = cleaned_cars_data[cleaned_cars_data['PropulsionType'] == 'Petrol'].copy()
petrol_data

# CO2 ~ Mass (petrol)
input_features = ['Mass']
input_data = petrol_data[input_features]
target_data = petrol_data['CO2']
input_train, input_test, target_train, target_test = train_test_split(input_data, target_data, train_size=0.75, random_state=1)
linear_model = LinearRegression().fit(input_train, target_train)
target_pred = linear_model.predict(input_test)

# CO2 ~ EngineSize (petrol)
input_features = ['EngineSize']
input_data = petrol_data[input_features]
target_data = petrol_data['CO2']
input_train, input_test, target_train, target_test = train_test_split(input_data, target_data, train_size=0.75, random_state=1)
linear_model = LinearRegression().fit(input_train, target_train)
target_pred = linear_model.predict(input_test)

# CO2 ~ Mass + EngineSize (petrol)
input_features = ['Mass', 'EngineSize']
input_data = petrol_data[input_features]
target_data = petrol_data['CO2']
input_train, input_test, target_train, target_test = train_test_split(input_data, target_data, train_size=0.75, random_state=1)
linear_model = LinearRegression().fit(input_train, target_train)
print('Coefficients: ', linear_model.coef_.round(4))
print('Intercept: ', linear_model.intercept_.round(3))
target_pred = linear_model.predict(input_test)
print('R²: ', r2_score(target_test, target_pred).round(3))
code
128029591/cell_24
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
import pandas as pd

cars_data = pd.read_csv('../input/aqalds/AQA-large-data-set.csv')
cars_data
cleaned_cars_data = cars_data[(cars_data['EngineSize'] > 0) & (cars_data['Mass'] > 0) & (cars_data['CO2'] > 0)].copy()
cleaned_cars_data['PropulsionType'] = cars_data['PropulsionTypeId'].replace({1: 'Petrol', 2: 'Diesel', 3: 'Electric', 7: 'Gas/Petrol', 8: 'Electric/Petrol'})
cleaned_cars_data
petrol_data = cleaned_cars_data[cleaned_cars_data['PropulsionType'] == 'Petrol'].copy()
petrol_data

# CO2 ~ Mass (petrol)
input_features = ['Mass']
input_data = petrol_data[input_features]
target_data = petrol_data['CO2']
input_train, input_test, target_train, target_test = train_test_split(input_data, target_data, train_size=0.75, random_state=1)
linear_model = LinearRegression().fit(input_train, target_train)
target_pred = linear_model.predict(input_test)

# CO2 ~ EngineSize (petrol)
input_features = ['EngineSize']
input_data = petrol_data[input_features]
target_data = petrol_data['CO2']
input_train, input_test, target_train, target_test = train_test_split(input_data, target_data, train_size=0.75, random_state=1)
linear_model = LinearRegression().fit(input_train, target_train)
print('Coefficients: ', linear_model.coef_.round(4))
print('Intercept: ', linear_model.intercept_.round(3))
target_pred = linear_model.predict(input_test)
print('R²: ', r2_score(target_test, target_pred).round(3))
code
128029591/cell_22
[ "text_html_output_1.png" ]
0.101 * 1177 + 16.57  # hand-computed CO2 prediction for Mass = 1177, using the printed Mass-model coefficients
code
128029591/cell_27
[ "text_plain_output_1.png" ]
0.0604 * 1598 + 57.087  # hand-computed CO2 prediction for EngineSize = 1598, using the printed EngineSize-model coefficients
code
128029591/cell_37
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
import pandas as pd

cars_data = pd.read_csv('../input/aqalds/AQA-large-data-set.csv')
cars_data
cleaned_cars_data = cars_data[(cars_data['EngineSize'] > 0) & (cars_data['Mass'] > 0) & (cars_data['CO2'] > 0)].copy()
cleaned_cars_data['PropulsionType'] = cars_data['PropulsionTypeId'].replace({1: 'Petrol', 2: 'Diesel', 3: 'Electric', 7: 'Gas/Petrol', 8: 'Electric/Petrol'})
cleaned_cars_data
petrol_data = cleaned_cars_data[cleaned_cars_data['PropulsionType'] == 'Petrol'].copy()
petrol_data

# CO2 ~ Mass (petrol)
input_features = ['Mass']
input_data = petrol_data[input_features]
target_data = petrol_data['CO2']
input_train, input_test, target_train, target_test = train_test_split(input_data, target_data, train_size=0.75, random_state=1)
linear_model = LinearRegression().fit(input_train, target_train)
target_pred = linear_model.predict(input_test)

# CO2 ~ EngineSize (petrol)
input_features = ['EngineSize']
input_data = petrol_data[input_features]
target_data = petrol_data['CO2']
input_train, input_test, target_train, target_test = train_test_split(input_data, target_data, train_size=0.75, random_state=1)
linear_model = LinearRegression().fit(input_train, target_train)
target_pred = linear_model.predict(input_test)

# CO2 ~ Mass + EngineSize (petrol)
input_features = ['Mass', 'EngineSize']
input_data = petrol_data[input_features]
target_data = petrol_data['CO2']
input_train, input_test, target_train, target_test = train_test_split(input_data, target_data, train_size=0.75, random_state=1)
linear_model = LinearRegression().fit(input_train, target_train)
target_pred = linear_model.predict(input_test)

diesel_data = cleaned_cars_data[cleaned_cars_data['PropulsionType'] == 'Diesel'].copy()
diesel_data

# CO2 ~ Mass (diesel)
input_features = ['Mass']
input_data = diesel_data[input_features]
target_data = diesel_data['CO2']
input_train, input_test, target_train, target_test = train_test_split(input_data, target_data, train_size=0.75, random_state=1)
linear_model = LinearRegression().fit(input_train, target_train)
target_pred = linear_model.predict(input_test)

# CO2 ~ EngineSize (diesel)
input_features = ['EngineSize']
input_data = diesel_data[input_features]
target_data = diesel_data['CO2']
input_train, input_test, target_train, target_test = train_test_split(input_data, target_data, train_size=0.75, random_state=1)
linear_model = LinearRegression().fit(input_train, target_train)
print('Coefficients: ', linear_model.coef_.round(4))
print('Intercept: ', linear_model.intercept_.round(3))
target_pred = linear_model.predict(input_test)
print('R²: ', r2_score(target_test, target_pred).round(3))
code
128029591/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

cars_data = pd.read_csv('../input/aqalds/AQA-large-data-set.csv')
cars_data
cleaned_cars_data = cars_data[(cars_data['EngineSize'] > 0) & (cars_data['Mass'] > 0) & (cars_data['CO2'] > 0)].copy()
cleaned_cars_data['PropulsionType'] = cars_data['PropulsionTypeId'].replace({1: 'Petrol', 2: 'Diesel', 3: 'Electric', 7: 'Gas/Petrol', 8: 'Electric/Petrol'})
cleaned_cars_data
code
128029591/cell_36
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
import pandas as pd

cars_data = pd.read_csv('../input/aqalds/AQA-large-data-set.csv')
cars_data
cleaned_cars_data = cars_data[(cars_data['EngineSize'] > 0) & (cars_data['Mass'] > 0) & (cars_data['CO2'] > 0)].copy()
cleaned_cars_data['PropulsionType'] = cars_data['PropulsionTypeId'].replace({1: 'Petrol', 2: 'Diesel', 3: 'Electric', 7: 'Gas/Petrol', 8: 'Electric/Petrol'})
cleaned_cars_data
petrol_data = cleaned_cars_data[cleaned_cars_data['PropulsionType'] == 'Petrol'].copy()
petrol_data

# CO2 ~ Mass (petrol)
input_features = ['Mass']
input_data = petrol_data[input_features]
target_data = petrol_data['CO2']
input_train, input_test, target_train, target_test = train_test_split(input_data, target_data, train_size=0.75, random_state=1)
linear_model = LinearRegression().fit(input_train, target_train)
target_pred = linear_model.predict(input_test)

# CO2 ~ EngineSize (petrol)
input_features = ['EngineSize']
input_data = petrol_data[input_features]
target_data = petrol_data['CO2']
input_train, input_test, target_train, target_test = train_test_split(input_data, target_data, train_size=0.75, random_state=1)
linear_model = LinearRegression().fit(input_train, target_train)
target_pred = linear_model.predict(input_test)

# CO2 ~ Mass + EngineSize (petrol)
input_features = ['Mass', 'EngineSize']
input_data = petrol_data[input_features]
target_data = petrol_data['CO2']
input_train, input_test, target_train, target_test = train_test_split(input_data, target_data, train_size=0.75, random_state=1)
linear_model = LinearRegression().fit(input_train, target_train)
target_pred = linear_model.predict(input_test)

diesel_data = cleaned_cars_data[cleaned_cars_data['PropulsionType'] == 'Diesel'].copy()
diesel_data

# CO2 ~ Mass (diesel)
input_features = ['Mass']
input_data = diesel_data[input_features]
target_data = diesel_data['CO2']
input_train, input_test, target_train, target_test = train_test_split(input_data, target_data, train_size=0.75, random_state=1)
linear_model = LinearRegression().fit(input_train, target_train)
print('Coefficients: ', linear_model.coef_.round(4))
print('Intercept: ', linear_model.intercept_.round(3))
target_pred = linear_model.predict(input_test)
print('R²: ', r2_score(target_test, target_pred).round(3))
code
122255316/cell_25
[ "text_plain_output_1.png" ]
cat_missing_cols = ['country']
cat_missing_cols
code
122255316/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/covid-cases-and-deaths-worldwide/covid_worldwide.csv')
data
column_change = ['serial number', 'country', 'total_cases', 'total_deaths', 'total_recovered', 'active_cases', 'total_test', 'population']
data.columns = column_change
# Strip thousands separators and convert the count columns to floats.
data['total_cases'] = data['total_cases'].str.replace(',', '').astype('float64')
data['total_deaths'] = data['total_deaths'].str.replace(',', '').astype('float64')
data['total_recovered'] = data['total_recovered'].str.replace(',', '').astype('float64')
data['active_cases'] = data['active_cases'].str.replace(',', '').astype('float64')
data['total_test'] = data['total_test'].str.replace(',', '').astype('float64')
data['population'] = data['population'].str.replace(',', '').astype('float64')
data.info()
code
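The six conversions above repeat the same pattern per column; an equivalent, more compact form (same behaviour, just a sketch) is:

# Same cleaning in one pass over the numeric count columns.
num_cols = ['total_cases', 'total_deaths', 'total_recovered', 'active_cases', 'total_test', 'population']
data[num_cols] = data[num_cols].apply(lambda s: s.str.replace(',', '').astype('float64'))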
122255316/cell_23
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/covid-cases-and-deaths-worldwide/covid_worldwide.csv')
data
column_change = ['serial number', 'country', 'total_cases', 'total_deaths', 'total_recovered', 'active_cases', 'total_test', 'population']
data.columns = column_change
data['total_cases'] = data['total_cases'].str.replace(',', '').astype('float64')
data['total_deaths'] = data['total_deaths'].str.replace(',', '').astype('float64')
data['total_recovered'] = data['total_recovered'].str.replace(',', '').astype('float64')
data['active_cases'] = data['active_cases'].str.replace(',', '').astype('float64')
data['total_test'] = data['total_test'].str.replace(',', '').astype('float64')
data['population'] = data['population'].str.replace(',', '').astype('float64')

missing_c = [col for col in data.columns if data[col].isnull().sum() > 0]
missing_c
data.select_dtypes(include=['object']).columns.tolist()

def get_numerical_summary(df):
    total = df.shape[0]
    missing_columns = [col for col in df.columns if df[col].isnull().sum() > 0]
    missing_percent = {}
    for col in missing_columns:
        null_count = df[col].isnull().sum()
        per = null_count / total * 100
        missing_percent[col] = per
    return missing_percent

missing_percent = get_numerical_summary(data)

data_temp = data.copy()
features_thread = 25
for col, per in missing_percent.items():
    if per > features_thread:
        data_temp.drop(col, axis=1, inplace=True)

df_temp = data.copy()
for idx in range(df_temp.shape[0]):
    df_temp.loc[idx, 'missing_count'] = df_temp.iloc[idx, :].isnull().sum()

SAMPLE_THRESHOLD = 5
print('Samples Before Removal : {}'.format(df_temp.shape[0]))
df_temp.drop(df_temp[df_temp['missing_count'] > SAMPLE_THRESHOLD].index, axis=0, inplace=True)
print('Samples After Removal : {}'.format(df_temp.shape[0]))
code
122255316/cell_30
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/covid-cases-and-deaths-worldwide/covid_worldwide.csv')
data
column_change = ['serial number', 'country', 'total_cases', 'total_deaths', 'total_recovered', 'active_cases', 'total_test', 'population']
data.columns = column_change
data['total_cases'] = data['total_cases'].str.replace(',', '').astype('float64')
data['total_deaths'] = data['total_deaths'].str.replace(',', '').astype('float64')
data['total_recovered'] = data['total_recovered'].str.replace(',', '').astype('float64')
data['active_cases'] = data['active_cases'].str.replace(',', '').astype('float64')
data['total_test'] = data['total_test'].str.replace(',', '').astype('float64')
data['population'] = data['population'].str.replace(',', '').astype('float64')

missing_c = [col for col in data.columns if data[col].isnull().sum() > 0]
missing_c
data.select_dtypes(include=['object']).columns.tolist()

def get_numerical_summary(df):
    total = df.shape[0]
    missing_columns = [col for col in df.columns if df[col].isnull().sum() > 0]
    missing_percent = {}
    for col in missing_columns:
        null_count = df[col].isnull().sum()
        per = null_count / total * 100
        missing_percent[col] = per
    return missing_percent

missing_percent = get_numerical_summary(data)

data_temp = data.copy()
features_thread = 25
for col, per in missing_percent.items():
    if per > features_thread:
        data_temp.drop(col, axis=1, inplace=True)

df_temp = data.copy()
for idx in range(df_temp.shape[0]):
    df_temp.loc[idx, 'missing_count'] = df_temp.iloc[idx, :].isnull().sum()

SAMPLE_THRESHOLD = 5
df_temp.drop(df_temp[df_temp['missing_count'] > SAMPLE_THRESHOLD].index, axis=0, inplace=True)

from sklearn.impute import KNNImputer
df_temp = data.copy()
num_cols = df_temp.columns[2:]
print(num_cols)
df_temp = df_temp[num_cols]
code
122255316/cell_33
[ "text_html_output_1.png" ]
from sklearn.impute import KNNImputer
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/covid-cases-and-deaths-worldwide/covid_worldwide.csv')
data
column_change = ['serial number', 'country', 'total_cases', 'total_deaths', 'total_recovered', 'active_cases', 'total_test', 'population']
data.columns = column_change
data['total_cases'] = data['total_cases'].str.replace(',', '').astype('float64')
data['total_deaths'] = data['total_deaths'].str.replace(',', '').astype('float64')
data['total_recovered'] = data['total_recovered'].str.replace(',', '').astype('float64')
data['active_cases'] = data['active_cases'].str.replace(',', '').astype('float64')
data['total_test'] = data['total_test'].str.replace(',', '').astype('float64')
data['population'] = data['population'].str.replace(',', '').astype('float64')

missing_c = [col for col in data.columns if data[col].isnull().sum() > 0]
missing_c
data.select_dtypes(include=['object']).columns.tolist()

def get_numerical_summary(df):
    total = df.shape[0]
    missing_columns = [col for col in df.columns if df[col].isnull().sum() > 0]
    missing_percent = {}
    for col in missing_columns:
        null_count = df[col].isnull().sum()
        per = null_count / total * 100
        missing_percent[col] = per
    return missing_percent

missing_percent = get_numerical_summary(data)

data_temp = data.copy()
features_thread = 25
for col, per in missing_percent.items():
    if per > features_thread:
        data_temp.drop(col, axis=1, inplace=True)

df_temp = data.copy()
for idx in range(df_temp.shape[0]):
    df_temp.loc[idx, 'missing_count'] = df_temp.iloc[idx, :].isnull().sum()

SAMPLE_THRESHOLD = 5
df_temp.drop(df_temp[df_temp['missing_count'] > SAMPLE_THRESHOLD].index, axis=0, inplace=True)

# Impute the numeric columns with 3-nearest-neighbour averaging.
df_temp = data.copy()
num_cols = df_temp.columns[2:]
df_temp = df_temp[num_cols]
knn = KNNImputer(n_neighbors=3)
knn.fit(df_temp)
X = knn.transform(df_temp)
df_temp = pd.DataFrame(X, columns=num_cols)
df_temp
code
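KNNImputer returns a bare NumPy array, so the cell above rebuilds a DataFrame from it; a short hedged check-and-reassemble step (identifier column names taken from the renaming in cell_4) is:

# Verify the imputation left no gaps, then reattach the identifier columns
# that were dropped before imputing.
assert df_temp.isnull().sum().sum() == 0
df_imputed = pd.concat([data[['serial number', 'country']].reset_index(drop=True), df_temp], axis=1)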
122255316/cell_44
[ "text_plain_output_1.png" ]
from sklearn.ensemble import BaggingRegressor
from sklearn.impute import KNNImputer
from sklearn.tree import DecisionTreeRegressor
import missingno as msno
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from matplotlib import pyplot as plt

data = pd.read_csv('/kaggle/input/covid-cases-and-deaths-worldwide/covid_worldwide.csv')
data
column_change = ['serial number', 'country', 'total_cases', 'total_deaths', 'total_recovered', 'active_cases', 'total_test', 'population']
data.columns = column_change
data['total_cases'] = data['total_cases'].str.replace(',', '').astype('float64')
data['total_deaths'] = data['total_deaths'].str.replace(',', '').astype('float64')
data['total_recovered'] = data['total_recovered'].str.replace(',', '').astype('float64')
data['active_cases'] = data['active_cases'].str.replace(',', '').astype('float64')
data['total_test'] = data['total_test'].str.replace(',', '').astype('float64')
data['population'] = data['population'].str.replace(',', '').astype('float64')

msno.matrix(data)
missing_c = [col for col in data.columns if data[col].isnull().sum() > 0]
missing_c
msno.dendrogram(data[missing_c])
data.select_dtypes(include=['object']).columns.tolist()

def get_numerical_summary(df):
    total = df.shape[0]
    missing_columns = [col for col in df.columns if df[col].isnull().sum() > 0]
    missing_percent = {}
    for col in missing_columns:
        null_count = df[col].isnull().sum()
        per = null_count / total * 100
        missing_percent[col] = per
    return missing_percent

missing_percent = get_numerical_summary(data)

data_temp = data.copy()
features_thread = 25
for col, per in missing_percent.items():
    if per > features_thread:
        data_temp.drop(col, axis=1, inplace=True)

df_temp = data.copy()
for idx in range(df_temp.shape[0]):
    df_temp.loc[idx, 'missing_count'] = df_temp.iloc[idx, :].isnull().sum()

SAMPLE_THRESHOLD = 5
df_temp.drop(df_temp[df_temp['missing_count'] > SAMPLE_THRESHOLD].index, axis=0, inplace=True)

df_temp = data.copy()
num_cols = df_temp.columns[2:]
df_temp = df_temp[num_cols]
knn = KNNImputer(n_neighbors=3)
knn.fit(df_temp)
X = knn.transform(df_temp)
df_temp = pd.DataFrame(X, columns=num_cols)
df_temp

df_temp = data.copy()

# NOTE: the tree models below can only fit numeric features; the object
# columns are label-encoded here (as cell_45 does), otherwise model.fit
# would fail on the string-typed 'country' column.
from sklearn.preprocessing import LabelEncoder
lb = LabelEncoder()
for col in df_temp.select_dtypes(include='object').columns:
    df_temp[col] = lb.fit_transform(df_temp[col])

def tree_imputation(df):
    # Impute each column that has missing values by regressing it on the
    # fully-observed columns with a bagged decision-tree ensemble.
    missing_cols = [col for col in df.columns if df[col].isnull().sum() > 0]
    non_missing_cols = [col for col in df.columns if df[col].isnull().sum() == 0]
    for col in missing_cols:
        model = BaggingRegressor(DecisionTreeRegressor(), n_estimators=40, max_samples=1.0, max_features=1.0, bootstrap=False, n_jobs=-1)
        col_missing = df[df[col].isnull()]
        temp = df.drop(df[df[col].isnull()].index, axis=0)
        X = temp.loc[:, non_missing_cols]
        y = temp[col]
        model.fit(X, y)
        y_pred = model.predict(col_missing[non_missing_cols])
        df.loc[col_missing.index, col] = y_pred
    return df

df_new = tree_imputation(df_temp)
msno.bar(df_new)
plt.show()
code
122255316/cell_29
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/covid-cases-and-deaths-worldwide/covid_worldwide.csv')
data
column_change = ['serial number', 'country', 'total_cases', 'total_deaths', 'total_recovered', 'active_cases', 'total_test', 'population']
data.columns = column_change
data['total_cases'] = data['total_cases'].str.replace(',', '').astype('float64')
data['total_deaths'] = data['total_deaths'].str.replace(',', '').astype('float64')
data['total_recovered'] = data['total_recovered'].str.replace(',', '').astype('float64')
data['active_cases'] = data['active_cases'].str.replace(',', '').astype('float64')
data['total_test'] = data['total_test'].str.replace(',', '').astype('float64')
data['population'] = data['population'].str.replace(',', '').astype('float64')

missing_c = [col for col in data.columns if data[col].isnull().sum() > 0]
missing_c
data.select_dtypes(include=['object']).columns.tolist()

def get_numerical_summary(df):
    total = df.shape[0]
    missing_columns = [col for col in df.columns if df[col].isnull().sum() > 0]
    missing_percent = {}
    for col in missing_columns:
        null_count = df[col].isnull().sum()
        per = null_count / total * 100
        missing_percent[col] = per
    return missing_percent

missing_percent = get_numerical_summary(data)

data_temp = data.copy()
features_thread = 25
for col, per in missing_percent.items():
    if per > features_thread:
        data_temp.drop(col, axis=1, inplace=True)

df_temp = data.copy()
for idx in range(df_temp.shape[0]):
    df_temp.loc[idx, 'missing_count'] = df_temp.iloc[idx, :].isnull().sum()

SAMPLE_THRESHOLD = 5
df_temp.drop(df_temp[df_temp['missing_count'] > SAMPLE_THRESHOLD].index, axis=0, inplace=True)

from sklearn.impute import KNNImputer
df_temp = data.copy()
df_temp
code
122255316/cell_26
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/covid-cases-and-deaths-worldwide/covid_worldwide.csv')
data
column_change = ['serial number', 'country', 'total_cases', 'total_deaths', 'total_recovered', 'active_cases', 'total_test', 'population']
data.columns = column_change
data['total_cases'] = data['total_cases'].str.replace(',', '').astype('float64')
data['total_deaths'] = data['total_deaths'].str.replace(',', '').astype('float64')
data['total_recovered'] = data['total_recovered'].str.replace(',', '').astype('float64')
data['active_cases'] = data['active_cases'].str.replace(',', '').astype('float64')
data['total_test'] = data['total_test'].str.replace(',', '').astype('float64')
data['population'] = data['population'].str.replace(',', '').astype('float64')

missing_c = [col for col in data.columns if data[col].isnull().sum() > 0]
missing_c
data.select_dtypes(include=['object']).columns.tolist()

def get_numerical_summary(df):
    total = df.shape[0]
    missing_columns = [col for col in df.columns if df[col].isnull().sum() > 0]
    missing_percent = {}
    for col in missing_columns:
        null_count = df[col].isnull().sum()
        per = null_count / total * 100
        missing_percent[col] = per
    return missing_percent

missing_percent = get_numerical_summary(data)

data_temp = data.copy()
features_thread = 25
for col, per in missing_percent.items():
    if per > features_thread:
        data_temp.drop(col, axis=1, inplace=True)

df_temp = data.copy()
for idx in range(df_temp.shape[0]):
    df_temp.loc[idx, 'missing_count'] = df_temp.iloc[idx, :].isnull().sum()

cat_missing_cols = ['country']
cat_missing_cols
data[cat_missing_cols] = data[cat_missing_cols].fillna('Missing')
data
code
122255316/cell_41
[ "text_plain_output_1.png" ]
from sklearn.impute import KNNImputer
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/covid-cases-and-deaths-worldwide/covid_worldwide.csv')
data
column_change = ['serial number', 'country', 'total_cases', 'total_deaths', 'total_recovered', 'active_cases', 'total_test', 'population']
data.columns = column_change
data['total_cases'] = data['total_cases'].str.replace(',', '').astype('float64')
data['total_deaths'] = data['total_deaths'].str.replace(',', '').astype('float64')
data['total_recovered'] = data['total_recovered'].str.replace(',', '').astype('float64')
data['active_cases'] = data['active_cases'].str.replace(',', '').astype('float64')
data['total_test'] = data['total_test'].str.replace(',', '').astype('float64')
data['population'] = data['population'].str.replace(',', '').astype('float64')

missing_c = [col for col in data.columns if data[col].isnull().sum() > 0]
missing_c
data.select_dtypes(include=['object']).columns.tolist()

def get_numerical_summary(df):
    total = df.shape[0]
    missing_columns = [col for col in df.columns if df[col].isnull().sum() > 0]
    missing_percent = {}
    for col in missing_columns:
        null_count = df[col].isnull().sum()
        per = null_count / total * 100
        missing_percent[col] = per
    return missing_percent

missing_percent = get_numerical_summary(data)

data_temp = data.copy()
features_thread = 25
for col, per in missing_percent.items():
    if per > features_thread:
        data_temp.drop(col, axis=1, inplace=True)

df_temp = data.copy()
for idx in range(df_temp.shape[0]):
    df_temp.loc[idx, 'missing_count'] = df_temp.iloc[idx, :].isnull().sum()

SAMPLE_THRESHOLD = 5
df_temp.drop(df_temp[df_temp['missing_count'] > SAMPLE_THRESHOLD].index, axis=0, inplace=True)

df_temp = data.copy()
num_cols = df_temp.columns[2:]
df_temp = df_temp[num_cols]
knn = KNNImputer(n_neighbors=3)
knn.fit(df_temp)
X = knn.transform(df_temp)
df_temp = pd.DataFrame(X, columns=num_cols)
df_temp

df_temp = data.copy()
df_temp
code
122255316/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/covid-cases-and-deaths-worldwide/covid_worldwide.csv')
data
code
122255316/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/covid-cases-and-deaths-worldwide/covid_worldwide.csv')
data
column_change = ['serial number', 'country', 'total_cases', 'total_deaths', 'total_recovered', 'active_cases', 'total_test', 'population']
data.columns = column_change
data['total_cases'] = data['total_cases'].str.replace(',', '').astype('float64')
data['total_deaths'] = data['total_deaths'].str.replace(',', '').astype('float64')
data['total_recovered'] = data['total_recovered'].str.replace(',', '').astype('float64')
data['active_cases'] = data['active_cases'].str.replace(',', '').astype('float64')
data['total_test'] = data['total_test'].str.replace(',', '').astype('float64')
data['population'] = data['population'].str.replace(',', '').astype('float64')

missing_c = [col for col in data.columns if data[col].isnull().sum() > 0]
missing_c
data.select_dtypes(include=['object']).columns.tolist()
code
122255316/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
122255316/cell_7
[ "text_html_output_1.png" ]
import missingno as msno
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from matplotlib import pyplot as plt

data = pd.read_csv('/kaggle/input/covid-cases-and-deaths-worldwide/covid_worldwide.csv')
data
column_change = ['serial number', 'country', 'total_cases', 'total_deaths', 'total_recovered', 'active_cases', 'total_test', 'population']
data.columns = column_change
data['total_cases'] = data['total_cases'].str.replace(',', '').astype('float64')
data['total_deaths'] = data['total_deaths'].str.replace(',', '').astype('float64')
data['total_recovered'] = data['total_recovered'].str.replace(',', '').astype('float64')
data['active_cases'] = data['active_cases'].str.replace(',', '').astype('float64')
data['total_test'] = data['total_test'].str.replace(',', '').astype('float64')
data['population'] = data['population'].str.replace(',', '').astype('float64')

msno.matrix(data)
msno.heatmap(data, labels=True)
code
122255316/cell_45
[ "text_html_output_1.png" ]
from sklearn.ensemble import BaggingRegressor
from sklearn.impute import KNNImputer
from sklearn.preprocessing import LabelEncoder
from sklearn.tree import DecisionTreeRegressor
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/covid-cases-and-deaths-worldwide/covid_worldwide.csv')
data
column_change = ['serial number', 'country', 'total_cases', 'total_deaths', 'total_recovered', 'active_cases', 'total_test', 'population']
data.columns = column_change
data['total_cases'] = data['total_cases'].str.replace(',', '').astype('float64')
data['total_deaths'] = data['total_deaths'].str.replace(',', '').astype('float64')
data['total_recovered'] = data['total_recovered'].str.replace(',', '').astype('float64')
data['active_cases'] = data['active_cases'].str.replace(',', '').astype('float64')
data['total_test'] = data['total_test'].str.replace(',', '').astype('float64')
data['population'] = data['population'].str.replace(',', '').astype('float64')

missing_c = [col for col in data.columns if data[col].isnull().sum() > 0]
missing_c
data.select_dtypes(include=['object']).columns.tolist()

def get_numerical_summary(df):
    total = df.shape[0]
    missing_columns = [col for col in df.columns if df[col].isnull().sum() > 0]
    missing_percent = {}
    for col in missing_columns:
        null_count = df[col].isnull().sum()
        per = null_count / total * 100
        missing_percent[col] = per
    return missing_percent

missing_percent = get_numerical_summary(data)

data_temp = data.copy()
features_thread = 25
for col, per in missing_percent.items():
    if per > features_thread:
        data_temp.drop(col, axis=1, inplace=True)

df_temp = data.copy()
for idx in range(df_temp.shape[0]):
    df_temp.loc[idx, 'missing_count'] = df_temp.iloc[idx, :].isnull().sum()

SAMPLE_THRESHOLD = 5
df_temp.drop(df_temp[df_temp['missing_count'] > SAMPLE_THRESHOLD].index, axis=0, inplace=True)

df_temp = data.copy()
num_cols = df_temp.columns[2:]
df_temp = df_temp[num_cols]
knn = KNNImputer(n_neighbors=3)
knn.fit(df_temp)
X = knn.transform(df_temp)
df_temp = pd.DataFrame(X, columns=num_cols)
df_temp

df_temp = data.copy()

# Label-encode the object columns so the tree models below can fit them.
lb = LabelEncoder()
cat_cols = [col for col in data.columns if data[col].dtype == 'object']
for col in cat_cols:
    df_temp[col] = lb.fit_transform(df_temp[col])

def tree_imputation(df):
    # Impute each column that has missing values by regressing it on the
    # fully-observed columns with a bagged decision-tree ensemble.
    missing_cols = [col for col in df.columns if df[col].isnull().sum() > 0]
    non_missing_cols = [col for col in df.columns if df[col].isnull().sum() == 0]
    for col in missing_cols:
        model = BaggingRegressor(DecisionTreeRegressor(), n_estimators=40, max_samples=1.0, max_features=1.0, bootstrap=False, n_jobs=-1)
        col_missing = df[df[col].isnull()]
        temp = df.drop(df[df[col].isnull()].index, axis=0)
        X = temp.loc[:, non_missing_cols]
        y = temp[col]
        model.fit(X, y)
        y_pred = model.predict(col_missing[non_missing_cols])
        df.loc[col_missing.index, col] = y_pred
    return df

df_new = tree_imputation(df_temp)
# Restore the original (unencoded) categorical columns.
df_new = pd.concat([data[cat_cols], df_new.drop(cat_cols, axis=1)], axis=1)
df_new.head()
code
122255316/cell_18
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/covid-cases-and-deaths-worldwide/covid_worldwide.csv')
data
column_change = ['serial number', 'country', 'total_cases', 'total_deaths', 'total_recovered', 'active_cases', 'total_test', 'population']
data.columns = column_change
data['total_cases'] = data['total_cases'].str.replace(',', '').astype('float64')
data['total_deaths'] = data['total_deaths'].str.replace(',', '').astype('float64')
data['total_recovered'] = data['total_recovered'].str.replace(',', '').astype('float64')
data['active_cases'] = data['active_cases'].str.replace(',', '').astype('float64')
data['total_test'] = data['total_test'].str.replace(',', '').astype('float64')
data['population'] = data['population'].str.replace(',', '').astype('float64')

missing_c = [col for col in data.columns if data[col].isnull().sum() > 0]
missing_c
data.select_dtypes(include=['object']).columns.tolist()

def get_numerical_summary(df):
    total = df.shape[0]
    missing_columns = [col for col in df.columns if df[col].isnull().sum() > 0]
    missing_percent = {}
    for col in missing_columns:
        null_count = df[col].isnull().sum()
        per = null_count / total * 100
        missing_percent[col] = per
    return missing_percent

missing_percent = get_numerical_summary(data)

data_temp = data.copy()
features_thread = 25
for col, per in missing_percent.items():
    if per > features_thread:
        data_temp.drop(col, axis=1, inplace=True)

_ = get_numerical_summary(data_temp)
code
122255316/cell_8
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/covid-cases-and-deaths-worldwide/covid_worldwide.csv')
data
column_change = ['serial number', 'country', 'total_cases', 'total_deaths', 'total_recovered', 'active_cases', 'total_test', 'population']
data.columns = column_change
data['total_cases'] = data['total_cases'].str.replace(',', '').astype('float64')
data['total_deaths'] = data['total_deaths'].str.replace(',', '').astype('float64')
data['total_recovered'] = data['total_recovered'].str.replace(',', '').astype('float64')
data['active_cases'] = data['active_cases'].str.replace(',', '').astype('float64')
data['total_test'] = data['total_test'].str.replace(',', '').astype('float64')
data['population'] = data['population'].str.replace(',', '').astype('float64')

missing_c = [col for col in data.columns if data[col].isnull().sum() > 0]
missing_c
code
122255316/cell_43
[ "text_html_output_1.png" ]
from sklearn.ensemble import BaggingRegressor
from sklearn.impute import KNNImputer
from sklearn.tree import DecisionTreeRegressor
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/covid-cases-and-deaths-worldwide/covid_worldwide.csv')
data
column_change = ['serial number', 'country', 'total_cases', 'total_deaths', 'total_recovered', 'active_cases', 'total_test', 'population']
data.columns = column_change
data['total_cases'] = data['total_cases'].str.replace(',', '').astype('float64')
data['total_deaths'] = data['total_deaths'].str.replace(',', '').astype('float64')
data['total_recovered'] = data['total_recovered'].str.replace(',', '').astype('float64')
data['active_cases'] = data['active_cases'].str.replace(',', '').astype('float64')
data['total_test'] = data['total_test'].str.replace(',', '').astype('float64')
data['population'] = data['population'].str.replace(',', '').astype('float64')

missing_c = [col for col in data.columns if data[col].isnull().sum() > 0]
missing_c
data.select_dtypes(include=['object']).columns.tolist()

def get_numerical_summary(df):
    total = df.shape[0]
    missing_columns = [col for col in df.columns if df[col].isnull().sum() > 0]
    missing_percent = {}
    for col in missing_columns:
        null_count = df[col].isnull().sum()
        per = null_count / total * 100
        missing_percent[col] = per
    return missing_percent

missing_percent = get_numerical_summary(data)

data_temp = data.copy()
features_thread = 25
for col, per in missing_percent.items():
    if per > features_thread:
        data_temp.drop(col, axis=1, inplace=True)

df_temp = data.copy()
for idx in range(df_temp.shape[0]):
    df_temp.loc[idx, 'missing_count'] = df_temp.iloc[idx, :].isnull().sum()

SAMPLE_THRESHOLD = 5
df_temp.drop(df_temp[df_temp['missing_count'] > SAMPLE_THRESHOLD].index, axis=0, inplace=True)

df_temp = data.copy()
num_cols = df_temp.columns[2:]
df_temp = df_temp[num_cols]
knn = KNNImputer(n_neighbors=3)
knn.fit(df_temp)
X = knn.transform(df_temp)
df_temp = pd.DataFrame(X, columns=num_cols)
df_temp

df_temp = data.copy()

# NOTE: tree_imputation below fits regressors on all fully-observed columns,
# which fails on the string-typed 'country' column; it is label-encoded here
# (as cell_45 does) so the cell runs.
from sklearn.preprocessing import LabelEncoder
lb = LabelEncoder()
for col in df_temp.select_dtypes(include='object').columns:
    df_temp[col] = lb.fit_transform(df_temp[col])

def tree_imputation(df):
    # Impute each column that has missing values by regressing it on the
    # fully-observed columns with a bagged decision-tree ensemble.
    missing_cols = [col for col in df.columns if df[col].isnull().sum() > 0]
    non_missing_cols = [col for col in df.columns if df[col].isnull().sum() == 0]
    for col in missing_cols:
        model = BaggingRegressor(DecisionTreeRegressor(), n_estimators=40, max_samples=1.0, max_features=1.0, bootstrap=False, n_jobs=-1)
        col_missing = df[df[col].isnull()]
        temp = df.drop(df[df[col].isnull()].index, axis=0)
        X = temp.loc[:, non_missing_cols]
        y = temp[col]
        model.fit(X, y)
        y_pred = model.predict(col_missing[non_missing_cols])
        df.loc[col_missing.index, col] = y_pred
    return df

df_new = tree_imputation(df_temp)
df_new.info()
code
122255316/cell_31
[ "text_plain_output_1.png" ]
from sklearn.impute import KNNImputer
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/covid-cases-and-deaths-worldwide/covid_worldwide.csv')
data
column_change = ['serial number', 'country', 'total_cases', 'total_deaths', 'total_recovered', 'active_cases', 'total_test', 'population']
data.columns = column_change
data['total_cases'] = data['total_cases'].str.replace(',', '').astype('float64')
data['total_deaths'] = data['total_deaths'].str.replace(',', '').astype('float64')
data['total_recovered'] = data['total_recovered'].str.replace(',', '').astype('float64')
data['active_cases'] = data['active_cases'].str.replace(',', '').astype('float64')
data['total_test'] = data['total_test'].str.replace(',', '').astype('float64')
data['population'] = data['population'].str.replace(',', '').astype('float64')

missing_c = [col for col in data.columns if data[col].isnull().sum() > 0]
missing_c
data.select_dtypes(include=['object']).columns.tolist()

def get_numerical_summary(df):
    total = df.shape[0]
    missing_columns = [col for col in df.columns if df[col].isnull().sum() > 0]
    missing_percent = {}
    for col in missing_columns:
        null_count = df[col].isnull().sum()
        per = null_count / total * 100
        missing_percent[col] = per
    return missing_percent

missing_percent = get_numerical_summary(data)

data_temp = data.copy()
features_thread = 25
for col, per in missing_percent.items():
    if per > features_thread:
        data_temp.drop(col, axis=1, inplace=True)

df_temp = data.copy()
for idx in range(df_temp.shape[0]):
    df_temp.loc[idx, 'missing_count'] = df_temp.iloc[idx, :].isnull().sum()

SAMPLE_THRESHOLD = 5
df_temp.drop(df_temp[df_temp['missing_count'] > SAMPLE_THRESHOLD].index, axis=0, inplace=True)

df_temp = data.copy()
num_cols = df_temp.columns[2:]
df_temp = df_temp[num_cols]
knn = KNNImputer(n_neighbors=3)
knn.fit(df_temp)
code
122255316/cell_14
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/covid-cases-and-deaths-worldwide/covid_worldwide.csv')
data
column_change = ['serial number', 'country', 'total_cases', 'total_deaths', 'total_recovered', 'active_cases', 'total_test', 'population']
data.columns = column_change
data['total_cases'] = data['total_cases'].str.replace(',', '').astype('float64')
data['total_deaths'] = data['total_deaths'].str.replace(',', '').astype('float64')
data['total_recovered'] = data['total_recovered'].str.replace(',', '').astype('float64')
data['active_cases'] = data['active_cases'].str.replace(',', '').astype('float64')
data['total_test'] = data['total_test'].str.replace(',', '').astype('float64')
data['population'] = data['population'].str.replace(',', '').astype('float64')

missing_c = [col for col in data.columns if data[col].isnull().sum() > 0]
missing_c
data.select_dtypes(include=['object']).columns.tolist()

def get_numerical_summary(df):
    total = df.shape[0]
    missing_columns = [col for col in df.columns if df[col].isnull().sum() > 0]
    missing_percent = {}
    for col in missing_columns:
        null_count = df[col].isnull().sum()
        per = null_count / total * 100
        missing_percent[col] = per
    return missing_percent

missing_percent = get_numerical_summary(data)
code
122255316/cell_22
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/covid-cases-and-deaths-worldwide/covid_worldwide.csv')
data
column_change = ['serial number', 'country', 'total_cases', 'total_deaths', 'total_recovered', 'active_cases', 'total_test', 'population']
data.columns = column_change
data['total_cases'] = data['total_cases'].str.replace(',', '').astype('float64')
data['total_deaths'] = data['total_deaths'].str.replace(',', '').astype('float64')
data['total_recovered'] = data['total_recovered'].str.replace(',', '').astype('float64')
data['active_cases'] = data['active_cases'].str.replace(',', '').astype('float64')
data['total_test'] = data['total_test'].str.replace(',', '').astype('float64')
data['population'] = data['population'].str.replace(',', '').astype('float64')

missing_c = [col for col in data.columns if data[col].isnull().sum() > 0]
missing_c
data.select_dtypes(include=['object']).columns.tolist()

def get_numerical_summary(df):
    total = df.shape[0]
    missing_columns = [col for col in df.columns if df[col].isnull().sum() > 0]
    missing_percent = {}
    for col in missing_columns:
        null_count = df[col].isnull().sum()
        per = null_count / total * 100
        missing_percent[col] = per
    return missing_percent

missing_percent = get_numerical_summary(data)

data_temp = data.copy()
features_thread = 25
for col, per in missing_percent.items():
    if per > features_thread:
        data_temp.drop(col, axis=1, inplace=True)

df_temp = data.copy()
# Count missing values per row (equivalently: df_temp.isnull().sum(axis=1)).
for idx in range(df_temp.shape[0]):
    df_temp.loc[idx, 'missing_count'] = df_temp.iloc[idx, :].isnull().sum()
df_temp
code
122255316/cell_10
[ "text_html_output_1.png" ]
import missingno as msno
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from matplotlib import pyplot as plt

data = pd.read_csv('/kaggle/input/covid-cases-and-deaths-worldwide/covid_worldwide.csv')
data
column_change = ['serial number', 'country', 'total_cases', 'total_deaths', 'total_recovered', 'active_cases', 'total_test', 'population']
data.columns = column_change
data['total_cases'] = data['total_cases'].str.replace(',', '').astype('float64')
data['total_deaths'] = data['total_deaths'].str.replace(',', '').astype('float64')
data['total_recovered'] = data['total_recovered'].str.replace(',', '').astype('float64')
data['active_cases'] = data['active_cases'].str.replace(',', '').astype('float64')
data['total_test'] = data['total_test'].str.replace(',', '').astype('float64')
data['population'] = data['population'].str.replace(',', '').astype('float64')

msno.matrix(data)
missing_c = [col for col in data.columns if data[col].isnull().sum() > 0]
missing_c
msno.dendrogram(data[missing_c])
code
122255316/cell_5
[ "image_output_1.png" ]
import missingno as msno
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from matplotlib import pyplot as plt

data = pd.read_csv('/kaggle/input/covid-cases-and-deaths-worldwide/covid_worldwide.csv')
data
column_change = ['serial number', 'country', 'total_cases', 'total_deaths', 'total_recovered', 'active_cases', 'total_test', 'population']
data.columns = column_change
data['total_cases'] = data['total_cases'].str.replace(',', '').astype('float64')
data['total_deaths'] = data['total_deaths'].str.replace(',', '').astype('float64')
data['total_recovered'] = data['total_recovered'].str.replace(',', '').astype('float64')
data['active_cases'] = data['active_cases'].str.replace(',', '').astype('float64')
data['total_test'] = data['total_test'].str.replace(',', '').astype('float64')
data['population'] = data['population'].str.replace(',', '').astype('float64')

msno.matrix(data)
plt.figure(figsize=(15, 9))
plt.show()
code
122255316/cell_36
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/covid-cases-and-deaths-worldwide/covid_worldwide.csv')
data
column_change = ['serial number', 'country', 'total_cases', 'total_deaths', 'total_recovered', 'active_cases', 'total_test', 'population']
data.columns = column_change
data['total_cases'] = data['total_cases'].str.replace(',', '').astype('float64')
data['total_deaths'] = data['total_deaths'].str.replace(',', '').astype('float64')
data['total_recovered'] = data['total_recovered'].str.replace(',', '').astype('float64')
data['active_cases'] = data['active_cases'].str.replace(',', '').astype('float64')
data['total_test'] = data['total_test'].str.replace(',', '').astype('float64')
data['population'] = data['population'].str.replace(',', '').astype('float64')
missing_c = [col for col in data.columns if data[col].isnull().sum() > 0]
missing_c
missing_c
code
73070655/cell_13
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import KFold
from sklearn.preprocessing import OrdinalEncoder
from xgboost import XGBRegressor
import pandas as pd
df_train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
df_test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
X_train = df_train.drop('target', axis=1)
y_train = df_train.target
X_test = df_test.copy()
cat_col = X_train.select_dtypes(include='object').columns
num_col = X_train.select_dtypes(include='float64').columns
encoder = OrdinalEncoder()
X_train[cat_col] = encoder.fit_transform(X_train[cat_col])
X_test[cat_col] = encoder.transform(X_test[cat_col])
n_splits = 5
kf = KFold(n_splits, shuffle=True, random_state=0)
pred_test = 0
for fold, (train_indx, valid_indx) in enumerate(kf.split(X_train)):
    X_train_fold = X_train.iloc[train_indx]
    y_train_fold = y_train.iloc[train_indx]
    X_valid_fold = X_train.iloc[valid_indx]
    y_valid_fold = y_train.iloc[valid_indx]
    model = XGBRegressor(tree_method='gpu_hist')
    model.fit(X_train_fold, y_train_fold, verbose=False)
    pred_valid_fold = model.predict(X_valid_fold)
    RMSE_fold = mean_squared_error(pred_valid_fold, y_valid_fold, squared=False)
    pred_test_fold = model.predict(X_test)
    pred_test += pred_test_fold / n_splits  # each fold's model contributes an equal share of the final test prediction
pd.Series(data=model.feature_importances_, index=X_train.columns).sort_values(ascending=False).plot.bar()
code
73070655/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
df_train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
df_test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
df_train.info()
code
73070655/cell_12
[ "text_html_output_1.png" ]
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import KFold
from sklearn.preprocessing import OrdinalEncoder
from xgboost import XGBRegressor
import pandas as pd
df_train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
df_test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
X_train = df_train.drop('target', axis=1)
y_train = df_train.target
X_test = df_test.copy()
cat_col = X_train.select_dtypes(include='object').columns
num_col = X_train.select_dtypes(include='float64').columns
encoder = OrdinalEncoder()
X_train[cat_col] = encoder.fit_transform(X_train[cat_col])
X_test[cat_col] = encoder.transform(X_test[cat_col])
n_splits = 5
kf = KFold(n_splits, shuffle=True, random_state=0)
pred_test = 0
for fold, (train_indx, valid_indx) in enumerate(kf.split(X_train)):
    X_train_fold = X_train.iloc[train_indx]
    y_train_fold = y_train.iloc[train_indx]
    X_valid_fold = X_train.iloc[valid_indx]
    y_valid_fold = y_train.iloc[valid_indx]
    model = XGBRegressor(tree_method='gpu_hist')
    model.fit(X_train_fold, y_train_fold, verbose=False)
    pred_valid_fold = model.predict(X_valid_fold)
    RMSE_fold = mean_squared_error(pred_valid_fold, y_valid_fold, squared=False)
    print(f'Fold {fold}: {RMSE_fold:.5f}')
    pred_test_fold = model.predict(X_test)
    pred_test += pred_test_fold / n_splits  # each fold's model contributes an equal share of the final test prediction
code
73070655/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
df_train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
df_test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
df_train.head()
code
18103775/cell_21
[ "text_html_output_1.png" ]
import pandas as pd
main_folder = '../input/celeba-dataset/'
images_folder = main_folder + 'img_align_celeba/img_align_celeba/'
example_pic = images_folder + '000506.jpg'
training_sample = 10000
validation_sample = 2000
test_sample = 2000
img_width = 178
img_height = 218
batch_size = 16
num_epochs = 5
df_attr = pd.read_csv(main_folder + 'list_attr_celeba.csv')
df_attr.set_index('image_id', inplace=True)
df_attr.replace(to_replace=-1, value=0, inplace=True)
df_partition = pd.read_csv(main_folder + 'list_eval_partition.csv')
df_partition.sample(100)
df_partition['partition'].value_counts().sort_index()
code
18103775/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd
main_folder = '../input/celeba-dataset/'
images_folder = main_folder + 'img_align_celeba/img_align_celeba/'
example_pic = images_folder + '000506.jpg'
training_sample = 10000
validation_sample = 2000
test_sample = 2000
img_width = 178
img_height = 218
batch_size = 16
num_epochs = 5
df_attr = pd.read_csv(main_folder + 'list_attr_celeba.csv')
df_attr.set_index('image_id', inplace=True)
df_attr.replace(to_replace=-1, value=0, inplace=True)
df_attr.columns
df_attr.isnull().sum()
df_attr.shape
for i, j in enumerate(df_attr.columns):
    print(i + 1, j)
code
18103775/cell_9
[ "image_output_1.png" ]
import pandas as pd
main_folder = '../input/celeba-dataset/'
images_folder = main_folder + 'img_align_celeba/img_align_celeba/'
example_pic = images_folder + '000506.jpg'
training_sample = 10000
validation_sample = 2000
test_sample = 2000
img_width = 178
img_height = 218
batch_size = 16
num_epochs = 5
df_attr = pd.read_csv(main_folder + 'list_attr_celeba.csv')
df_attr.set_index('image_id', inplace=True)
df_attr.replace(to_replace=-1, value=0, inplace=True)
df_attr.describe()
code
18103775/cell_23
[ "text_html_output_1.png" ]
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
import matplotlib.pyplot as plt
import pandas as pd
main_folder = '../input/celeba-dataset/'
images_folder = main_folder + 'img_align_celeba/img_align_celeba/'
example_pic = images_folder + '000506.jpg'
training_sample = 10000
validation_sample = 2000
test_sample = 2000
img_width = 178
img_height = 218
batch_size = 16
num_epochs = 5
df_attr = pd.read_csv(main_folder + 'list_attr_celeba.csv')
df_attr.set_index('image_id', inplace=True)
df_attr.replace(to_replace=-1, value=0, inplace=True)
df_attr.columns
df_attr.isnull().sum()
df_attr.shape
img = load_img(example_pic)
df_attr.loc[example_pic.split('/')[-1]][['Smiling', 'Male', 'Young']]
df_partition = pd.read_csv(main_folder + 'list_eval_partition.csv')
df_partition.sample(100)
df_partition.set_index('image_id', inplace=True)
df_par_attr = df_partition.join(df_attr['Male'], how='inner')
df_par_attr.head(5)
code
18103775/cell_30
[ "text_html_output_1.png" ]
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
main_folder = '../input/celeba-dataset/'
images_folder = main_folder + 'img_align_celeba/img_align_celeba/'
example_pic = images_folder + '000506.jpg'
training_sample = 10000
validation_sample = 2000
test_sample = 2000
img_width = 178
img_height = 218
batch_size = 16
num_epochs = 5
df_attr = pd.read_csv(main_folder + 'list_attr_celeba.csv')
df_attr.set_index('image_id', inplace=True)
df_attr.replace(to_replace=-1, value=0, inplace=True)
df_attr.columns
df_attr.isnull().sum()
df_attr.shape
img = load_img(example_pic)
df_attr.loc[example_pic.split('/')[-1]][['Smiling', 'Male', 'Young']]
def load_reshape_img(fname):
    img = load_img(fname)
    x = img_to_array(img) / 255.0
    x = x.reshape((1,) + x.shape)
    return x
datagen = ImageDataGenerator(rotation_range=30, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
img = load_img(example_pic)
x = img_to_array(img) / 255.0
x = x.reshape((1,) + x.shape)
plt.figure(figsize=(20, 10))
plt.suptitle('Data augmentation', fontsize=28)
i = 0
for batch in datagen.flow(x, batch_size=1):
    plt.subplot(3, 5, i + 1)
    plt.grid(False)
    plt.imshow(batch.reshape(218, 178, 3))
    if i == 9:
        break
    i = i + 1
plt.show()
code
18103775/cell_40
[ "image_output_1.png" ]
from keras.applications.inception_v3 import InceptionV3, preprocess_input
from keras.callbacks import ModelCheckpoint
from keras.layers import Dropout, Dense, Flatten, GlobalAveragePooling2D
from keras.models import Sequential, Model
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from keras.utils import np_utils
import cv2
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
main_folder = '../input/celeba-dataset/'
images_folder = main_folder + 'img_align_celeba/img_align_celeba/'
example_pic = images_folder + '000506.jpg'
training_sample = 10000
validation_sample = 2000
test_sample = 2000
img_width = 178
img_height = 218
batch_size = 16
num_epochs = 5
df_attr = pd.read_csv(main_folder + 'list_attr_celeba.csv')
df_attr.set_index('image_id', inplace=True)
df_attr.replace(to_replace=-1, value=0, inplace=True)
df_attr.columns
df_attr.isnull().sum()
df_attr.shape
img = load_img(example_pic)
df_attr.loc[example_pic.split('/')[-1]][['Smiling', 'Male', 'Young']]
df_partition = pd.read_csv(main_folder + 'list_eval_partition.csv')
df_partition.sample(100)
df_partition.set_index('image_id', inplace=True)
df_par_attr = df_partition.join(df_attr['Male'], how='inner')
df_par_attr.shape
def load_reshape_img(fname):
    img = load_img(fname)
    x = img_to_array(img) / 255.0
    x = x.reshape((1,) + x.shape)
    return x
def generate_df(partition, attr, num_samples):
    # balanced sample: half the rows with attr == 0, half with attr == 1
    df_ = df_par_attr[(df_par_attr['partition'] == partition) & (df_par_attr[attr] == 0)].sample(int(num_samples / 2))
    df_ = pd.concat([df_, df_par_attr[(df_par_attr['partition'] == partition) & (df_par_attr[attr] == 1)].sample(int(num_samples / 2))])
    if partition != 2:
        x_ = np.array([load_reshape_img(images_folder + fname) for fname in df_.index])
        x_ = x_.reshape(x_.shape[0], 218, 178, 3)
        y_ = np_utils.to_categorical(df_[attr], 2)
    else:
        x_ = []
        y_ = []
        for index, target in df_.iterrows():
            im = cv2.imread(images_folder + index)
            im = cv2.resize(cv2.cvtColor(im, cv2.COLOR_BGR2RGB), (img_width, img_height)).astype(np.float32) / 255.0
            im = np.expand_dims(im, axis=0)
            x_.append(im)
            y_.append(target[attr])
    return (x_, y_)
datagen = ImageDataGenerator(rotation_range=30, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
img = load_img(example_pic)
x = img_to_array(img) / 255.0
x = x.reshape((1,) + x.shape)
i = 0
for batch in datagen.flow(x, batch_size=1):
    if i == 9:
        break
    i = i + 1
x_train, y_train = generate_df(0, 'Male', training_sample)
train_datagen = ImageDataGenerator(preprocessing_function=preprocess_input, rotation_range=30, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
train_datagen.fit(x_train)
train_generator = train_datagen.flow(x_train, y_train, batch_size=batch_size)
inc_model = InceptionV3(weights='../input/inceptionv3/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5', include_top=False, input_shape=(img_height, img_width, 3))
x = inc_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(512, activation='relu')(x)
predictions = Dense(2, activation='softmax')(x)
model_ = Model(inputs=inc_model.input, outputs=predictions)
# freeze the first 52 layers so only the upper Inception blocks and the new head are trained
for layer in model_.layers[:52]:
    layer.trainable = False
model_.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy', metrics=['accuracy'])
checkpointer = ModelCheckpoint(filepath='weights.best.inc.male.hdf5', verbose=1, save_best_only=True)
# x_valid / y_valid are never created in this cell; the line below is an assumed
# reconstruction that mirrors the x_train call above, using the validation partition (1)
x_valid, y_valid = generate_df(1, 'Male', validation_sample)
hist = model_.fit_generator(train_generator, validation_data=(x_valid, y_valid), steps_per_epoch=training_sample / batch_size, epochs=num_epochs, callbacks=[checkpointer], verbose=1)
code
18103775/cell_41
[ "text_plain_output_1.png" ]
from keras.applications.inception_v3 import InceptionV3, preprocess_input
from keras.callbacks import ModelCheckpoint
from keras.layers import Dropout, Dense, Flatten, GlobalAveragePooling2D
from keras.models import Sequential, Model
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from keras.utils import np_utils
import cv2
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
main_folder = '../input/celeba-dataset/'
images_folder = main_folder + 'img_align_celeba/img_align_celeba/'
example_pic = images_folder + '000506.jpg'
training_sample = 10000
validation_sample = 2000
test_sample = 2000
img_width = 178
img_height = 218
batch_size = 16
num_epochs = 5
df_attr = pd.read_csv(main_folder + 'list_attr_celeba.csv')
df_attr.set_index('image_id', inplace=True)
df_attr.replace(to_replace=-1, value=0, inplace=True)
df_attr.columns
df_attr.isnull().sum()
df_attr.shape
img = load_img(example_pic)
df_attr.loc[example_pic.split('/')[-1]][['Smiling', 'Male', 'Young']]
df_partition = pd.read_csv(main_folder + 'list_eval_partition.csv')
df_partition.sample(100)
df_partition.set_index('image_id', inplace=True)
df_par_attr = df_partition.join(df_attr['Male'], how='inner')
df_par_attr.shape
def load_reshape_img(fname):
    img = load_img(fname)
    x = img_to_array(img) / 255.0
    x = x.reshape((1,) + x.shape)
    return x
def generate_df(partition, attr, num_samples):
    # balanced sample: half the rows with attr == 0, half with attr == 1
    df_ = df_par_attr[(df_par_attr['partition'] == partition) & (df_par_attr[attr] == 0)].sample(int(num_samples / 2))
    df_ = pd.concat([df_, df_par_attr[(df_par_attr['partition'] == partition) & (df_par_attr[attr] == 1)].sample(int(num_samples / 2))])
    if partition != 2:
        x_ = np.array([load_reshape_img(images_folder + fname) for fname in df_.index])
        x_ = x_.reshape(x_.shape[0], 218, 178, 3)
        y_ = np_utils.to_categorical(df_[attr], 2)
    else:
        x_ = []
        y_ = []
        for index, target in df_.iterrows():
            im = cv2.imread(images_folder + index)
            im = cv2.resize(cv2.cvtColor(im, cv2.COLOR_BGR2RGB), (img_width, img_height)).astype(np.float32) / 255.0
            im = np.expand_dims(im, axis=0)
            x_.append(im)
            y_.append(target[attr])
    return (x_, y_)
datagen = ImageDataGenerator(rotation_range=30, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
img = load_img(example_pic)
x = img_to_array(img) / 255.0
x = x.reshape((1,) + x.shape)
i = 0
for batch in datagen.flow(x, batch_size=1):
    if i == 9:
        break
    i = i + 1
x_train, y_train = generate_df(0, 'Male', training_sample)
train_datagen = ImageDataGenerator(preprocessing_function=preprocess_input, rotation_range=30, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
train_datagen.fit(x_train)
train_generator = train_datagen.flow(x_train, y_train, batch_size=batch_size)
inc_model = InceptionV3(weights='../input/inceptionv3/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5', include_top=False, input_shape=(img_height, img_width, 3))
x = inc_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(512, activation='relu')(x)
predictions = Dense(2, activation='softmax')(x)
model_ = Model(inputs=inc_model.input, outputs=predictions)
# freeze the first 52 layers so only the upper Inception blocks and the new head are trained
for layer in model_.layers[:52]:
    layer.trainable = False
model_.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy', metrics=['accuracy'])
checkpointer = ModelCheckpoint(filepath='weights.best.inc.male.hdf5', verbose=1, save_best_only=True)
# x_valid / y_valid are never created in this cell; the line below is an assumed
# reconstruction that mirrors the x_train call above, using the validation partition (1)
x_valid, y_valid = generate_df(1, 'Male', validation_sample)
hist = model_.fit_generator(train_generator, validation_data=(x_valid, y_valid), steps_per_epoch=training_sample / batch_size, epochs=num_epochs, callbacks=[checkpointer], verbose=1)
plt.figure(figsize=(18, 4))
plt.plot(hist.history['loss'], label='train')
plt.plot(hist.history['val_loss'], label='validation')
plt.legend()
plt.title('loss function')
plt.show()
code
18103775/cell_2
[ "image_output_1.png" ]
from IPython.core.display import display, HTML
from PIL import Image
from io import BytesIO
import base64
import matplotlib.pyplot as plt  # assumed import: plt is used below but never imported in this cell
plt.style.use('ggplot')
import tensorflow as tf
print(tf.__version__)
code
18103775/cell_11
[ "text_html_output_1.png" ]
import pandas as pd
main_folder = '../input/celeba-dataset/'
images_folder = main_folder + 'img_align_celeba/img_align_celeba/'
example_pic = images_folder + '000506.jpg'
training_sample = 10000
validation_sample = 2000
test_sample = 2000
img_width = 178
img_height = 218
batch_size = 16
num_epochs = 5
df_attr = pd.read_csv(main_folder + 'list_attr_celeba.csv')
df_attr.set_index('image_id', inplace=True)
df_attr.replace(to_replace=-1, value=0, inplace=True)
df_attr.columns
df_attr.isnull().sum()
code
18103775/cell_19
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
main_folder = '../input/celeba-dataset/'
images_folder = main_folder + 'img_align_celeba/img_align_celeba/'
example_pic = images_folder + '000506.jpg'
training_sample = 10000
validation_sample = 2000
test_sample = 2000
img_width = 178
img_height = 218
batch_size = 16
num_epochs = 5
df_attr = pd.read_csv(main_folder + 'list_attr_celeba.csv')
df_attr.set_index('image_id', inplace=True)
df_attr.replace(to_replace=-1, value=0, inplace=True)
df_partition = pd.read_csv(main_folder + 'list_eval_partition.csv')
df_partition.sample(100)
code
18103775/cell_1
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
import os
import warnings
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import cv2
import seaborn as sns
from sklearn.metrics import f1_score
import os
print(os.listdir('../input'))
import warnings
warnings.filterwarnings('ignore')
from keras.applications.inception_v3 import InceptionV3, preprocess_input
from keras import optimizers
from keras.models import Sequential, Model
from keras.layers import Dropout, Dense, Flatten, GlobalAveragePooling2D
from keras.callbacks import ModelCheckpoint
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from keras.utils import np_utils
from keras.optimizers import SGD
code
18103775/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd
main_folder = '../input/celeba-dataset/'
images_folder = main_folder + 'img_align_celeba/img_align_celeba/'
example_pic = images_folder + '000506.jpg'
training_sample = 10000
validation_sample = 2000
test_sample = 2000
img_width = 178
img_height = 218
batch_size = 16
num_epochs = 5
df_attr = pd.read_csv(main_folder + 'list_attr_celeba.csv')
df_attr.set_index('image_id', inplace=True)
df_attr.replace(to_replace=-1, value=0, inplace=True)
df_partition = pd.read_csv(main_folder + 'list_eval_partition.csv')
df_partition.head(5)
code
18103775/cell_8
[ "image_output_1.png" ]
import pandas as pd
main_folder = '../input/celeba-dataset/'
images_folder = main_folder + 'img_align_celeba/img_align_celeba/'
example_pic = images_folder + '000506.jpg'
training_sample = 10000
validation_sample = 2000
test_sample = 2000
img_width = 178
img_height = 218
batch_size = 16
num_epochs = 5
df_attr = pd.read_csv(main_folder + 'list_attr_celeba.csv')
df_attr.set_index('image_id', inplace=True)
df_attr.replace(to_replace=-1, value=0, inplace=True)
df_attr.head(5)
code
18103775/cell_16
[ "text_plain_output_1.png" ]
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
main_folder = '../input/celeba-dataset/'
images_folder = main_folder + 'img_align_celeba/img_align_celeba/'
example_pic = images_folder + '000506.jpg'
training_sample = 10000
validation_sample = 2000
test_sample = 2000
img_width = 178
img_height = 218
batch_size = 16
num_epochs = 5
df_attr = pd.read_csv(main_folder + 'list_attr_celeba.csv')
df_attr.set_index('image_id', inplace=True)
df_attr.replace(to_replace=-1, value=0, inplace=True)
df_attr.columns
df_attr.isnull().sum()
df_attr.shape
img = load_img(example_pic)
df_attr.loc[example_pic.split('/')[-1]][['Smiling', 'Male', 'Young']]
sns.countplot(df_attr['Male'])
plt.show()
code
18103775/cell_35
[ "text_plain_output_1.png" ]
from keras.applications.inception_v3 import InceptionV3, preprocess_input
main_folder = '../input/celeba-dataset/'
images_folder = main_folder + 'img_align_celeba/img_align_celeba/'
example_pic = images_folder + '000506.jpg'
training_sample = 10000
validation_sample = 2000
test_sample = 2000
img_width = 178
img_height = 218
batch_size = 16
num_epochs = 5
inc_model = InceptionV3(weights='../input/inceptionv3/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5', include_top=False, input_shape=(img_height, img_width, 3))
print('number of layers in the model : ', len(inc_model.layers))
code
18103775/cell_24
[ "text_plain_output_1.png" ]
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
import matplotlib.pyplot as plt
import pandas as pd
main_folder = '../input/celeba-dataset/'
images_folder = main_folder + 'img_align_celeba/img_align_celeba/'
example_pic = images_folder + '000506.jpg'
training_sample = 10000
validation_sample = 2000
test_sample = 2000
img_width = 178
img_height = 218
batch_size = 16
num_epochs = 5
df_attr = pd.read_csv(main_folder + 'list_attr_celeba.csv')
df_attr.set_index('image_id', inplace=True)
df_attr.replace(to_replace=-1, value=0, inplace=True)
df_attr.columns
df_attr.isnull().sum()
df_attr.shape
img = load_img(example_pic)
df_attr.loc[example_pic.split('/')[-1]][['Smiling', 'Male', 'Young']]
df_partition = pd.read_csv(main_folder + 'list_eval_partition.csv')
df_partition.sample(100)
df_partition.set_index('image_id', inplace=True)
df_par_attr = df_partition.join(df_attr['Male'], how='inner')
df_par_attr.shape
code
18103775/cell_14
[ "text_plain_output_1.png" ]
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
import matplotlib.pyplot as plt
import pandas as pd
main_folder = '../input/celeba-dataset/'
images_folder = main_folder + 'img_align_celeba/img_align_celeba/'
example_pic = images_folder + '000506.jpg'
training_sample = 10000
validation_sample = 2000
test_sample = 2000
img_width = 178
img_height = 218
batch_size = 16
num_epochs = 5
df_attr = pd.read_csv(main_folder + 'list_attr_celeba.csv')
df_attr.set_index('image_id', inplace=True)
df_attr.replace(to_replace=-1, value=0, inplace=True)
df_attr.columns
df_attr.isnull().sum()
df_attr.shape
img = load_img(example_pic)
plt.grid(False)
plt.imshow(img)
df_attr.loc[example_pic.split('/')[-1]][['Smiling', 'Male', 'Young']]
code
18103775/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd
main_folder = '../input/celeba-dataset/'
images_folder = main_folder + 'img_align_celeba/img_align_celeba/'
example_pic = images_folder + '000506.jpg'
training_sample = 10000
validation_sample = 2000
test_sample = 2000
img_width = 178
img_height = 218
batch_size = 16
num_epochs = 5
df_attr = pd.read_csv(main_folder + 'list_attr_celeba.csv')
df_attr.set_index('image_id', inplace=True)
df_attr.replace(to_replace=-1, value=0, inplace=True)
df_attr.columns
code
18103775/cell_12
[ "text_html_output_1.png" ]
import pandas as pd
main_folder = '../input/celeba-dataset/'
images_folder = main_folder + 'img_align_celeba/img_align_celeba/'
example_pic = images_folder + '000506.jpg'
training_sample = 10000
validation_sample = 2000
test_sample = 2000
img_width = 178
img_height = 218
batch_size = 16
num_epochs = 5
df_attr = pd.read_csv(main_folder + 'list_attr_celeba.csv')
df_attr.set_index('image_id', inplace=True)
df_attr.replace(to_replace=-1, value=0, inplace=True)
df_attr.columns
df_attr.isnull().sum()
df_attr.shape
code
73090970/cell_21
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
crimes = pd.read_csv('../input/crime-in-los-angeles-data-from-2020-to-present/Crime_Data_from_2020_to_Present.csv')
crimes
crimes.drop(['DR_NO', 'Date Rptd', 'Rpt Dist No', 'Part 1-2', 'Mocodes', 'Crm Cd 1', 'Crm Cd 2', 'Crm Cd 3', 'Crm Cd 4', 'Cross Street', 'LOCATION'], axis=1, inplace=True)
crimes
crimes.isnull().sum()
crimes.dropna(axis=0, inplace=True)
crimes.reset_index(drop=True, inplace=True)
crimes
crimes_desc = crimes[['Crm Cd Desc', 'Premis Desc', 'Weapon Desc', 'Status Desc']]
crimes = crimes[['YEAR OCC', 'MONTH OCC', 'DAY OCC', 'TIME OCC', 'AREA', 'AREA NAME', 'Crm Cd', 'Vict Age', 'Vict Sex', 'Vict Descent', 'Premis Cd', 'Weapon Used Cd', 'Status']]
crimes
crimes_distribution = crimes.iloc[:, [1, 2, 3, 4, 6, 7, 10, 11]]
crimes_distribution
Vict_Age_0 = crimes[crimes['Vict Age'] == 0].index
crimes.drop(Vict_Age_0, inplace=True)
crimes.reset_index(drop=True, inplace=True)
crimes
Crm_Cd_Log = np.log1p(crimes['Crm Cd'])
Premis_Cd_Log = np.log1p(crimes['Premis Cd'])
Weapon_Used_Cd_Log = np.log1p(crimes['Weapon Used Cd'])
crimes.insert(7, 'Crm Cd Log', Crm_Cd_Log)
crimes.insert(12, 'Premis Cd Log', Premis_Cd_Log)
crimes.insert(14, 'Weapon Used Cd Log', Weapon_Used_Cd_Log)
crimes
crimes
code
73090970/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd
crimes = pd.read_csv('../input/crime-in-los-angeles-data-from-2020-to-present/Crime_Data_from_2020_to_Present.csv')
crimes
crimes.drop(['DR_NO', 'Date Rptd', 'Rpt Dist No', 'Part 1-2', 'Mocodes', 'Crm Cd 1', 'Crm Cd 2', 'Crm Cd 3', 'Crm Cd 4', 'Cross Street', 'LOCATION'], axis=1, inplace=True)
crimes
crimes.isnull().sum()
crimes.dropna(axis=0, inplace=True)
crimes.reset_index(drop=True, inplace=True)
crimes
crimes_desc = crimes[['Crm Cd Desc', 'Premis Desc', 'Weapon Desc', 'Status Desc']]
crimes = crimes[['YEAR OCC', 'MONTH OCC', 'DAY OCC', 'TIME OCC', 'AREA', 'AREA NAME', 'Crm Cd', 'Vict Age', 'Vict Sex', 'Vict Descent', 'Premis Cd', 'Weapon Used Cd', 'Status']]
crimes
crimes_distribution = crimes.iloc[:, [1, 2, 3, 4, 6, 7, 10, 11]]
crimes_distribution
code
73090970/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
crimes = pd.read_csv('../input/crime-in-los-angeles-data-from-2020-to-present/Crime_Data_from_2020_to_Present.csv')
crimes
crimes.drop(['DR_NO', 'Date Rptd', 'Rpt Dist No', 'Part 1-2', 'Mocodes', 'Crm Cd 1', 'Crm Cd 2', 'Crm Cd 3', 'Crm Cd 4', 'Cross Street', 'LOCATION'], axis=1, inplace=True)
crimes
crimes.isnull().sum()
code
73090970/cell_4
[ "text_html_output_1.png" ]
import pandas as pd
crimes = pd.read_csv('../input/crime-in-los-angeles-data-from-2020-to-present/Crime_Data_from_2020_to_Present.csv')
crimes
code
73090970/cell_23
[ "text_html_output_1.png" ]
from sklearn.model_selection import train_test_split, cross_val_score
import numpy as np
import pandas as pd
crimes = pd.read_csv('../input/crime-in-los-angeles-data-from-2020-to-present/Crime_Data_from_2020_to_Present.csv')
crimes
crimes.drop(['DR_NO', 'Date Rptd', 'Rpt Dist No', 'Part 1-2', 'Mocodes', 'Crm Cd 1', 'Crm Cd 2', 'Crm Cd 3', 'Crm Cd 4', 'Cross Street', 'LOCATION'], axis=1, inplace=True)
crimes
crimes.isnull().sum()
crimes.dropna(axis=0, inplace=True)
crimes.reset_index(drop=True, inplace=True)
crimes
crimes_desc = crimes[['Crm Cd Desc', 'Premis Desc', 'Weapon Desc', 'Status Desc']]
crimes = crimes[['YEAR OCC', 'MONTH OCC', 'DAY OCC', 'TIME OCC', 'AREA', 'AREA NAME', 'Crm Cd', 'Vict Age', 'Vict Sex', 'Vict Descent', 'Premis Cd', 'Weapon Used Cd', 'Status']]
crimes
crimes_distribution = crimes.iloc[:, [1, 2, 3, 4, 6, 7, 10, 11]]
crimes_distribution
Vict_Age_0 = crimes[crimes['Vict Age'] == 0].index
crimes.drop(Vict_Age_0, inplace=True)
crimes.reset_index(drop=True, inplace=True)
crimes
Crm_Cd_Log = np.log1p(crimes['Crm Cd'])
Premis_Cd_Log = np.log1p(crimes['Premis Cd'])
Weapon_Used_Cd_Log = np.log1p(crimes['Weapon Used Cd'])
crimes.insert(7, 'Crm Cd Log', Crm_Cd_Log)
crimes.insert(12, 'Premis Cd Log', Premis_Cd_Log)
crimes.insert(14, 'Weapon Used Cd Log', Weapon_Used_Cd_Log)
crimes
train_features = crimes[['YEAR OCC', 'MONTH OCC', 'DAY OCC', 'TIME OCC', 'AREA', 'Vict Age', 'Premis Cd Log', 'Weapon Used Cd Log']]
train_label = crimes['Crm Cd Log'].astype(int)
X_train, X_test, y_train, y_test = train_test_split(train_features, train_label, test_size=0.2, random_state=11)
print('Shape of X_train: ', X_train.shape)
print('Shape of X_test: ', X_test.shape)
print('Shape of y_train: ', y_train.shape)
print('Shape of y_test: ', y_test.shape)
code
73090970/cell_26
[ "text_html_output_1.png" ]
from sklearn.linear_model import LinearRegression, LogisticRegression, Ridge, Lasso, ElasticNet
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split, cross_val_score
import numpy as np
import pandas as pd
crimes = pd.read_csv('../input/crime-in-los-angeles-data-from-2020-to-present/Crime_Data_from_2020_to_Present.csv')
crimes
crimes.drop(['DR_NO', 'Date Rptd', 'Rpt Dist No', 'Part 1-2', 'Mocodes', 'Crm Cd 1', 'Crm Cd 2', 'Crm Cd 3', 'Crm Cd 4', 'Cross Street', 'LOCATION'], axis=1, inplace=True)
crimes
crimes.isnull().sum()
crimes.dropna(axis=0, inplace=True)
crimes.reset_index(drop=True, inplace=True)
crimes
crimes_desc = crimes[['Crm Cd Desc', 'Premis Desc', 'Weapon Desc', 'Status Desc']]
crimes = crimes[['YEAR OCC', 'MONTH OCC', 'DAY OCC', 'TIME OCC', 'AREA', 'AREA NAME', 'Crm Cd', 'Vict Age', 'Vict Sex', 'Vict Descent', 'Premis Cd', 'Weapon Used Cd', 'Status']]
crimes
crimes_distribution = crimes.iloc[:, [1, 2, 3, 4, 6, 7, 10, 11]]
crimes_distribution
Vict_Age_0 = crimes[crimes['Vict Age'] == 0].index
crimes.drop(Vict_Age_0, inplace=True)
crimes.reset_index(drop=True, inplace=True)
crimes
Crm_Cd_Log = np.log1p(crimes['Crm Cd'])
Premis_Cd_Log = np.log1p(crimes['Premis Cd'])
Weapon_Used_Cd_Log = np.log1p(crimes['Weapon Used Cd'])
crimes.insert(7, 'Crm Cd Log', Crm_Cd_Log)
crimes.insert(12, 'Premis Cd Log', Premis_Cd_Log)
crimes.insert(14, 'Weapon Used Cd Log', Weapon_Used_Cd_Log)
crimes
train_features = crimes[['YEAR OCC', 'MONTH OCC', 'DAY OCC', 'TIME OCC', 'AREA', 'Vict Age', 'Premis Cd Log', 'Weapon Used Cd Log']]
train_label = crimes['Crm Cd Log'].astype(int)
X_train, X_test, y_train, y_test = train_test_split(train_features, train_label, test_size=0.2, random_state=11)
lr_reg = LogisticRegression(solver='liblinear')
lr_reg.fit(X_train, y_train)
lr_preds = lr_reg.predict(X_test)
lr_mse = mean_squared_error(y_test, lr_preds)
lr_rmse = np.sqrt(lr_mse)
print('MSE : {0:.3f}, RMSE : {1:.3f}'.format(lr_mse, lr_rmse))
print('Variance score : {0:.3f}'.format(r2_score(y_test, lr_preds)))
code
73090970/cell_11
[ "text_html_output_1.png" ]
import pandas as pd
crimes = pd.read_csv('../input/crime-in-los-angeles-data-from-2020-to-present/Crime_Data_from_2020_to_Present.csv')
crimes
crimes.drop(['DR_NO', 'Date Rptd', 'Rpt Dist No', 'Part 1-2', 'Mocodes', 'Crm Cd 1', 'Crm Cd 2', 'Crm Cd 3', 'Crm Cd 4', 'Cross Street', 'LOCATION'], axis=1, inplace=True)
crimes
crimes.isnull().sum()
crimes.dropna(axis=0, inplace=True)
crimes.reset_index(drop=True, inplace=True)
crimes
crimes['DATE OCC'] = pd.to_datetime(crimes['DATE OCC'])
crimes['YEAR OCC'] = crimes['DATE OCC'].dt.year
crimes['MONTH OCC'] = crimes['DATE OCC'].dt.month
crimes['DAY OCC'] = crimes['DATE OCC'].dt.day
crimes
code
73090970/cell_19
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
crimes = pd.read_csv('../input/crime-in-los-angeles-data-from-2020-to-present/Crime_Data_from_2020_to_Present.csv')
crimes
crimes.drop(['DR_NO', 'Date Rptd', 'Rpt Dist No', 'Part 1-2', 'Mocodes', 'Crm Cd 1', 'Crm Cd 2', 'Crm Cd 3', 'Crm Cd 4', 'Cross Street', 'LOCATION'], axis=1, inplace=True)
crimes
crimes.isnull().sum()
crimes.dropna(axis=0, inplace=True)
crimes.reset_index(drop=True, inplace=True)
crimes
crimes_desc = crimes[['Crm Cd Desc', 'Premis Desc', 'Weapon Desc', 'Status Desc']]
crimes = crimes[['YEAR OCC', 'MONTH OCC', 'DAY OCC', 'TIME OCC', 'AREA', 'AREA NAME', 'Crm Cd', 'Vict Age', 'Vict Sex', 'Vict Descent', 'Premis Cd', 'Weapon Used Cd', 'Status']]
crimes
crimes_distribution = crimes.iloc[:, [1, 2, 3, 4, 6, 7, 10, 11]]
crimes_distribution
# Check the distribution of each feature
fig, axs = plt.subplots(nrows=2, ncols=4, figsize=(20, 10))
for i, feature in enumerate(crimes_distribution.columns):
    row = int(i / 4)
    col = i % 4
    sns.distplot(crimes_distribution.iloc[:, i], ax=axs[row][col])
plt.suptitle('Distribution of features')
plt.tight_layout()
Vict_Age_0 = crimes[crimes['Vict Age'] == 0].index
crimes.drop(Vict_Age_0, inplace=True)
crimes.reset_index(drop=True, inplace=True)
crimes
Crm_Cd_Log = np.log1p(crimes['Crm Cd'])
Premis_Cd_Log = np.log1p(crimes['Premis Cd'])
Weapon_Used_Cd_Log = np.log1p(crimes['Weapon Used Cd'])
crimes.insert(7, 'Crm Cd Log', Crm_Cd_Log)
crimes.insert(12, 'Premis Cd Log', Premis_Cd_Log)
crimes.insert(14, 'Weapon Used Cd Log', Weapon_Used_Cd_Log)
crimes
crimes_distribution_log = crimes[['Crm Cd Log', 'Premis Cd Log', 'Weapon Used Cd Log']]
crimes_distribution_log
fig, axs = plt.subplots(ncols=3, figsize=(15, 5))
for i, feature in enumerate(crimes_distribution_log.columns):
    col = i % 3
    sns.distplot(crimes_distribution_log.iloc[:, i], ax=axs[col])
plt.suptitle('Distribution of features log converted')
plt.tight_layout()
code
73090970/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd
crimes = pd.read_csv('../input/crime-in-los-angeles-data-from-2020-to-present/Crime_Data_from_2020_to_Present.csv')
crimes
crimes.head()
code
73090970/cell_18
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
crimes = pd.read_csv('../input/crime-in-los-angeles-data-from-2020-to-present/Crime_Data_from_2020_to_Present.csv')
crimes
crimes.drop(['DR_NO', 'Date Rptd', 'Rpt Dist No', 'Part 1-2', 'Mocodes', 'Crm Cd 1', 'Crm Cd 2', 'Crm Cd 3', 'Crm Cd 4', 'Cross Street', 'LOCATION'], axis=1, inplace=True)
crimes
crimes.isnull().sum()
crimes.dropna(axis=0, inplace=True)
crimes.reset_index(drop=True, inplace=True)
crimes
crimes_desc = crimes[['Crm Cd Desc', 'Premis Desc', 'Weapon Desc', 'Status Desc']]
crimes = crimes[['YEAR OCC', 'MONTH OCC', 'DAY OCC', 'TIME OCC', 'AREA', 'AREA NAME', 'Crm Cd', 'Vict Age', 'Vict Sex', 'Vict Descent', 'Premis Cd', 'Weapon Used Cd', 'Status']]
crimes
crimes_distribution = crimes.iloc[:, [1, 2, 3, 4, 6, 7, 10, 11]]
crimes_distribution
Vict_Age_0 = crimes[crimes['Vict Age'] == 0].index
crimes.drop(Vict_Age_0, inplace=True)
crimes.reset_index(drop=True, inplace=True)
crimes
Crm_Cd_Log = np.log1p(crimes['Crm Cd'])
Premis_Cd_Log = np.log1p(crimes['Premis Cd'])
Weapon_Used_Cd_Log = np.log1p(crimes['Weapon Used Cd'])
crimes.insert(7, 'Crm Cd Log', Crm_Cd_Log)
crimes.insert(12, 'Premis Cd Log', Premis_Cd_Log)
crimes.insert(14, 'Weapon Used Cd Log', Weapon_Used_Cd_Log)
crimes
crimes_distribution_log = crimes[['Crm Cd Log', 'Premis Cd Log', 'Weapon Used Cd Log']]
crimes_distribution_log
code
73090970/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
crimes = pd.read_csv('../input/crime-in-los-angeles-data-from-2020-to-present/Crime_Data_from_2020_to_Present.csv')
crimes
crimes.drop(['DR_NO', 'Date Rptd', 'Rpt Dist No', 'Part 1-2', 'Mocodes', 'Crm Cd 1', 'Crm Cd 2', 'Crm Cd 3', 'Crm Cd 4', 'Cross Street', 'LOCATION'], axis=1, inplace=True)
crimes
code
73090970/cell_16
[ "text_html_output_1.png" ]
import pandas as pd
crimes = pd.read_csv('../input/crime-in-los-angeles-data-from-2020-to-present/Crime_Data_from_2020_to_Present.csv')
crimes
crimes.drop(['DR_NO', 'Date Rptd', 'Rpt Dist No', 'Part 1-2', 'Mocodes', 'Crm Cd 1', 'Crm Cd 2', 'Crm Cd 3', 'Crm Cd 4', 'Cross Street', 'LOCATION'], axis=1, inplace=True)
crimes
crimes.isnull().sum()
crimes.dropna(axis=0, inplace=True)
crimes.reset_index(drop=True, inplace=True)
crimes
crimes_desc = crimes[['Crm Cd Desc', 'Premis Desc', 'Weapon Desc', 'Status Desc']]
crimes = crimes[['YEAR OCC', 'MONTH OCC', 'DAY OCC', 'TIME OCC', 'AREA', 'AREA NAME', 'Crm Cd', 'Vict Age', 'Vict Sex', 'Vict Descent', 'Premis Cd', 'Weapon Used Cd', 'Status']]
crimes
crimes_distribution = crimes.iloc[:, [1, 2, 3, 4, 6, 7, 10, 11]]
crimes_distribution
Vict_Age_0 = crimes[crimes['Vict Age'] == 0].index
crimes.drop(Vict_Age_0, inplace=True)
crimes.reset_index(drop=True, inplace=True)
crimes
code
73090970/cell_17
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
crimes = pd.read_csv('../input/crime-in-los-angeles-data-from-2020-to-present/Crime_Data_from_2020_to_Present.csv')
crimes
crimes.drop(['DR_NO', 'Date Rptd', 'Rpt Dist No', 'Part 1-2', 'Mocodes', 'Crm Cd 1', 'Crm Cd 2', 'Crm Cd 3', 'Crm Cd 4', 'Cross Street', 'LOCATION'], axis=1, inplace=True)
crimes
crimes.isnull().sum()
crimes.dropna(axis=0, inplace=True)
crimes.reset_index(drop=True, inplace=True)
crimes
crimes_desc = crimes[['Crm Cd Desc', 'Premis Desc', 'Weapon Desc', 'Status Desc']]
crimes = crimes[['YEAR OCC', 'MONTH OCC', 'DAY OCC', 'TIME OCC', 'AREA', 'AREA NAME', 'Crm Cd', 'Vict Age', 'Vict Sex', 'Vict Descent', 'Premis Cd', 'Weapon Used Cd', 'Status']]
crimes
crimes_distribution = crimes.iloc[:, [1, 2, 3, 4, 6, 7, 10, 11]]
crimes_distribution
Vict_Age_0 = crimes[crimes['Vict Age'] == 0].index
crimes.drop(Vict_Age_0, inplace=True)
crimes.reset_index(drop=True, inplace=True)
crimes
Crm_Cd_Log = np.log1p(crimes['Crm Cd'])
Premis_Cd_Log = np.log1p(crimes['Premis Cd'])
Weapon_Used_Cd_Log = np.log1p(crimes['Weapon Used Cd'])
crimes.insert(7, 'Crm Cd Log', Crm_Cd_Log)
crimes.insert(12, 'Premis Cd Log', Premis_Cd_Log)
crimes.insert(14, 'Weapon Used Cd Log', Weapon_Used_Cd_Log)
crimes
code
73090970/cell_14
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
crimes = pd.read_csv('../input/crime-in-los-angeles-data-from-2020-to-present/Crime_Data_from_2020_to_Present.csv')
crimes
crimes.drop(['DR_NO', 'Date Rptd', 'Rpt Dist No', 'Part 1-2', 'Mocodes', 'Crm Cd 1', 'Crm Cd 2', 'Crm Cd 3', 'Crm Cd 4', 'Cross Street', 'LOCATION'], axis=1, inplace=True)
crimes
crimes.isnull().sum()
crimes.dropna(axis=0, inplace=True)
crimes.reset_index(drop=True, inplace=True)
crimes
crimes_desc = crimes[['Crm Cd Desc', 'Premis Desc', 'Weapon Desc', 'Status Desc']]
crimes = crimes[['YEAR OCC', 'MONTH OCC', 'DAY OCC', 'TIME OCC', 'AREA', 'AREA NAME', 'Crm Cd', 'Vict Age', 'Vict Sex', 'Vict Descent', 'Premis Cd', 'Weapon Used Cd', 'Status']]
crimes
crimes_distribution = crimes.iloc[:, [1, 2, 3, 4, 6, 7, 10, 11]]
crimes_distribution
fig, axs = plt.subplots(nrows=2, ncols=4, figsize=(20, 10))
for i, feature in enumerate(crimes_distribution.columns):
    row = int(i / 4)
    col = i % 4
    sns.distplot(crimes_distribution.iloc[:, i], ax=axs[row][col])
plt.suptitle('Distribution of features')
plt.tight_layout()
code
73090970/cell_10
[ "text_html_output_1.png" ]
import pandas as pd
crimes = pd.read_csv('../input/crime-in-los-angeles-data-from-2020-to-present/Crime_Data_from_2020_to_Present.csv')
crimes
crimes.drop(['DR_NO', 'Date Rptd', 'Rpt Dist No', 'Part 1-2', 'Mocodes', 'Crm Cd 1', 'Crm Cd 2', 'Crm Cd 3', 'Crm Cd 4', 'Cross Street', 'LOCATION'], axis=1, inplace=True)
crimes
crimes.isnull().sum()
crimes.dropna(axis=0, inplace=True)
crimes.reset_index(drop=True, inplace=True)
crimes
code
73090970/cell_12
[ "text_html_output_1.png" ]
import pandas as pd
crimes = pd.read_csv('../input/crime-in-los-angeles-data-from-2020-to-present/Crime_Data_from_2020_to_Present.csv')
crimes
crimes.drop(['DR_NO', 'Date Rptd', 'Rpt Dist No', 'Part 1-2', 'Mocodes', 'Crm Cd 1', 'Crm Cd 2', 'Crm Cd 3', 'Crm Cd 4', 'Cross Street', 'LOCATION'], axis=1, inplace=True)
crimes
crimes.isnull().sum()
crimes.dropna(axis=0, inplace=True)
crimes.reset_index(drop=True, inplace=True)
crimes
crimes_desc = crimes[['Crm Cd Desc', 'Premis Desc', 'Weapon Desc', 'Status Desc']]
crimes = crimes[['YEAR OCC', 'MONTH OCC', 'DAY OCC', 'TIME OCC', 'AREA', 'AREA NAME', 'Crm Cd', 'Vict Age', 'Vict Sex', 'Vict Descent', 'Premis Cd', 'Weapon Used Cd', 'Status']]
crimes
code
72101116/cell_21
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from optuna.visualization import plot_optimization_history, plot_param_importances
# study is the Optuna study object created in an earlier cell of this notebook
plot_param_importances(study)
code
72101116/cell_13
[ "text_html_output_1.png" ]
cat_features = ['cat0', 'cat1', 'cat2', 'cat3', 'cat4', 'cat5', 'cat6', 'cat7', 'cat8', 'cat9']
df_cat = feature_matrix[cat_features]
feature_matrix = feature_matrix.drop(cat_features, axis=1)
feature_matrix.head()
code
72101116/cell_25
[ "text_html_output_1.png" ]
from lightgbm import LGBMRegressor
from sklearn.model_selection import KFold, train_test_split
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import math, random
from sklearn.model_selection import KFold, train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import LabelEncoder
pd.set_option('display.max_columns', 100)
from lightgbm import LGBMRegressor
SEED = 47
PATH = '../input/30-days-of-ml/'
df_train = pd.read_csv(PATH + '/train.csv')
df_test = pd.read_csv(PATH + '/test.csv')
df_sub = pd.read_csv(PATH + '/sample_submission.csv')
target = df_train['target']
features = df_train.drop(['id', 'target'], axis=1)
cat_features = ['cat0', 'cat1', 'cat2', 'cat3', 'cat4', 'cat5', 'cat6', 'cat7', 'cat8', 'cat9']
# feature_matrix and study come from earlier cells of this notebook
df_cat = feature_matrix[cat_features]
feature_matrix = feature_matrix.drop(cat_features, axis=1)
study.best_params
optuna_params = study.best_params
optuna_params['metric'] = 'rmse'
optuna_params['random_state'] = SEED
optuna_params['n_estimators'] = 10000
X_train, X_test, y_train, y_test = train_test_split(feature_matrix, target, test_size=0.2, random_state=SEED)
model_optuna = LGBMRegressor(**optuna_params)
model_optuna.fit(X_train, y_train, eval_set=[(X_test, y_test)], early_stopping_rounds=300, verbose=300)
code
72101116/cell_19
[ "text_html_output_1.png" ]
from optuna.visualization import plot_optimization_history, plot_param_importances
# study is the Optuna study object created in an earlier cell of this notebook
plot_optimization_history(study)
code
72101116/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code