path (stringlengths 13–17) | screenshot_names (sequencelengths 1–873) | code (stringlengths 0–40.4k) | cell_type (stringclasses 1 value) |
---|---|---|---|
72106185/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
train
train.drop('id', axis=1, inplace=True)
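# Tally missing values per column as a quick data-quality check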
na_count = pd.Series([train[col].isna().sum() for col in train.columns], index=train.columns, name='NA Count')
na_count | code |
72106185/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
train
train.drop('id', axis=1, inplace=True)
na_count = pd.Series([train[col].isna().sum() for col in train.columns], index=train.columns, name='NA Count')
na_count
y = train['target']
X = train.drop('target', axis=1)
X | code |
72106185/cell_2 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
print(train.columns)
train | code |
72106185/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
train
train.drop('id', axis=1, inplace=True)
na_count = pd.Series([train[col].isna().sum() for col in train.columns], index=train.columns, name='NA Count')
na_count
y = train['target']
X = train.drop('target', axis=1)
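# Split features into numeric columns and low-cardinality (<10 levels) categorical columns;
# this filter leaves out cat9, which a later cell also drops from the test set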
num_cols = [col for col in X.columns if X[col].dtype in ['int64', 'float64']]
cat_cols = [col for col in X.columns if X[col].dtype == 'object' and X[col].nunique() < 10]
cardinality = pd.Series([X[col].nunique() for col in cat_cols], index=cat_cols, name='Cardinality')
cardinality | code |
72106185/cell_16 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import OneHotEncoder
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
train
train.drop('id', axis=1, inplace=True)
na_count = pd.Series([train[col].isna().sum() for col in train.columns], index=train.columns, name='NA Count')
na_count
y = train['target']
X = train.drop('target', axis=1)
num_cols = [col for col in X.columns if X[col].dtype in ['int64', 'float64']]
cat_cols = [col for col in X.columns if X[col].dtype == 'object' and X[col].nunique() < 10]
cardinality = pd.Series([X[col].nunique() for col in cat_cols], index=cat_cols, name='Cardinality')
cardinality
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
# Assumed train/validation split: the original split cell is not part of this row's code
X_train, X_valid, y_train, y_valid = train_test_split(X, y, random_state=0)
OHE = OneHotEncoder(handle_unknown='ignore', sparse=False)
OH_cols_train = pd.DataFrame(OHE.fit_transform(X_train[cat_cols]))
OH_cols_valid = pd.DataFrame(OHE.transform(X_valid[cat_cols]))
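# The encoder returns a plain array, so restore the original row index before concatenating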
OH_cols_train.index = X_train.index
OH_cols_valid.index = X_valid.index
num_X_train = X_train.drop(cat_cols, axis=1)
num_X_valid = X_valid.drop(cat_cols, axis=1)
OH_X_train = pd.concat([num_X_train, OH_cols_train], axis=1)
OH_X_valid = pd.concat([num_X_valid, OH_cols_valid], axis=1)
test = pd.read_csv('../input/30-days-of-ml/test.csv')
test
X_testID = test['id']
test.drop('id', axis=1, inplace=True)
X_test = test
X_test.drop('cat9', axis=1, inplace=True)
X_test | code |
72106185/cell_14 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import OneHotEncoder
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
train
train.drop('id', axis=1, inplace=True)
na_count = pd.Series([train[col].isna().sum() for col in train.columns], index=train.columns, name='NA Count')
na_count
y = train['target']
X = train.drop('target', axis=1)
num_cols = [col for col in X.columns if X[col].dtype in ['int64', 'float64']]
cat_cols = [col for col in X.columns if X[col].dtype == 'object' and X[col].nunique() < 10]
cardinality = pd.Series([X[col].nunique() for col in cat_cols], index=cat_cols, name='Cardinality')
cardinality
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
# Assumed train/validation split: the original split cell is not part of this row's code
X_train, X_valid, y_train, y_valid = train_test_split(X, y, random_state=0)
OHE = OneHotEncoder(handle_unknown='ignore', sparse=False)
OH_cols_train = pd.DataFrame(OHE.fit_transform(X_train[cat_cols]))
OH_cols_valid = pd.DataFrame(OHE.transform(X_valid[cat_cols]))
OH_cols_train.index = X_train.index
OH_cols_valid.index = X_valid.index
num_X_train = X_train.drop(cat_cols, axis=1)
num_X_valid = X_valid.drop(cat_cols, axis=1)
OH_X_train = pd.concat([num_X_train, OH_cols_train], axis=1)
OH_X_valid = pd.concat([num_X_valid, OH_cols_valid], axis=1)
test = pd.read_csv('../input/30-days-of-ml/test.csv')
test | code |
72106185/cell_12 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | from sklearn.preprocessing import OneHotEncoder
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
train
train.drop('id', axis=1, inplace=True)
na_count = pd.Series([train[col].isna().sum() for col in train.columns], index=train.columns, name='NA Count')
na_count
y = train['target']
X = train.drop('target', axis=1)
num_cols = [col for col in X.columns if X[col].dtype in ['int64', 'float64']]
cat_cols = [col for col in X.columns if X[col].dtype == 'object' and X[col].nunique() < 10]
cardinality = pd.Series([X[col].nunique() for col in cat_cols], index=cat_cols, name='Cardinality')
cardinality
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
# Assumed train/validation split: the original split cell is not part of this row's code
X_train, X_valid, y_train, y_valid = train_test_split(X, y, random_state=0)
OHE = OneHotEncoder(handle_unknown='ignore', sparse=False)
OH_cols_train = pd.DataFrame(OHE.fit_transform(X_train[cat_cols]))
OH_cols_valid = pd.DataFrame(OHE.transform(X_valid[cat_cols]))
OH_cols_train.index = X_train.index
OH_cols_valid.index = X_valid.index
num_X_train = X_train.drop(cat_cols, axis=1)
num_X_valid = X_valid.drop(cat_cols, axis=1)
OH_X_train = pd.concat([num_X_train, OH_cols_train], axis=1)
OH_X_valid = pd.concat([num_X_valid, OH_cols_valid], axis=1)
OH_X_train | code |
2010808/cell_6 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.impute import SimpleImputer  # Imputer was removed from sklearn.preprocessing in scikit-learn 0.22
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
y = df['Survived']
columns = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']
df = df[columns]
from sklearn.preprocessing import OneHotEncoder
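# get_dummies one-hot encodes the string columns (Sex, Embarked); integer Pclass is untouched and handled manually below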
df = pd.get_dummies(df)
Pclass_1 = df['Pclass'] == 1
Pclass_2 = df['Pclass'] == 2
Pclass_3 = df['Pclass'] == 3
df = df.join(Pclass_1, lsuffix='', rsuffix='_1')
df = df.join(Pclass_2, lsuffix='', rsuffix='_2')
df = df.join(Pclass_3, lsuffix='', rsuffix='_3')
del df['Pclass']
X = df
my_pipeline = make_pipeline(SimpleImputer(), LogisticRegression())
my_pipeline.fit(X, y)
my_pipeline.score(X, y) | code |
2010808/cell_1 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
2010808/cell_7 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.impute import SimpleImputer  # Imputer was removed from sklearn.preprocessing in scikit-learn 0.22
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
y = df['Survived']
columns = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']
df = df[columns]
from sklearn.preprocessing import OneHotEncoder
df = pd.get_dummies(df)
Pclass_1 = df['Pclass'] == 1
Pclass_2 = df['Pclass'] == 2
Pclass_3 = df['Pclass'] == 3
df = df.join(Pclass_1, lsuffix='', rsuffix='_1')
df = df.join(Pclass_2, lsuffix='', rsuffix='_2')
df = df.join(Pclass_3, lsuffix='', rsuffix='_3')
del df['Pclass']
X = df
test = pd.read_csv('../input/test.csv')
test = test[columns]
X_test = pd.get_dummies(test)
Pclass_1 = X_test['Pclass'] == 1
Pclass_2 = X_test['Pclass'] == 2
Pclass_3 = X_test['Pclass'] == 3
X_test = X_test.join(Pclass_1, lsuffix='', rsuffix='_1')
X_test = X_test.join(Pclass_2, lsuffix='', rsuffix='_2')
X_test = X_test.join(Pclass_3, lsuffix='', rsuffix='_3')
del X_test['Pclass']
my_pipeline = make_pipeline(SimpleImputer(), LogisticRegression())
my_pipeline.fit(X, y)
my_pipeline.score(X, y)
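# Note: gender_submission.csv is Kaggle's sample baseline (survival = female), not ground truth, so this "test" score is only indicative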
df = pd.read_csv('../input/gender_submission.csv')
y_test = df.Survived
my_pipeline.score(X_test, y_test) | code |
2024726/cell_42 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
plt.figure(figsize=(14, 12))
# Restrict to numeric columns; the raw frame still holds strings (Name, Sex, ...), so astype(float) would fail
sns.heatmap(train_df.select_dtypes(include='number').corr(), annot=True) | code |
2024726/cell_21 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
plt.figure(figsize=(20, 7))
sns.boxplot(x='Pclass', y='Fare', data=test_df) | code |
2024726/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
print(train_df[train_df['Pclass'] == 1]['Age'].mean())
print(train_df[train_df['Pclass'] == 2]['Age'].mean())
print(train_df[train_df['Pclass'] == 3]['Age'].mean()) | code |
2024726/cell_9 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import seaborn as sns
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
sns.countplot(x='Survived', hue='Sex', data=train_df)
print(train_df[train_df['Sex'] == 'male']['Survived'].value_counts())
print(train_df[train_df['Sex'] == 'male']['Survived'].value_counts(normalize=True))
print(train_df[train_df['Sex'] == 'female']['Survived'].value_counts())
print(train_df[train_df['Sex'] == 'female']['Survived'].value_counts(normalize=True)) | code |
2024726/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
train_df.info() | code |
2024726/cell_44 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
train_x = train_df.drop(['Survived'], axis=1)
train_y = train_df['Survived']
test_x = test_df
train_x.head() | code |
2024726/cell_20 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
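# Fill missing ages with a representative age for the passenger's class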
def immute_ages(cols):
Age = cols[0]
Pclass = cols[1]
if pd.isnull(Age):
if Pclass == 1:
return 38
if Pclass == 2:
return 30
else:
return 25
else:
return Age
train_df[pd.isnull(train_df['Embarked'])] | code |
2024726/cell_55 | [
"text_plain_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
def includeFamilySize(df):
df['FamilySize'] = df['SibSp'] + df['Parch'] + 1
df['isAlone'] = 0
df.loc[df['FamilySize'] == 1, 'isAlone'] = 1
def immute_ages(cols):
Age = cols[0]
Pclass = cols[1]
if pd.isnull(Age):
if Pclass == 1:
return 38
if Pclass == 2:
return 30
else:
return 25
else:
return Age
train_df[pd.isnull(train_df['Embarked'])]
def fare_age_Categ(df):
df['CategoricalFare'] = pd.qcut(df['Fare'], 4)
df['CategoricalAge'] = pd.cut(df['Age'], 5)
retest = pd.read_csv('../input/test.csv')  # the test file lives under ../input/, as in the other cells
# svc_pred_test is produced by the SVC prediction cell, which is not included in this row's code
final_df = pd.DataFrame({'PassengerId': retest['PassengerId'], 'Survived': svc_pred_test})
final_df.to_csv('rf.csv', index=False) | code |
2024726/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
train_df.describe() | code |
2024726/cell_40 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
print(test_df.columns)
print(train_df.columns) | code |
2024726/cell_48 | [
"text_html_output_1.png"
] | from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
train_x = train_df.drop(['Survived'], axis=1)
train_y = train_df['Survived']
test_x = test_df
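# Helper: fit the given model, predict on test_df, and return (predictions, training-set accuracy).
# The full notebook numerically encoded the features in earlier cells that are not part of this row's code.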
def predict(train_x, train_y, test_df, algo):
algo.fit(train_x, train_y)
pred_y = algo.predict(test_df)
score = algo.score(train_x, train_y)
return (pred_y, score)
dt_pred_train, score = predict(train_x, train_y, train_x, DecisionTreeClassifier(max_depth=10, min_samples_split=5, random_state=1))
dt_pred_test, score = predict(train_x, train_y, test_x, DecisionTreeClassifier(max_depth=10, min_samples_split=5, random_state=1))
score | code |
2024726/cell_41 | [
"text_plain_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
train_df.head() | code |
2024726/cell_2 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
train_df.head() | code |
2024726/cell_11 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
sns.countplot(x='Survived', hue='Embarked', data=train_df) | code |
2024726/cell_50 | [
"text_html_output_1.png"
] | from sklearn.svm import SVC
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
train_x = train_df.drop(['Survived'], axis=1)
train_y = train_df['Survived']
test_x = test_df
def predict(train_x, train_y, test_df, algo):
algo.fit(train_x, train_y)
pred_y = algo.predict(test_df)
score = algo.score(train_x, train_y)
return (pred_y, score)
svc_model = SVC()
svc_pred_train, score = predict(train_x, train_y, train_x, svc_model)
svc_pred_test, score = predict(train_x, train_y, test_x, svc_model)
score | code |
2024726/cell_52 | [
"text_plain_output_1.png"
] | from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
train_x = train_df.drop(['Survived'], axis=1)
train_y = train_df['Survived']
test_x = test_df
def predict(train_x, train_y, test_df, algo):
algo.fit(train_x, train_y)
pred_y = algo.predict(test_df)
score = algo.score(train_x, train_y)
return (pred_y, score)
knn_pred_train, score = predict(train_x, train_y, train_x, KNeighborsClassifier())
knn_pred_test, score = predict(train_x, train_y, test_x, KNeighborsClassifier())
score | code |
2024726/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import KFold  # sklearn.cross_validation was removed in scikit-learn 0.20
import re
import warnings
warnings.filterwarnings('ignore') | code |
2024726/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
test_df.describe() | code |
2024726/cell_45 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
train_x = train_df.drop(['Survived'], axis=1)
train_y = train_df['Survived']
test_x = test_df
test_x.head() | code |
2024726/cell_49 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
train_x = train_df.drop(['Survived'], axis=1)
train_y = train_df['Survived']
test_x = test_df
def predict(train_x, train_y, test_df, algo):
algo.fit(train_x, train_y)
pred_y = algo.predict(test_df)
score = algo.score(train_x, train_y)
return (pred_y, score)
rfc = RandomForestClassifier(max_depth=10, min_samples_split=2, n_estimators=600, random_state=1)
rt_pred_train, score = predict(train_x, train_y, train_x, rfc)
rt_pred_test, score = predict(train_x, train_y, test_x, rfc)
score | code |
2024726/cell_18 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
def includeFamilySize(df):
    df['FamilySize'] = df['SibSp'] + df['Parch'] + 1
    df['isAlone'] = 0
    df.loc[df['FamilySize'] == 1, 'isAlone'] = 1
# Assumed: the original notebook ran this feature-engineering step in an earlier cell
includeFamilySize(train_df)
print(train_df[['FamilySize', 'Survived']].groupby(['FamilySize'], as_index=False).mean())
print(train_df[['isAlone', 'Survived']].groupby(['isAlone'], as_index=False).mean()) | code |
2024726/cell_51 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
train_x = train_df.drop(['Survived'], axis=1)
train_y = train_df['Survived']
test_x = test_df
def predict(train_x, train_y, test_df, algo):
algo.fit(train_x, train_y)
pred_y = algo.predict(test_df)
score = algo.score(train_x, train_y)
return (pred_y, score)
from sklearn.model_selection import GridSearchCV
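# Grid-search C and gamma for the SVC; refit=True retrains the best combination on all of the training data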
param_grid = {'C': [0.1, 1, 10, 100], 'gamma': [1, 0.1, 0.01, 0.001]}
grid = GridSearchCV(SVC(), param_grid, refit=True)
grid_pred_train, score = predict(train_x, train_y, train_x, grid)
grid_pred_test, score = predict(train_x, train_y, test_x, grid)
score | code |
2024726/cell_28 | [
"text_plain_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
def includeFamilySize(df):
df['FamilySize'] = df['SibSp'] + df['Parch'] + 1
df['isAlone'] = 0
df.loc[df['FamilySize'] == 1, 'isAlone'] = 1
def immute_ages(cols):
Age = cols[0]
Pclass = cols[1]
if pd.isnull(Age):
if Pclass == 1:
return 38
if Pclass == 2:
return 30
else:
return 25
else:
return Age
train_df[pd.isnull(train_df['Embarked'])]
def fare_age_Categ(df):
df['CategoricalFare'] = pd.qcut(df['Fare'], 4)
df['CategoricalAge'] = pd.cut(df['Age'], 5)
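# 'Title' is extracted from the Name column by an earlier cell that is not part of this row's code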
print(pd.crosstab(train_df['Title'], train_df['Sex'])) | code |
2024726/cell_8 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import seaborn as sns
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
sns.countplot(x='Survived', data=train_df)
print(train_df['Survived'].value_counts())
print(train_df['Survived'].value_counts(normalize=True)) | code |
2024726/cell_15 | [
"text_html_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
print(train_df[['Sex', 'Survived']].groupby(['Sex'], as_index=False).mean()) | code |
2024726/cell_47 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
train_x = train_df.drop(['Survived'], axis=1)
train_y = train_df['Survived']
test_x = test_df
def predict(train_x, train_y, test_df, algo):
algo.fit(train_x, train_y)
pred_y = algo.predict(test_df)
score = algo.score(train_x, train_y)
return (pred_y, score)
lr_pred_train, score = predict(train_x, train_y, train_x, LogisticRegression())
lr_pred_test, score = predict(train_x, train_y, test_x, LogisticRegression())
score | code |
2024726/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
test_df.head() | code |
2024726/cell_31 | [
"text_plain_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
print(train_df[['Title', 'Survived']].groupby(['Title'], as_index=False).mean()) | code |
2024726/cell_24 | [
"text_plain_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
def includeFamilySize(df):
df['FamilySize'] = df['SibSp'] + df['Parch'] + 1
df['isAlone'] = 0
df.loc[df['FamilySize'] == 1, 'isAlone'] = 1
def immute_ages(cols):
Age = cols[0]
Pclass = cols[1]
if pd.isnull(Age):
if Pclass == 1:
return 38
if Pclass == 2:
return 30
else:
return 25
else:
return Age
train_df[pd.isnull(train_df['Embarked'])]
def fare_age_Categ(df):
df['CategoricalFare'] = pd.qcut(df['Fare'], 4)
df['CategoricalAge'] = pd.cut(df['Age'], 5)
fare_age_Categ(train_df)
fare_age_Categ(test_df)
print(train_df[['CategoricalFare', 'Survived']].groupby(['CategoricalFare'], as_index=False).mean())
print(train_df[['CategoricalAge', 'Survived']].groupby(['CategoricalAge'], as_index=False).mean()) | code |
2024726/cell_14 | [
"text_html_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
print(train_df[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False).mean()) | code |
2024726/cell_22 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
test_df[test_df['Fare'].isnull()]
print(test_df[test_df['Pclass'] == 3]['Fare'].mean())
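# 12.45 is the Pclass 3 mean fare printed above, rounded to two decimals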
test_df['Fare'].fillna(12.45, inplace=True) | code |
2024726/cell_53 | [
"text_plain_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
def includeFamilySize(df):
df['FamilySize'] = df['SibSp'] + df['Parch'] + 1
df['isAlone'] = 0
df.loc[df['FamilySize'] == 1, 'isAlone'] = 1
def immute_ages(cols):
Age = cols[0]
Pclass = cols[1]
if pd.isnull(Age):
if Pclass == 1:
return 38
if Pclass == 2:
return 30
else:
return 25
else:
return Age
train_df[pd.isnull(train_df['Embarked'])]
def fare_age_Categ(df):
df['CategoricalFare'] = pd.qcut(df['Fare'], 4)
df['CategoricalAge'] = pd.cut(df['Age'], 5)
retest = pd.read_csv('../input/test.csv') | code |
2024726/cell_10 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
plt.figure(figsize=(10, 7))
sns.boxplot(x='Pclass', y='Age', data=train_df) | code |
2024726/cell_12 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
sns.countplot(x='Embarked', hue='Pclass', data=train_df) | code |
2024726/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
test_df.info() | code |
88083988/cell_9 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/grocery-products-purchase-data/Grocery Products Purchase.csv')
df1 = df.melt(var_name='columns', value_name='index')
unique_values = pd.DataFrame(df1['index'].value_counts())
unique_values.index
values = unique_values.head(20)
plt.figure(figsize=(18, 6))
plt.title('Frequency barplot')
ax = sns.barplot(x=values.index, y=values['index'])
plt.xticks(rotation='vertical')
ax.bar_label(ax.containers[0])
plt.show() | code |
88083988/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/grocery-products-purchase-data/Grocery Products Purchase.csv')
df.info() | code |
88083988/cell_1 | [
"text_plain_output_1.png"
] | !pip install apyori
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns | code |
88083988/cell_7 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/grocery-products-purchase-data/Grocery Products Purchase.csv')
df1 = df.melt(var_name='columns', value_name='index')
unique_values = pd.DataFrame(df1['index'].value_counts())
unique_values.head(10) | code |
88083988/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/grocery-products-purchase-data/Grocery Products Purchase.csv')
df1 = df.melt(var_name='columns', value_name='index')
unique_values = pd.DataFrame(df1['index'].value_counts())
unique_values.index | code |
88083988/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/grocery-products-purchase-data/Grocery Products Purchase.csv')
df | code |
88083988/cell_14 | [
"text_plain_output_1.png"
] | from apyori import apriori
import pandas as pd
df = pd.read_csv('../input/grocery-products-purchase-data/Grocery Products Purchase.csv')
df1 = df.melt(var_name='columns', value_name='index')
unique_values = pd.DataFrame(df1['index'].value_counts())
transacts = []
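# Build one basket of item strings per row; note that empty cells come through as the literal string 'nan'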
for i in range(len(df)):
transacts.append([str(df.values[i, j]) for j in range(len(df.columns))])
from apyori import apriori
rules = apriori(transactions=transacts, min_support=0.001, min_confidence=0.05, min_lift=1.01, min_length=2, max_length=2)
output = list(rules)
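# Each apyori record is (items, support, ordered_statistics); flatten the first ordered
# statistic's antecedent, consequent, confidence and lift into table columns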
def inspect(output):
lhs = [tuple(result[2][0][0])[0] for result in output]
rhs = [tuple(result[2][0][1])[0] for result in output]
support = [result[1] for result in output]
confidence = [result[2][0][2] for result in output]
lift = [result[2][0][3] for result in output]
return list(zip(lhs, rhs, support, confidence, lift))
output_DataFrame = pd.DataFrame(inspect(output), columns=['Left_Hand_Side', 'Right_Hand_Side', 'Support', 'Confidence', 'Lift'])
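# Rank rules by lift, i.e. how much more often the pair co-occurs than chance would predict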
output_DataFrame.sort_values(by='Lift', ascending=False).head(20) | code |
88083988/cell_10 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/grocery-products-purchase-data/Grocery Products Purchase.csv')
df1 = df.melt(var_name='columns', value_name='index')
unique_values = pd.DataFrame(df1['index'].value_counts())
unique_values.index
values = unique_values.head(20)
plt.figure(figsize=(18, 6))
plt.title('Frequency barplot')
ax = sns.barplot(x=values.index, y=values['index'])
plt.xticks(rotation='vertical')
ax.bar_label(ax.containers[0])  # annotate each bar with its count
plt.show()
unique_values.sum() | code |
88083988/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/grocery-products-purchase-data/Grocery Products Purchase.csv')
df.describe() | code |
34150863/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import requests
import requests
url = 'https://www.worldometers.info/coronavirus/#countries'
header = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.75 Safari/537.36', 'X-Requested-With': 'XMLHttpRequest'}
r = requests.get(url, headers=header)
dfs = pd.read_html(r.text)
df = dfs[0]
time_series = pd.read_csv('../input/corona-virus-report/covid_19_clean_complete.csv', parse_dates=['Date'])
df.isnull().sum()
df.rename(columns={'Country,Other': 'Country'}, inplace=True)
df.rename(columns={'Serious,Critical': 'Serious'}, inplace=True)
df.rename(columns={'Tot Cases/1M pop': 'TotalCases/1M'}, inplace=True)
time_series.rename(columns={'Country/Region': 'Country'}, inplace=True)
df.drop('#', axis=1, inplace=True)
df.dtypes
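# Slice keeps the per-country rows (the source table's first row is an aggregate total) and drops the trailing column before styling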
dataframe = df.iloc[1:216, :-1]
dataframe.style.background_gradient(cmap='Reds') | code |
34150863/cell_9 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import requests
import requests
url = 'https://www.worldometers.info/coronavirus/#countries'
header = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.75 Safari/537.36', 'X-Requested-With': 'XMLHttpRequest'}
r = requests.get(url, headers=header)
dfs = pd.read_html(r.text)
df = dfs[0]
time_series = pd.read_csv('../input/corona-virus-report/covid_19_clean_complete.csv', parse_dates=['Date'])
df.isnull().sum()
df.rename(columns={'Country,Other': 'Country'}, inplace=True)
df.rename(columns={'Serious,Critical': 'Serious'}, inplace=True)
df.rename(columns={'Tot Cases/1M pop': 'TotalCases/1M'}, inplace=True)
time_series.rename(columns={'Country/Region': 'Country'}, inplace=True)
df.drop('#', axis=1, inplace=True)
time_series.drop('Province/State', axis=1, inplace=True)
time_series.head() | code |
34150863/cell_4 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import cufflinks as cf
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
init_notebook_mode(connected=True)
cf.go_offline()
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import warnings
warnings.filterwarnings('ignore') | code |
34150863/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import requests
import requests
url = 'https://www.worldometers.info/coronavirus/#countries'
header = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.75 Safari/537.36', 'X-Requested-With': 'XMLHttpRequest'}
r = requests.get(url, headers=header)
dfs = pd.read_html(r.text)
df = dfs[0]
time_series = pd.read_csv('../input/corona-virus-report/covid_19_clean_complete.csv', parse_dates=['Date'])
type(df) | code |
34150863/cell_19 | [
"text_plain_output_1.png"
] | from plotly.subplots import make_subplots
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
import plotly.express as px
import requests
import requests
url = 'https://www.worldometers.info/coronavirus/#countries'
header = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.75 Safari/537.36', 'X-Requested-With': 'XMLHttpRequest'}
r = requests.get(url, headers=header)
dfs = pd.read_html(r.text)
df = dfs[0]
time_series = pd.read_csv('../input/corona-virus-report/covid_19_clean_complete.csv', parse_dates=['Date'])
df.isnull().sum()
df.rename(columns={'Country,Other': 'Country'}, inplace=True)
df.rename(columns={'Serious,Critical': 'Serious'}, inplace=True)
df.rename(columns={'Tot Cases/1M pop': 'TotalCases/1M'}, inplace=True)
time_series.rename(columns={'Country/Region': 'Country'}, inplace=True)
df.drop('#', axis=1, inplace=True)
time_series.drop('Province/State', axis=1, inplace=True)
df.dtypes
df['TotalCases'] = df['TotalCases'].fillna(0).astype('int')
df['TotalDeaths'] = df['TotalDeaths'].fillna(0).astype('int')
df['TotalRecovered'] = df['TotalRecovered'].fillna(0).astype('int')
df['ActiveCases'] = df['ActiveCases'].fillna(0).astype('int')
df['Serious'] = df['Serious'].fillna(0).astype('int')
df['Deaths/1M pop'] = df['Deaths/1M pop'].fillna(0).astype('int')
df['TotalTests'] = df['TotalTests'].fillna(0).astype('int')
df['Tests/ 1M pop'] = df['Tests/ 1M pop'].fillna(0).astype('int')
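# NewCases/NewDeaths arrive as strings like '+1,234'; fill gaps, strip '+' and ',', then cast to int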
df['NewCases'] = df['NewCases'].fillna(0)
df[['NewCases']] = df[['NewCases']].replace('[\\+,]', '', regex=True).astype(int)
df['NewDeaths'] = df['NewDeaths'].fillna(0)
df[['NewDeaths']] = df[['NewDeaths']].replace('[\\+,]', '', regex=True).astype(int)
df[['Population']] = df[['Population']].fillna(0).astype('int')
time_series = time_series.fillna(0)
time_series.isnull().sum()
dataframe = df.iloc[1:216, :-1]
dataframe.style.background_gradient(cmap='Reds')
group1 = time_series.groupby(['Date', 'Country'])[['Confirmed', 'Deaths', 'Recovered']].sum().reset_index()
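# Color by log(count) so a handful of very large countries don't wash out the scale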
heat = px.choropleth(group1, locations='Country', locationmode='country names', color=np.log(group1['Confirmed']), hover_name='Country', projection='natural earth', title='Heatmap', color_continuous_scale=px.colors.sequential.Blues)
heat.update(layout_coloraxis_showscale=False)
fig_heat = px.choropleth(group1, locations='Country', locationmode='country names', color=np.log(group1['Deaths']), hover_name='Country', projection='natural earth', title='Heatmap(Deaths)', color_continuous_scale=px.colors.sequential.Reds)
fig_heat.update(layout_coloraxis_showscale=False)
fig_z = px.bar(dataframe.sort_values('TotalCases'), x='TotalCases', y='Country', orientation='h', color_discrete_sequence=['#B3611A'], text='TotalCases', title='TotalCases')
fig_x = px.bar(dataframe.sort_values('TotalDeaths'), x='TotalDeaths', y='Country', orientation='h', color_discrete_sequence=['#830707'], text='TotalDeaths', title='TotalDeaths')
fig_ = px.bar(dataframe.sort_values('TotalRecovered'), x='TotalRecovered', y='Country', orientation='h', color_discrete_sequence=['#073707'], text='TotalRecovered', title='TotalRecovered')
fig_p = make_subplots(rows=1, cols=3, subplot_titles=('TotalCases', 'TotalDeaths', 'TotalRecovered'))
fig_p.add_trace(fig_z['data'][0], row=1, col=1)
fig_p.add_trace(fig_x['data'][0], row=1, col=2)
fig_p.add_trace(fig_['data'][0], row=1, col=3)
fig_p.update_layout(height=3000, title='Per Country')
fig_p.show() | code |
34150863/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import requests
import requests
url = 'https://www.worldometers.info/coronavirus/#countries'
header = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.75 Safari/537.36', 'X-Requested-With': 'XMLHttpRequest'}
r = requests.get(url, headers=header)
dfs = pd.read_html(r.text)
df = dfs[0]
time_series = pd.read_csv('../input/corona-virus-report/covid_19_clean_complete.csv', parse_dates=['Date'])
df.isnull().sum() | code |
34150863/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import requests
import requests
url = 'https://www.worldometers.info/coronavirus/#countries'
header = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.75 Safari/537.36', 'X-Requested-With': 'XMLHttpRequest'}
r = requests.get(url, headers=header)
dfs = pd.read_html(r.text)
df = dfs[0]
time_series = pd.read_csv('../input/corona-virus-report/covid_19_clean_complete.csv', parse_dates=['Date'])
df.isnull().sum()
df.rename(columns={'Country,Other': 'Country'}, inplace=True)
df.rename(columns={'Serious,Critical': 'Serious'}, inplace=True)
df.rename(columns={'Tot Cases/1M pop': 'TotalCases/1M'}, inplace=True)
time_series.rename(columns={'Country/Region': 'Country'}, inplace=True)
df.drop('#', axis=1, inplace=True)
df.dtypes
dataframe = df.iloc[1:216, :-1]
dataframe.style.background_gradient(cmap='Reds')
dataframe.head() | code |
34150863/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import requests
import requests
url = 'https://www.worldometers.info/coronavirus/#countries'
header = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.75 Safari/537.36', 'X-Requested-With': 'XMLHttpRequest'}
r = requests.get(url, headers=header)
dfs = pd.read_html(r.text)
df = dfs[0]
time_series = pd.read_csv('../input/corona-virus-report/covid_19_clean_complete.csv', parse_dates=['Date'])
df.isnull().sum()
df.rename(columns={'Country,Other': 'Country'}, inplace=True)
df.rename(columns={'Serious,Critical': 'Serious'}, inplace=True)
df.rename(columns={'Tot Cases/1M pop': 'TotalCases/1M'}, inplace=True)
time_series.rename(columns={'Country/Region': 'Country'}, inplace=True)
df.drop('#', axis=1, inplace=True)
df.head() | code |
34150863/cell_15 | [
"text_html_output_1.png"
] | import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
import plotly.express as px
import requests
import requests
url = 'https://www.worldometers.info/coronavirus/#countries'
header = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.75 Safari/537.36', 'X-Requested-With': 'XMLHttpRequest'}
r = requests.get(url, headers=header)
dfs = pd.read_html(r.text)
df = dfs[0]
time_series = pd.read_csv('../input/corona-virus-report/covid_19_clean_complete.csv', parse_dates=['Date'])
df.isnull().sum()
df.rename(columns={'Country,Other': 'Country'}, inplace=True)
df.rename(columns={'Serious,Critical': 'Serious'}, inplace=True)
df.rename(columns={'Tot Cases/1M pop': 'TotalCases/1M'}, inplace=True)
time_series.rename(columns={'Country/Region': 'Country'}, inplace=True)
df.drop('#', axis=1, inplace=True)
time_series.drop('Province/State', axis=1, inplace=True)
df.dtypes
df['TotalCases'] = df['TotalCases'].fillna(0).astype('int')
df['TotalDeaths'] = df['TotalDeaths'].fillna(0).astype('int')
df['TotalRecovered'] = df['TotalRecovered'].fillna(0).astype('int')
df['ActiveCases'] = df['ActiveCases'].fillna(0).astype('int')
df['Serious'] = df['Serious'].fillna(0).astype('int')
df['Deaths/1M pop'] = df['Deaths/1M pop'].fillna(0).astype('int')
df['TotalTests'] = df['TotalTests'].fillna(0).astype('int')
df['Tests/ 1M pop'] = df['Tests/ 1M pop'].fillna(0).astype('int')
df['NewCases'] = df['NewCases'].fillna(0)
df[['NewCases']] = df[['NewCases']].replace('[\\+,]', '', regex=True).astype(int)
df['NewDeaths'] = df['NewDeaths'].fillna(0)
df[['NewDeaths']] = df[['NewDeaths']].replace('[\\+,]', '', regex=True).astype(int)
df[['Population']] = df[['Population']].fillna(0).astype('int')
time_series = time_series.fillna(0)
time_series.isnull().sum()
group1 = time_series.groupby(['Date', 'Country'])[['Confirmed', 'Deaths', 'Recovered']].sum().reset_index()
heat = px.choropleth(group1, locations='Country', locationmode='country names', color=np.log(group1['Confirmed']), hover_name='Country', projection='natural earth', title='Heatmap', color_continuous_scale=px.colors.sequential.Blues)
heat.update(layout_coloraxis_showscale=False)
heat.show() | code |
34150863/cell_3 | [
"text_html_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
34150863/cell_17 | [
"text_html_output_1.png"
] | import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
import plotly.express as px
import requests
import requests
url = 'https://www.worldometers.info/coronavirus/#countries'
header = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.75 Safari/537.36', 'X-Requested-With': 'XMLHttpRequest'}
r = requests.get(url, headers=header)
dfs = pd.read_html(r.text)
df = dfs[0]
time_series = pd.read_csv('../input/corona-virus-report/covid_19_clean_complete.csv', parse_dates=['Date'])
df.isnull().sum()
df.rename(columns={'Country,Other': 'Country'}, inplace=True)
df.rename(columns={'Serious,Critical': 'Serious'}, inplace=True)
df.rename(columns={'Tot Cases/1M pop': 'TotalCases/1M'}, inplace=True)
time_series.rename(columns={'Country/Region': 'Country'}, inplace=True)
df.drop('#', axis=1, inplace=True)
time_series.drop('Province/State', axis=1, inplace=True)
df.dtypes
df['TotalCases'] = df['TotalCases'].fillna(0).astype('int')
df['TotalDeaths'] = df['TotalDeaths'].fillna(0).astype('int')
df['TotalRecovered'] = df['TotalRecovered'].fillna(0).astype('int')
df['ActiveCases'] = df['ActiveCases'].fillna(0).astype('int')
df['Serious'] = df['Serious'].fillna(0).astype('int')
df['Deaths/1M pop'] = df['Deaths/1M pop'].fillna(0).astype('int')
df['TotalTests'] = df['TotalTests'].fillna(0).astype('int')
df['Tests/ 1M pop'] = df['Tests/ 1M pop'].fillna(0).astype('int')
df['NewCases'] = df['NewCases'].fillna(0)
df[['NewCases']] = df[['NewCases']].replace('[\\+,]', '', regex=True).astype(int)
df['NewDeaths'] = df['NewDeaths'].fillna(0)
df[['NewDeaths']] = df[['NewDeaths']].replace('[\\+,]', '', regex=True).astype(int)
df[['Population']] = df[['Population']].fillna(0).astype('int')
time_series = time_series.fillna(0)
time_series.isnull().sum()
group1 = time_series.groupby(['Date', 'Country'])[['Confirmed', 'Deaths', 'Recovered']].sum().reset_index()
heat = px.choropleth(group1, locations='Country', locationmode='country names', color=np.log(group1['Confirmed']), hover_name='Country', projection='natural earth', title='Heatmap', color_continuous_scale=px.colors.sequential.Blues)
heat.update(layout_coloraxis_showscale=False)
fig_heat = px.choropleth(group1, locations='Country', locationmode='country names', color=np.log(group1['Deaths']), hover_name='Country', projection='natural earth', title='Heatmap(Deaths)', color_continuous_scale=px.colors.sequential.Reds)
fig_heat.update(layout_coloraxis_showscale=False)
fig_heat.show() | code |
34150863/cell_22 | [
"text_html_output_1.png"
] | from plotly.subplots import make_subplots
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
import plotly.express as px
import requests
import requests
url = 'https://www.worldometers.info/coronavirus/#countries'
header = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.75 Safari/537.36', 'X-Requested-With': 'XMLHttpRequest'}
r = requests.get(url, headers=header)
dfs = pd.read_html(r.text)
df = dfs[0]
time_series = pd.read_csv('../input/corona-virus-report/covid_19_clean_complete.csv', parse_dates=['Date'])
df.isnull().sum()
df.rename(columns={'Country,Other': 'Country'}, inplace=True)
df.rename(columns={'Serious,Critical': 'Serious'}, inplace=True)
df.rename(columns={'Tot Cases/1M pop': 'TotalCases/1M'}, inplace=True)
time_series.rename(columns={'Country/Region': 'Country'}, inplace=True)
df.drop('#', axis=1, inplace=True)
time_series.drop('Province/State', axis=1, inplace=True)
df.dtypes
df['TotalCases'] = df['TotalCases'].fillna(0).astype('int')
df['TotalDeaths'] = df['TotalDeaths'].fillna(0).astype('int')
df['TotalRecovered'] = df['TotalRecovered'].fillna(0).astype('int')
df['ActiveCases'] = df['ActiveCases'].fillna(0).astype('int')
df['Serious'] = df['Serious'].fillna(0).astype('int')
df['Deaths/1M pop'] = df['Deaths/1M pop'].fillna(0).astype('int')
df['TotalTests'] = df['TotalTests'].fillna(0).astype('int')
df['Tests/ 1M pop'] = df['Tests/ 1M pop'].fillna(0).astype('int')
df['NewCases'] = df['NewCases'].fillna(0)
df[['NewCases']] = df[['NewCases']].replace('[\\+,]', '', regex=True).astype(int)
df['NewDeaths'] = df['NewDeaths'].fillna(0)
df[['NewDeaths']] = df[['NewDeaths']].replace('[\\+,]', '', regex=True).astype(int)
df[['Population']] = df[['Population']].fillna(0).astype('int')
time_series = time_series.fillna(0)
time_series.isnull().sum()
dataframe = df.iloc[1:216, :-1]
dataframe.style.background_gradient(cmap='Reds')
group1 = time_series.groupby(['Date', 'Country'])[['Confirmed', 'Deaths', 'Recovered']].sum().reset_index()
heat = px.choropleth(group1, locations='Country', locationmode='country names', color=np.log(group1['Confirmed']), hover_name='Country', projection='natural earth', title='Heatmap', color_continuous_scale=px.colors.sequential.Blues)
heat.update(layout_coloraxis_showscale=False)
fig_heat = px.choropleth(group1, locations='Country', locationmode='country names', color=np.log(group1['Deaths']), hover_name='Country', projection='natural earth', title='Heatmap(Deaths)', color_continuous_scale=px.colors.sequential.Reds)
fig_heat.update(layout_coloraxis_showscale=False)
fig_z = px.bar(dataframe.sort_values('TotalCases'), x='TotalCases', y='Country', orientation='h', color_discrete_sequence=['#B3611A'], text='TotalCases', title='TotalCases')
fig_x = px.bar(dataframe.sort_values('TotalDeaths'), x='TotalDeaths', y='Country', orientation='h', color_discrete_sequence=['#830707'], text='TotalDeaths', title='TotalDeaths')
fig_ = px.bar(dataframe.sort_values('TotalRecovered'), x='TotalRecovered', y='Country', orientation='h', color_discrete_sequence=['#073707'], text='TotalRecovered', title='TotalRecovered')
fig_p = make_subplots(rows=1, cols=3, subplot_titles=('TotalCases', 'TotalDeaths', 'TotalRecovered'))
fig_p.add_trace(fig_z['data'][0], row=1, col=1)
fig_p.add_trace(fig_x['data'][0], row=1, col=2)
fig_p.add_trace(fig_['data'][0], row=1, col=3)
fig_p.update_layout(height=3000, title='Per Country')
fig_p.show()
totalCases = df.iloc[1:21, 0:2]
df1 = df[['Country', 'TotalDeaths']]
TotalDeaths = df1[1:21]
df2 = df[['Country', 'TotalRecovered']]
totalrecovered = df2[1:21]
data = totalCases.sort_values('TotalCases')
data1 = TotalDeaths.sort_values('TotalDeaths')
data2 = totalrecovered.sort_values('TotalRecovered')
fig1 = px.bar(data, x='TotalCases', y='Country', orientation='h', color_discrete_sequence=['#B3611A'], text='TotalCases')
fig2 = px.bar(data1, x='TotalDeaths', y='Country', orientation='h', color_discrete_sequence=['#830707'], text='TotalDeaths')
fig3 = px.bar(data2, x='TotalRecovered', y='Country', orientation='h', color_discrete_sequence=['#073707'], text='TotalRecovered')
fig = make_subplots(rows=2, cols=3, subplot_titles=('Totalconfirmed', 'Total deaths', 'total Recovered'))
fig.add_trace(fig1['data'][0], row=1, col=1)
fig.add_trace(fig2['data'][0], row=1, col=2)
fig.add_trace(fig3['data'][0], row=1, col=3)
fig.update_layout(height=1200, title='Top 20 Countries')
fig.show() | code |
34150863/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import requests
import requests
url = 'https://www.worldometers.info/coronavirus/#countries'
header = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.75 Safari/537.36', 'X-Requested-With': 'XMLHttpRequest'}
r = requests.get(url, headers=header)
dfs = pd.read_html(r.text)
df = dfs[0]
time_series = pd.read_csv('../input/corona-virus-report/covid_19_clean_complete.csv', parse_dates=['Date'])
df.isnull().sum()
df.rename(columns={'Country,Other': 'Country'}, inplace=True)
df.rename(columns={'Serious,Critical': 'Serious'}, inplace=True)
df.rename(columns={'Tot Cases/1M pop': 'TotalCases/1M'}, inplace=True)
time_series.rename(columns={'Country/Region': 'Country'}, inplace=True)
df.drop('#', axis=1, inplace=True)
df.dtypes | code |
34150863/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import requests
import requests
url = 'https://www.worldometers.info/coronavirus/#countries'
header = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.75 Safari/537.36', 'X-Requested-With': 'XMLHttpRequest'}
r = requests.get(url, headers=header)
dfs = pd.read_html(r.text)
df = dfs[0]
time_series = pd.read_csv('../input/corona-virus-report/covid_19_clean_complete.csv', parse_dates=['Date'])
df.isnull().sum()
df.rename(columns={'Country,Other': 'Country'}, inplace=True)
df.rename(columns={'Serious,Critical': 'Serious'}, inplace=True)
df.rename(columns={'Tot Cases/1M pop': 'TotalCases/1M'}, inplace=True)
time_series.rename(columns={'Country/Region': 'Country'}, inplace=True)
df.drop('#', axis=1, inplace=True)
df.dtypes
df.head() | code |
18116817/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
raw_data = pd.read_csv('../input/cumulative.csv')
koi_pond = ['koi_pdisposition', 'koi_tce_plnt_num', 'koi_tce_delivname', 'kepler_name']
cols = raw_data.columns
for c in cols:
if 'err' in c:
koi_pond.append(c)
data = raw_data.drop(koi_pond, axis=1)
data = data.dropna()
data.rename(columns={'kepid': 'kepler_id', 'kepoi_name': 'koi_name', 'koi_disposition': 'plnt_disposition', 'koi_score': 'plnt_disp_confidence', 'koi_fpflag_nt': 'flag_nTransitLk', 'koi_fpflag_ss': 'flag_scndEvent', 'koi_fpflag_co': 'flag_centroidOffset', 'koi_fpflag_ec': 'flag_ephMatch', 'koi_period': 'orbital_period', 'koi_time0bk': 'transit_epoch', 'koi_impact': 'impact_parameter', 'koi_duration': 'transit_duration', 'koi_depth': 'transit_depth', 'koi_prad': 'planetary_radius', 'koi_teq': 'equ_temp', 'koi_insol': 'insolation_flux', 'koi_model_snr': 'transit_sigToNoise', 'koi_steff': 'stellar_eff_temp', 'koi_slogg': 'stellar_surf_gravity', 'koi_srad': 'stellar_radius', 'ra': 'right_acension', 'dec': 'declination', 'koi_kepmag': 'kepler_magnitude'}, inplace=True)
data.tail() | code |
18116817/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
raw_data = pd.read_csv('../input/cumulative.csv')
koi_pond = ['koi_pdisposition', 'koi_tce_plnt_num', 'koi_tce_delivname', 'kepler_name']
cols = raw_data.columns
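# Add every measurement-uncertainty column (name contains 'err') to the drop list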
for c in cols:
if 'err' in c:
koi_pond.append(c)
data = raw_data.drop(koi_pond, axis=1)
print('Dropped:\n\n', koi_pond)
print(f'\nYour dataset had {raw_data.shape[1]} columns.\nIt now has {data.shape[1]} columns.') | code |
18116817/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
raw_data = pd.read_csv('../input/cumulative.csv')
koi_pond = ['koi_pdisposition', 'koi_tce_plnt_num', 'koi_tce_delivname', 'kepler_name']
cols = raw_data.columns
for c in cols:
if 'err' in c:
koi_pond.append(c)
data = raw_data.drop(koi_pond, axis=1)
data = data.dropna()
print(f'The dataset had {raw_data.shape[0]} rows. It now has {data.shape[0]} rows.\n({raw_data.shape[0] - data.shape[0]} rows were dropped, leaving you with {round(data.shape[0] / raw_data.shape[0] * 100, 2)}% of the original number of entries.)') | code |
18116817/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
raw_data = pd.read_csv('../input/cumulative.csv')
print('Here are all the columns:\n')
print(raw_data.columns.tolist()) | code |
18116817/cell_18 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
raw_data = pd.read_csv('../input/cumulative.csv')
koi_pond = ['koi_pdisposition', 'koi_tce_plnt_num', 'koi_tce_delivname', 'kepler_name']
cols = raw_data.columns
for c in cols:
if 'err' in c:
koi_pond.append(c)
data = raw_data.drop(koi_pond, axis=1)
data = data.dropna()
data.rename(columns={'kepid': 'kepler_id', 'kepoi_name': 'koi_name', 'koi_disposition': 'plnt_disposition', 'koi_score': 'plnt_disp_confidence', 'koi_fpflag_nt': 'flag_nTransitLk', 'koi_fpflag_ss': 'flag_scndEvent', 'koi_fpflag_co': 'flag_centroidOffset', 'koi_fpflag_ec': 'flag_ephMatch', 'koi_period': 'orbital_period', 'koi_time0bk': 'transit_epoch', 'koi_impact': 'impact_parameter', 'koi_duration': 'transit_duration', 'koi_depth': 'transit_depth', 'koi_prad': 'planetary_radius', 'koi_teq': 'equ_temp', 'koi_insol': 'insolation_flux', 'koi_model_snr': 'transit_sigToNoise', 'koi_steff': 'stellar_eff_temp', 'koi_slogg': 'stellar_surf_gravity', 'koi_srad': 'stellar_radius', 'ra': 'right_acension', 'dec': 'declination', 'koi_kepmag': 'kepler_magnitude'}, inplace=True)
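# Bin stellar effective temperature (K) into the standard O/B/A/F/G/K/M spectral classes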
def spectral_classify(temp):
if temp > 33000:
return 'O'
elif temp > 10000:
return 'B'
elif temp > 7500:
return 'A'
elif temp > 6000:
return 'F'
elif temp > 5200:
return 'G'
elif temp > 3700:
return 'K'
else:
return 'M'
spectral_classes = []
for temp in data['stellar_eff_temp'].values:
spectral_classes.append(spectral_classify(temp))
data['stellar_spectral_cl'] = spectral_classes
print('Spectral classes added to dataset.') | code |
18116817/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
raw_data = pd.read_csv('../input/cumulative.csv')
koi_pond = ['koi_pdisposition', 'koi_tce_plnt_num', 'koi_tce_delivname', 'kepler_name']
cols = raw_data.columns
for c in cols:
if 'err' in c:
koi_pond.append(c)
data = raw_data.drop(koi_pond, axis=1)
data = data.dropna()
data.rename(columns={'kepid': 'kepler_id', 'kepoi_name': 'koi_name', 'koi_disposition': 'plnt_disposition', 'koi_score': 'plnt_disp_confidence', 'koi_fpflag_nt': 'flag_nTransitLk', 'koi_fpflag_ss': 'flag_scndEvent', 'koi_fpflag_co': 'flag_centroidOffset', 'koi_fpflag_ec': 'flag_ephMatch', 'koi_period': 'orbital_period', 'koi_time0bk': 'transit_epoch', 'koi_impact': 'impact_parameter', 'koi_duration': 'transit_duration', 'koi_depth': 'transit_depth', 'koi_prad': 'planetary_radius', 'koi_teq': 'equ_temp', 'koi_insol': 'insolation_flux', 'koi_model_snr': 'transit_sigToNoise', 'koi_steff': 'stellar_eff_temp', 'koi_slogg': 'stellar_surf_gravity', 'koi_srad': 'stellar_radius', 'ra': 'right_acension', 'dec': 'declination', 'koi_kepmag': 'kepler_magnitude'}, inplace=True)
print(data.columns.tolist()) | code |
18116817/cell_22 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns # more plots
raw_data = pd.read_csv('../input/cumulative.csv')
koi_pond = ['koi_pdisposition', 'koi_tce_plnt_num', 'koi_tce_delivname', 'kepler_name']
cols = raw_data.columns
for c in cols:
if 'err' in c:
koi_pond.append(c)
data = raw_data.drop(koi_pond, axis=1)
data = data.dropna()
data.rename(columns={'kepid': 'kepler_id', 'kepoi_name': 'koi_name', 'koi_disposition': 'plnt_disposition', 'koi_score': 'plnt_disp_confidence', 'koi_fpflag_nt': 'flag_nTransitLk', 'koi_fpflag_ss': 'flag_scndEvent', 'koi_fpflag_co': 'flag_centroidOffset', 'koi_fpflag_ec': 'flag_ephMatch', 'koi_period': 'orbital_period', 'koi_time0bk': 'transit_epoch', 'koi_impact': 'impact_parameter', 'koi_duration': 'transit_duration', 'koi_depth': 'transit_depth', 'koi_prad': 'planetary_radius', 'koi_teq': 'equ_temp', 'koi_insol': 'insolation_flux', 'koi_model_snr': 'transit_sigToNoise', 'koi_steff': 'stellar_eff_temp', 'koi_slogg': 'stellar_surf_gravity', 'koi_srad': 'stellar_radius', 'ra': 'right_acension', 'dec': 'declination', 'koi_kepmag': 'kepler_magnitude'}, inplace=True)
star_colors = ['#fc0303', '#fc8403', '#fcf403', '#fffb91', '#ffffff', '#d1f4ff', '#00a1ff']
sns.set_style('dark')
# 'stellar_spectral_cl' is created by the spectral-classification cell, which is not part of this row's code
sns.lmplot(x='stellar_radius', y='stellar_surf_gravity', data=data, fit_reg=False, hue='stellar_spectral_cl', palette=star_colors, markers='*', scatter_kws={'alpha': 0.3, 's': 200}) | code |
50244427/cell_13 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
pd.set_option('display.max_columns', 50)
import os
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns
TRAIN_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_train.csv'
TEST_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_test.csv'
df = pd.read_csv(TRAIN_PATH)
print(f"Dataset has {len(df['city'].dropna().unique())} unique cities.")
print('_' * 20)
print(f"Unique cities count:\n{df['city'].value_counts()}")
print('*' * 50, end='\n\n')
fig, ax = plt.subplots(figsize=(12, 6))
sns.barplot(ax=ax, x=df['city'].value_counts().index[:10], y=df['city'].value_counts().values[:10], capsize=0.2, palette='Blues_d')
ax.set_title('Unique city count bar chart', fontsize=16)
ax.set_xlabel('City', fontsize=14)
ax.set_ylabel('Count', fontsize=14)
plt.show()
city_with_dev = df.groupby(['city'])['city_development_index'].mean().reset_index()
city_with_dev = city_with_dev.sort_values(by=['city_development_index'], ascending=False).reset_index(drop=True)
fig, ax = plt.subplots(figsize=(16, 6))
sns.barplot(ax=ax, x=city_with_dev['city'][:15], y=city_with_dev['city_development_index'][:15], palette='Blues_d')
ax.set_title('Top 15 cities with best development index bar chart', fontsize=16)
ax.set_xlabel('City', fontsize=14)
ax.set_ylabel('Development Index', fontsize=14)
plt.show()
print(f"Dataset has {len(df['gender'].dropna().unique())} unique gender values.")
print('_' * 20)
print(f"Unique gender counts:\n{df['gender'].value_counts()}")
print('*' * 50, end='\n\n')
total = df.shape[0]
total_male = df.query("gender == 'Male'")
total_female = df.query("gender == 'Female'")
total_other = df.query("gender == 'Other'")
male_percent = round(len(total_male) * 100 / total, 3)
female_percent = round(len(total_female) * 100 / total, 3)
other_percent = round(len(total_other) * 100 / total, 3)
labels = ('Male Percentage', 'Female Percentage', 'Other Percentage')
sizes = [male_percent, female_percent, other_percent]
explode = (0.05, 0.05, 0.05)
fig1, ax1 = plt.subplots()
ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90)
ax1.axis('equal')
plt.title('Gender spread in Data', fontsize=15)
plt.savefig('./gender_pie.png')
plt.show()
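# Repeat the gender breakdown for rows with target == 1.0, i.e. candidates looking for a job change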
total = len(df.query('target == 1.0'))
total_male_target = df.query("gender == 'Male' and target == 1.0")
total_female_target = df.query("gender == 'Female' and target == 1.0")
total_other_target = df.query("gender == 'Other' and target == 1.0")
male_target_percent = round(len(total_male_target) * 100 / total, 3)
female_target_percent = round(len(total_female_target) * 100 / total, 3)
other_target_percent = round(len(total_other_target) * 100 / total, 3)
labels = ('Male Percentage', 'Female Percentage', 'Other Percentage')
sizes = [male_target_percent, female_target_percent, other_target_percent]
explode = (0.1, 0.1, 0.1)
fig1, ax1 = plt.subplots()
ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90)
ax1.axis('equal')
plt.title('Gender with target = 1.0', fontsize=15)
plt.savefig('./gender_target_1_pie.png')
plt.show() | code |
50244427/cell_9 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
pd.set_option('display.max_columns', 50)
import os
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns
TRAIN_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_train.csv'
TEST_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_test.csv'
df = pd.read_csv(TRAIN_PATH)
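# city_development_index is a scaled development score attached to each city.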
print(f"Dataset has {len(df['city_development_index'].dropna().unique())} unique city development indices.")
print('_' * 20)
print(f"Unique City Development Indices:\n{df['city_development_index'].value_counts()}")
print('*' * 50, end='\n\n') | code |
50244427/cell_4 | [
"image_output_1.png"
] | import numpy as np
import pandas as pd
pd.set_option('display.max_columns', 50)
import os
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns
TRAIN_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_train.csv'
TEST_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_test.csv'
df = pd.read_csv(TRAIN_PATH)
df.head() | code |
50244427/cell_2 | [
"text_html_output_1.png"
] | import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
50244427/cell_11 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
pd.set_option('display.max_columns', 50)
import os
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns
TRAIN_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_train.csv'
TEST_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_test.csv'
df = pd.read_csv(TRAIN_PATH)
print(f"Dataset has {len(df['city'].dropna().unique())} unique cities.")
print("_"*20)
print(f"Unique cities count:\n{df['city'].value_counts()}")
print("*"*50, end="\n\n")
fig, ax = plt.subplots(figsize=(12, 6))
sns.barplot(ax=ax, x=df['city'].value_counts().index[:10], y=df['city'].value_counts().values[:10], capsize=.2,palette="Blues_d")
ax.set_title("Unique city count bar chart",fontsize=16)
ax.set_xlabel("City",fontsize=14)
ax.set_ylabel("Count",fontsize=14)
plt.show()
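# Mean development index per city; the index is a city-level attribute, so the mean simply recovers each city's value.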
city_with_dev = df.groupby('city')['city_development_index'].mean().reset_index()
city_with_dev = city_with_dev.sort_values(by=['city_development_index'], ascending=False).reset_index(drop=True)
fig, ax = plt.subplots(figsize=(16, 6))
sns.barplot(ax=ax, x=city_with_dev['city'][:15], y=city_with_dev['city_development_index'][:15], palette='Blues_d')
ax.set_title('Top 15 cities with best development index bar chart', fontsize=16)
ax.set_xlabel('City', fontsize=14)
ax.set_ylabel('Development Index', fontsize=14)
plt.show() | code |
50244427/cell_7 | [
"image_output_1.png"
] | import numpy as np
import pandas as pd
pd.set_option('display.max_columns', 50)
import os
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns
TRAIN_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_train.csv'
TEST_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_test.csv'
df = pd.read_csv(TRAIN_PATH)
print(f'Dataset has {df.shape[0]} rows and {df.shape[1]} columns.')
print('*' * 50, end='\n\n')
print(f"Dataset has {len(df['enrollee_id'].dropna().unique())} unique user's data.")
print('*' * 50, end='\n\n') | code |
50244427/cell_8 | [
"image_output_1.png"
] | import numpy as np
import pandas as pd
pd.set_option('display.max_columns', 50)
import os
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns
TRAIN_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_train.csv'
TEST_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_test.csv'
df = pd.read_csv(TRAIN_PATH)
print(f"Dataset has {len(df['city'].dropna().unique())} unique cities.")
print('_' * 20)
print(f"Unique cities count:\n{df['city'].value_counts()}")
print('*' * 50, end='\n\n')
fig, ax = plt.subplots(figsize=(12, 6))
sns.barplot(ax=ax, x=df['city'].value_counts().index[:10], y=df['city'].value_counts().values[:10], capsize=0.2, palette='Blues_d')
ax.set_title('Unique city count bar chart', fontsize=16)
ax.set_xlabel('City', fontsize=14)
ax.set_ylabel('Count', fontsize=14)
plt.show() | code |
50244427/cell_14 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np
import pandas as pd
pd.set_option('display.max_columns', 50)
import os
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns
TRAIN_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_train.csv'
TEST_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_test.csv'
df = pd.read_csv(TRAIN_PATH)
print(f"Dataset has {len(df['city'].dropna().unique())} unique cities.")
print("_"*20)
print(f"Unique cities count:\n{df['city'].value_counts()}")
print("*"*50, end="\n\n")
fig, ax = plt.subplots(figsize=(12, 6))
sns.barplot(ax=ax, x=df['city'].value_counts().index[:10], y=df['city'].value_counts().values[:10], capsize=.2,palette="Blues_d")
ax.set_title("Unique city count bar chart",fontsize=16)
ax.set_xlabel("City",fontsize=14)
ax.set_ylabel("Count",fontsize=14)
plt.show()
city_with_dev = df.groupby('city')['city_development_index'].mean().reset_index()
city_with_dev = city_with_dev.sort_values(by=['city_development_index'], ascending=False).reset_index(drop=True)
fig, ax = plt.subplots(figsize=(16, 6))
sns.barplot(ax=ax, x=city_with_dev['city'][:15],
y=city_with_dev['city_development_index'][:15],
palette="Blues_d")
ax.set_title("Top 15 cities with best development index bar chart",fontsize=16)
ax.set_xlabel("City",fontsize=14)
ax.set_ylabel("Development Index",fontsize=14)
plt.show()
print(f"Dataset has {len(df['gender'].dropna().unique())} unique gender's data.")
print("_"*20)
print(f"Unique Gender counts:\n{df['gender'].value_counts()}")
print("*"*50, end="\n\n")
total = df.shape[0]
total_male = df.query("gender == 'Male'")
total_female = df.query("gender == 'Female'")
total_other = df.query("gender == 'Other'")
male_percent = round(len(total_male)*100/total, 3)
female_percent = round(len(total_female)*100/total, 3)
other_percent = round(len(total_other)*100/total, 3)
labels = 'Male Percentage', 'Female Percentage', 'Other Percentage'
sizes = [male_percent, female_percent, other_percent]
explode = (0.05, 0.05, 0.05)
fig1, ax1 = plt.subplots()
ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',
shadow=True, startangle=90)
ax1.axis('equal')
plt.title('Gender spread in Data', fontsize=15)
plt.savefig("./gender_pie.png")
plt.show()
total = len(df.query("target == 1.0"))
total_male_target = df.query("gender == 'Male' and target == 1.0")
total_female_target = df.query("gender == 'Female' and target == 1.0")
total_other_target = df.query("gender == 'Other' and target == 1.0")
male_target_percent = round(len(total_male_target)*100/total, 3)
female_target_percent = round(len(total_female_target)*100/total, 3)
other_target_percent = round(len(total_other_target)*100/total, 3)
labels = 'Male Percentage', 'Female Percentage', 'Other Percentage'
sizes = [male_target_percent, female_target_percent, other_target_percent]
explode = (0.1, 0.1, 0.1)
fig1, ax1 = plt.subplots()
ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',
shadow=True, startangle=90)
ax1.axis('equal')
plt.title('Gender with target = 1.0', fontsize=15)
plt.savefig("./gender_target_1_pie.png")
plt.show()
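# Repeat the breakdown for candidates not looking for a change (target == 0.0).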
total = len(df.query('target == 0.0'))
total_male_target = df.query("gender == 'Male' and target == 0.0")
total_female_target = df.query("gender == 'Female' and target == 0.0")
total_other_target = df.query("gender == 'Other' and target == 0.0")
male_target_percent = round(len(total_male_target) * 100 / total, 3)
female_target_percent = round(len(total_female_target) * 100 / total, 3)
other_target_percent = round(len(total_other_target) * 100 / total, 3)
labels = ('Male Percentage', 'Female Percentage', 'Other Percentage')
sizes = [male_target_percent, female_target_percent, other_target_percent]
explode = (0.1, 0.1, 0.1)
fig1, ax1 = plt.subplots()
ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90)
ax1.axis('equal')
plt.title('Gender with target = 0.0', fontsize=15)
plt.savefig('./gender_target_0_pie.png')
plt.show() | code |
50244427/cell_10 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
pd.set_option('display.max_columns', 50)
import os
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns
TRAIN_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_train.csv'
TEST_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_test.csv'
df = pd.read_csv(TRAIN_PATH)
city_with_dev = df.groupby('city')['city_development_index'].mean().reset_index()
city_with_dev = city_with_dev.sort_values(by=['city_development_index'], ascending=False).reset_index(drop=True)
city_with_dev.head() | code |
50244427/cell_12 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
pd.set_option('display.max_columns', 50)
import os
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns
TRAIN_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_train.csv'
TEST_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_test.csv'
df = pd.read_csv(TRAIN_PATH)
print(f"Dataset has {len(df['city'].dropna().unique())} unique cities.")
print("_"*20)
print(f"Unique cities count:\n{df['city'].value_counts()}")
print("*"*50, end="\n\n")
fig, ax = plt.subplots(figsize=(12, 6))
sns.barplot(ax=ax, x=df['city'].value_counts().index[:10], y=df['city'].value_counts().values[:10], capsize=.2,palette="Blues_d")
ax.set_title("Unique city count bar chart",fontsize=16)
ax.set_xlabel("City",fontsize=14)
ax.set_ylabel("Count",fontsize=14)
plt.show()
city_with_dev = df.groupby('city')['city_development_index'].mean().reset_index()
city_with_dev = city_with_dev.sort_values(by=['city_development_index'], ascending=False).reset_index(drop=True)
fig, ax = plt.subplots(figsize=(16, 6))
sns.barplot(ax=ax, x=city_with_dev['city'][:15],
y=city_with_dev['city_development_index'][:15],
palette="Blues_d")
ax.set_title("Top 15 cities with best development index bar chart",fontsize=16)
ax.set_xlabel("City",fontsize=14)
ax.set_ylabel("Development Index",fontsize=14)
plt.show()
print(f"Dataset has {len(df['gender'].dropna().unique())} unique gender's data.")
print('_' * 20)
print(f"Unique Gender counts:\n{df['gender'].value_counts()}")
print('*' * 50, end='\n\n')
total = df.shape[0]
total_male = df.query("gender == 'Male'")
total_female = df.query("gender == 'Female'")
total_other = df.query("gender == 'Other'")
male_percent = round(len(total_male) * 100 / total, 3)
female_percent = round(len(total_female) * 100 / total, 3)
other_percent = round(len(total_other) * 100 / total, 3)
labels = ('Male Percentage', 'Female Percentage', 'Other Percentage')
sizes = [male_percent, female_percent, other_percent]
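# explode offsets each wedge from the center; shadow and startangle are purely cosmetic.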
explode = (0.05, 0.05, 0.05)
fig1, ax1 = plt.subplots()
ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90)
ax1.axis('equal')
plt.title('Gender spread in Data', fontsize=15)
plt.savefig('./gender_pie.png')
plt.show() | code |
50244427/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np
import pandas as pd
pd.set_option('display.max_columns', 50)
import os
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns
TRAIN_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_train.csv'
TEST_PATH = '/kaggle/input/hr-analytics-job-change-of-data-scientists/aug_test.csv'
df = pd.read_csv(TRAIN_PATH)
df['relevent_experience'].dropna().unique() | code |
18143144/cell_42 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_set = pd.read_csv('../input/train.csv')
test_set = pd.read_csv('../input/test.csv')
X_test = test_set.drop(['PassengerId', 'Name', 'Parch', 'Ticket', 'Cabin'], axis=1)
print(X_test.head())
X_test = X_test.values | code |
18143144/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_set = pd.read_csv('../input/train.csv')
test_set = pd.read_csv('../input/test.csv')
train_set.isnull().sum()
train_set.Survived.value_counts()
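# Mean survival rate per passenger class, plotted as a bar chart.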
train_set[['Pclass', 'Survived']].groupby(['Pclass']).mean().sort_values(by='Survived', ascending=False).plot(kind='bar') | code |
18143144/cell_4 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_set = pd.read_csv('../input/train.csv')
test_set = pd.read_csv('../input/test.csv')
train_set.head() | code |
18143144/cell_34 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_set = pd.read_csv('../input/train.csv')
test_set = pd.read_csv('../input/test.csv')
train_set.isnull().sum()
train_set.Survived.value_counts()
np.round(342 * 100 / 891)
np.round(549 * 100 / 891)
g = sns.FacetGrid(train_set, col='Survived')
g.map(plt.hist, 'Age', bins=20)
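# Impute missing ages with the rounded mean age so the column is complete for the plots below.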
mean = np.round(train_set.Age.mean())
train_set['Age'] = train_set.Age.fillna(mean)
train_set.Age.isnull().sum()
grid = sns.FacetGrid(train_set, col='Survived')
grid.map(plt.hist, 'Fare')
grid = sns.FacetGrid(train_set, row='Embarked', col='Survived', height=2.2, aspect=1.6)
grid.map(plt.bar, 'Sex', 'Fare')
grid = sns.FacetGrid(train_set, col='Survived', row='Pclass')
grid.map(plt.hist, 'Age') | code |
18143144/cell_30 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_set = pd.read_csv('../input/train.csv')
test_set = pd.read_csv('../input/test.csv')
train_set.isnull().sum()
train_set.Survived.value_counts()
np.round(342 * 100 / 891)
np.round(549 * 100 / 891)
g = sns.FacetGrid(train_set, col='Survived')
g.map(plt.hist, 'Age', bins=20)
mean = np.round(train_set.Age.mean())
train_set['Age'] = train_set.Age.fillna(mean)
train_set.Age.isnull().sum()
grid = sns.FacetGrid(train_set, col='Survived')
grid.map(plt.hist, 'Fare') | code |
18143144/cell_33 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_set = pd.read_csv('../input/train.csv')
test_set = pd.read_csv('../input/test.csv')
train_set.isnull().sum()
train_set.Survived.value_counts()
np.round(342 * 100 / 891)
np.round(549 * 100 / 891)
g = sns.FacetGrid(train_set, col='Survived')
g.map(plt.hist, 'Age', bins=20)
mean = np.round(train_set.Age.mean())
train_set['Age'] = train_set.Age.fillna(mean)
train_set.Age.isnull().sum()
grid = sns.FacetGrid(train_set, col='Survived')
grid.map(plt.hist, 'Fare')
grid = sns.FacetGrid(train_set, row='Embarked', col='Survived', height=2.2, aspect=1.6)
grid.map(plt.bar, 'Sex', 'Fare') | code |
18143144/cell_44 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_set = pd.read_csv('../input/train.csv')
test_set = pd.read_csv('../input/test.csv')
train_set.isnull().sum()
train_set.Survived.value_counts()
np.round(342 * 100 / 891)
np.round(549 * 100 / 891)
mean = np.round(train_set.Age.mean())
train_set['Age'] = train_set.Age.fillna(mean)
train_set.Age.isnull().sum()
X_train = train_set.drop(['Survived', 'PassengerId', 'Name', 'Parch', 'Ticket', 'Cabin'], axis=1)
X_train = X_train.values
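# Columns 1 and 5 (Sex, Embarked) are the remaining string-valued features.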
X_train[:, [1, 5]] | code |
18143144/cell_20 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_set = pd.read_csv('../input/train.csv')
test_set = pd.read_csv('../input/test.csv')
train_set.isnull().sum()
train_set.Survived.value_counts()
g = sns.FacetGrid(train_set, col='Survived')
g.map(plt.hist, 'Age', bins=20) | code |
18143144/cell_6 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_set = pd.read_csv('../input/train.csv')
test_set = pd.read_csv('../input/test.csv')
train_set.isnull().sum()
train_set.Survived.value_counts() | code |
18143144/cell_40 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_set = pd.read_csv('../input/train.csv')
test_set = pd.read_csv('../input/test.csv')
train_set.isnull().sum()
train_set.Survived.value_counts()
np.round(342 * 100 / 891)
np.round(549 * 100 / 891)
mean = np.round(train_set.Age.mean())
train_set['Age'] = train_set.Age.fillna(mean)
train_set.Age.isnull().sum()
X_train = train_set.drop(['Survived', 'PassengerId', 'Name', 'Parch', 'Ticket', 'Cabin'], axis=1)
X_train = X_train.values
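# The label vector is the Survived column, obtained here by dropping every other column.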
y_train = train_set.drop(['PassengerId', 'Pclass', 'Name', 'Sex', 'Age', 'SibSp', 'Parch', 'Ticket', 'Fare', 'Cabin', 'Embarked'], axis=1)
print(y_train.head())
y_train = y_train.values | code |
18143144/cell_39 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_set = pd.read_csv('../input/train.csv')
test_set = pd.read_csv('../input/test.csv')
train_set.isnull().sum()
train_set.Survived.value_counts()
np.round(342 * 100 / 891)
np.round(549 * 100 / 891)
mean = np.round(train_set.Age.mean())
train_set['Age'] = train_set.Age.fillna(mean)
train_set.Age.isnull().sum()
X_train = train_set.drop(['Survived', 'PassengerId', 'Name', 'Parch', 'Ticket', 'Cabin'], axis=1)
print(X_train.head())
X_train = X_train.values | code |
18143144/cell_48 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_set = pd.read_csv('../input/train.csv')
test_set = pd.read_csv('../input/test.csv')
train_set.isnull().sum()
train_set.Survived.value_counts()
np.round(342 * 100 / 891)
np.round(549 * 100 / 891)
mean = np.round(train_set.Age.mean())
train_set['Age'] = train_set.Age.fillna(mean)
train_set.Age.isnull().sum()
X_train = train_set.drop(['Survived', 'PassengerId', 'Name', 'Parch', 'Ticket', 'Cabin'], axis=1)
X_train = X_train.values
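# After the drops above, the remaining columns are Pclass, Sex, Age, SibSp, Fare, Embarked, so Sex is column 1 and Embarked is column 5.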
labelEncoder = LabelEncoder()
X_train[:, 1] = labelEncoder.fit_transform(X_train[:, 1])
# Embarked has a couple of NaNs that LabelEncoder cannot sort alongside strings; filling with the modal port 'S' is an assumption.
X_train[:, 5] = pd.Series(X_train[:, 5]).fillna('S').values
labelEncoder = LabelEncoder()
X_train[:, 5] = labelEncoder.fit_transform(X_train[:, 5])
print(X_train) | code |
18143144/cell_49 | [
"text_plain_output_1.png"
] | from sklearn.neighbors import KNeighborsClassifier
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_set = pd.read_csv('../input/train.csv')
test_set = pd.read_csv('../input/test.csv')
train_set.isnull().sum()
train_set.Survived.value_counts()
np.round(342 * 100 / 891)
np.round(549 * 100 / 891)
mean = np.round(train_set.Age.mean())
train_set['Age'] = train_set.Age.fillna(mean)
train_set.Age.isnull().sum()
X_train = train_set.drop(['Survived', 'PassengerId', 'Name', 'Parch', 'Ticket', 'Cabin'], axis=1)
X_train = X_train.values
y_train = train_set.drop(['PassengerId', 'Pclass', 'Name', 'Sex', 'Age', 'SibSp', 'Parch', 'Ticket', 'Fare', 'Cabin', 'Embarked'], axis=1)
y_train = y_train.values.ravel()
# As in the LabelEncoder cell, encode Sex (column 1) and Embarked (column 5) so KNN gets numeric features; the 'S' fill for missing Embarked values is an assumption.
from sklearn.preprocessing import LabelEncoder
labelEncoder = LabelEncoder()
X_train[:, 1] = labelEncoder.fit_transform(X_train[:, 1])
X_train[:, 5] = pd.Series(X_train[:, 5]).fillna('S').values
X_train[:, 5] = labelEncoder.fit_transform(X_train[:, 5])
knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(X_train, y_train) | code |
18143144/cell_32 | [
"text_html_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_set = pd.read_csv('../input/train.csv')
test_set = pd.read_csv('../input/test.csv')
train_set.isnull().sum()
train_set.Survived.value_counts()
np.round(342 * 100 / 891)
np.round(549 * 100 / 891)
mean = np.round(train_set.Age.mean())
train_set['Age'] = train_set.Age.fillna(mean)
train_set.Age.isnull().sum()
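# Mean survival rate by port of embarkation.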
train_set[['Survived', 'Embarked']].groupby('Embarked').mean().plot(kind='bar') | code |