path (string, 13–17 chars) | screenshot_names (sequence, 1–873 items) | code (string, 0–40.4k chars) | cell_type (1 class) |
---|---|---|---|
2019512/cell_5 | [
"image_output_1.png"
] | import seaborn as sns
import matplotlib.pyplot as plt
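# plot_dist draws a distplot for columns with more than 3 unique values and a countplot otherwise,
# then restores readable category tick labels. `frame` is assumed to be a DataFrame defined in an
# earlier cell of the source notebook.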
def plot_dist(name, frame, color='green'):
name0 = '{}_'.format(name)
ticklabel_op = True
if name0 not in frame.columns:
name0 = name
ticklabel_op = False
data_count = len(frame[name0].unique())
if data_count > 3:
sns.distplot(frame[name0], rug=False, color=color)
if data_count < 10:
plt.xticks(frame[name0].unique())
else:
sns.countplot(x=frame[name0], color=color)
name1 = name[:1].upper() + name[1:]
if ticklabel_op:
cat = dict(enumerate(frame[name].cat.categories))
ax = plt.gca()
if len(cat) > 5:
rotation = 45
ha = 'right'
else:
rotation = 0
ha = 'center'
ax.xaxis.set_ticklabels([cat[i] for i in sorted(frame[name0].unique())], rotation=rotation, ha=ha)
fig, ((ax1, ax2, ax3), (ax4, ax5, ax6), (ax7, ax8, ax9)) = plt.subplots(nrows=3, ncols=3, figsize=(12, 10))
plt.subplots_adjust(hspace=0.3, wspace=0.3)
cols_to_plot = list(frame.columns)
cols_to_plot.remove('left')
for ax, name in zip(fig.axes, cols_to_plot):
plt.sca(ax)
plot_dist(name, frame)
sns.despine() | code |
50212972/cell_13 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/titanic/train.csv')
data.isnull().sum()
data.groupby(['Sex', 'Survived'])['Survived'].count()
plt.figure(figsize=(10, 5))
plt.title('Survived vs. Sex')
sns.countplot(x='Sex', hue='Survived', data=data) | code |
50212972/cell_4 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/titanic/train.csv')
data.head() | code |
50212972/cell_20 | [
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/titanic/train.csv')
data.isnull().sum()
data.groupby(['Sex', 'Survived'])['Survived'].count()
hm_data = data[['Survived', 'Pclass']]
hm_data = hm_data.groupby(['Pclass', 'Survived'])['Survived'].count()
hm_data = hm_data.unstack()
pd.crosstab([data.Sex, data.Survived], data.Pclass, margins=True).style.background_gradient(cmap='winter')
plt.figure(figsize=(10, 5))
sns.catplot(x='Pclass', y='Survived', hue='Sex', data=data, kind='point')
plt.title('Survived vs. Sex + Pclass') | code |
50212972/cell_19 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/titanic/train.csv')
data.isnull().sum()
data.groupby(['Sex', 'Survived'])['Survived'].count()
pd.crosstab([data.Sex, data.Survived], data.Pclass, margins=True).style.background_gradient(cmap='winter') | code |
50212972/cell_1 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('fivethirtyeight')
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
50212972/cell_8 | [
"text_plain_output_3.png",
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/titanic/train.csv')
data.isnull().sum()
plt.figure(figsize=(10, 5))
sns.countplot(x='Survived', data=data) | code |
50212972/cell_16 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/titanic/train.csv')
data.isnull().sum()
data.groupby(['Sex', 'Survived'])['Survived'].count()
hm_data = data[['Survived', 'Pclass']]
hm_data = hm_data.groupby(['Pclass', 'Survived'])['Survived'].count()
hm_data = hm_data.unstack()
print(hm_data)
plt.figure(figsize=(10, 5))
sns.heatmap(hm_data) | code |
50212972/cell_17 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/titanic/train.csv')
data.isnull().sum()
data.groupby(['Sex', 'Survived'])['Survived'].count()
hm_data = data[['Survived', 'Pclass']]
hm_data = hm_data.groupby(['Pclass', 'Survived'])['Survived'].count()
hm_data = hm_data.unstack()
plt.figure(figsize=(10, 5))
plt.title('Pclass vs Survived')
sns.countplot(x='Pclass', hue='Survived', data=data) | code |
50212972/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/titanic/train.csv')
data.isnull().sum()
data.groupby(['Sex', 'Survived'])['Survived'].count() | code |
50212972/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/titanic/train.csv')
data.isnull().sum() | code |
129010000/cell_13 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import roc_curve
import matplotlib.pyplot as plt
import numpy as np
img_label = np.load('/content/drive/MyDrive/HMD_project/new/embedding_train_img_norm.npy')
txt_input = np.load('/content/drive/MyDrive/HMD_project/new/lsa_bow_train_text.npy')
img = img_label[:, 0:-1]
label = img_label[:, -1]
txt = txt_input
img_txt = np.concatenate((img, txt), axis=1)
x = img_txt
y = label
y.shape
img_label_val = np.load('/content/drive/MyDrive/HMD_project/new/embedding_val_img_norm.npy')
txt_input_val = np.load('/content/drive/MyDrive/HMD_project/new/lsa_bow_val_text.npy')
img_val = img_label_val[:, 0:-1]
label_val = img_label_val[:, -1]
txt_val = txt_input_val
img_txt_val = np.concatenate((img_val, txt_val), axis=1)
x_val = img_txt_val
y_val = label_val
x_val.shape
clf = LogisticRegression(max_iter=1000, C=0.1, penalty='l2')
clf.fit(x, y)
y_pred_p = clf.predict_proba(x)
b = y_pred_p[:, -1]
b = b.reshape(y.shape)
fpr, tpr, th = roc_curve(y, b)
optimal_idx = np.argmax(tpr - fpr)
optimal_threshold = th[optimal_idx]
y_pred = np.where(clf.predict_proba(x)[:, 1] > optimal_threshold, 1, 0)
y_pred_val = np.where(clf.predict_proba(x_val)[:, 1] > optimal_threshold, 1, 0)
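# Refit with a random forest on the same image+LSA features and recompute the Youden-J optimal threshold.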
clf = RandomForestClassifier(max_depth=3, random_state=0)
clf.fit(x, y)
y_pred_p = clf.predict_proba(x)
b = y_pred_p[:, -1]
b = b.reshape(y.shape)
fpr, tpr, th = roc_curve(y, b)
optimal_idx = np.argmax(tpr - fpr)
optimal_threshold = th[optimal_idx]
y_pred = np.where(clf.predict_proba(x)[:, 1] > optimal_threshold, 1, 0)
print(classification_report(y, y_pred))
print(matthews_corrcoef(y, y_pred))
print('\n\nvalidation')
y_pred_val = np.where(clf.predict_proba(x_val)[:, 1] > optimal_threshold, 1, 0)
print(classification_report(y_val, y_pred_val))
print(matthews_corrcoef(y_val, y_pred_val)) | code |
129010000/cell_9 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_curve
import matplotlib.pyplot as plt
import numpy as np
img_label = np.load('/content/drive/MyDrive/HMD_project/new/embedding_train_img_norm.npy')
txt_input = np.load('/content/drive/MyDrive/HMD_project/new/lsa_bow_train_text.npy')
img = img_label[:, 0:-1]
label = img_label[:, -1]
txt = txt_input
img_txt = np.concatenate((img, txt), axis=1)
x = img_txt
y = label
y.shape
img_label_val = np.load('/content/drive/MyDrive/HMD_project/new/embedding_val_img_norm.npy')
txt_input_val = np.load('/content/drive/MyDrive/HMD_project/new/lsa_bow_val_text.npy')
img_val = img_label_val[:, 0:-1]
label_val = img_label_val[:, -1]
txt_val = txt_input_val
img_txt_val = np.concatenate((img_val, txt_val), axis=1)
x_val = img_txt_val
y_val = label_val
x_val.shape
clf = LogisticRegression(max_iter=1000, C=0.1, penalty='l2')
clf.fit(x, y)
y_pred_p = clf.predict_proba(x)
b = y_pred_p[:, -1]
b = b.reshape(y.shape)
fpr, tpr, th = roc_curve(y, b)
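# Training-set ROC for the logistic model; the threshold chosen below maximizes Youden's J (tpr - fpr).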
plt.figure(figsize=(7, 5))
plt.plot(fpr, tpr, label='LSA-LR', linewidth=2)
plt.grid()
plt.title('ROC')
plt.xlabel('False positive rate-------->')
plt.ylabel('True positive rate--------->')
plt.legend(loc='lower right')
optimal_idx = np.argmax(tpr - fpr)
optimal_threshold = th[optimal_idx]
print('optimal_threshold', optimal_threshold, '\n') | code |
129010000/cell_4 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import numpy as np
img_label = np.load('/content/drive/MyDrive/HMD_project/new/embedding_train_img_norm.npy')
txt_input = np.load('/content/drive/MyDrive/HMD_project/new/lsa_bow_train_text.npy')
img = img_label[:, 0:-1]
label = img_label[:, -1]
txt = txt_input
img_txt = np.concatenate((img, txt), axis=1)
x = img_txt
y = label
print(x.shape)
y.shape | code |
129010000/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np
img_label = np.load('/content/drive/MyDrive/HMD_project/new/embedding_train_img_norm.npy')
txt_input = np.load('/content/drive/MyDrive/HMD_project/new/lsa_bow_train_text.npy')
img = img_label[:, 0:-1]
label = img_label[:, -1]
txt = txt_input
img_txt = np.concatenate((img, txt), axis=1)
x = img_txt
y = label
y.shape
img_label_val = np.load('/content/drive/MyDrive/HMD_project/new/embedding_val_img_norm.npy')
txt_input_val = np.load('/content/drive/MyDrive/HMD_project/new/lsa_bow_val_text.npy')
img_val = img_label_val[:, 0:-1]
label_val = img_label_val[:, -1]
txt_val = txt_input_val
img_txt_val = np.concatenate((img_val, txt_val), axis=1)
x_val = img_txt_val
y_val = label_val
x_val.shape | code |
129010000/cell_2 | [
"text_plain_output_1.png"
] | from google.colab import drive
drive.mount('/content/drive') | code |
129010000/cell_19 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import roc_curve
import matplotlib.pyplot as plt
import numpy as np
img_label = np.load('/content/drive/MyDrive/HMD_project/new/embedding_train_img_norm.npy')
txt_input = np.load('/content/drive/MyDrive/HMD_project/new/lsa_bow_train_text.npy')
img = img_label[:, 0:-1]
label = img_label[:, -1]
txt = txt_input
img_txt = np.concatenate((img, txt), axis=1)
x = img_txt
y = label
y.shape
img_label_val = np.load('/content/drive/MyDrive/HMD_project/new/embedding_val_img_norm.npy')
txt_input_val = np.load('/content/drive/MyDrive/HMD_project/new/lsa_bow_val_text.npy')
img_val = img_label_val[:, 0:-1]
label_val = img_label_val[:, -1]
txt_val = txt_input_val
img_txt_val = np.concatenate((img_val, txt_val), axis=1)
x_val = img_txt_val
y_val = label_val
x_val.shape
clf = LogisticRegression(max_iter=1000, C=0.1, penalty='l2')
clf.fit(x, y)
y_pred_p = clf.predict_proba(x)
b = y_pred_p[:, -1]
b = b.reshape(y.shape)
fpr, tpr, th = roc_curve(y, b)
optimal_idx = np.argmax(tpr - fpr)
optimal_threshold = th[optimal_idx]
y_pred = np.where(clf.predict_proba(x)[:, 1] > optimal_threshold, 1, 0)
y_pred_val = np.where(clf.predict_proba(x_val)[:, 1] > optimal_threshold, 1, 0)
clf = RandomForestClassifier(max_depth=3, random_state=0)
clf.fit(x, y)
y_pred_p = clf.predict_proba(x)
b = y_pred_p[:, -1]
b = b.reshape(y.shape)
fpr, tpr, th = roc_curve(y, b)
optimal_idx = np.argmax(tpr - fpr)
optimal_threshold = th[optimal_idx]
y_pred = np.where(clf.predict_proba(x)[:, 1] > optimal_threshold, 1, 0)
y_pred_val = np.where(clf.predict_proba(x_val)[:, 1] > optimal_threshold, 1, 0)
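# Swap the text representation: NMF on TF-IDF features replaces LSA on bag-of-words.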
txt_input = np.load('/content/drive/MyDrive/HMD_project/new/nmf_tfidf_train_text.npy')
txt = txt_input
img_txt = np.concatenate((img, txt), axis=1)
x = img_txt
y = label
x.shape
txt_input_val = np.load('/content/drive/MyDrive/HMD_project/new/nmf_tfidf_val_text.npy')
txt_val = txt_input_val
img_txt_val = np.concatenate((img_val, txt_val), axis=1)
x_val = img_txt_val
y_val = label_val
y_val.shape
clf = RandomForestClassifier(n_estimators=100, max_depth=3, random_state=1)
clf.fit(x, y)
y_pred_p = clf.predict_proba(x)
b = y_pred_p[:, -1]
b = b.reshape(y.shape)
fpr, tpr, th = roc_curve(y, b)
plt.figure(figsize=(7, 5))
plt.plot(fpr, tpr, label='NMF-RF', linewidth=2)
plt.grid()
plt.title('ROC')
plt.xlabel('False positive rate-------->')
plt.ylabel('True positive rate--------->')
plt.legend(loc='lower right')
optimal_idx = np.argmax(tpr - fpr)
optimal_threshold = th[optimal_idx]
print('optimal_threshold', optimal_threshold, '\n') | code |
129010000/cell_15 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import roc_curve
import matplotlib.pyplot as plt
import numpy as np
img_label = np.load('/content/drive/MyDrive/HMD_project/new/embedding_train_img_norm.npy')
txt_input = np.load('/content/drive/MyDrive/HMD_project/new/lsa_bow_train_text.npy')
img = img_label[:, 0:-1]
label = img_label[:, -1]
txt = txt_input
img_txt = np.concatenate((img, txt), axis=1)
x = img_txt
y = label
y.shape
img_label_val = np.load('/content/drive/MyDrive/HMD_project/new/embedding_val_img_norm.npy')
txt_input_val = np.load('/content/drive/MyDrive/HMD_project/new/lsa_bow_val_text.npy')
img_val = img_label_val[:, 0:-1]
label_val = img_label_val[:, -1]
txt_val = txt_input_val
img_txt_val = np.concatenate((img_val, txt_val), axis=1)
x_val = img_txt_val
y_val = label_val
x_val.shape
clf = LogisticRegression(max_iter=1000, C=0.1, penalty='l2')
clf.fit(x, y)
y_pred_p = clf.predict_proba(x)
b = y_pred_p[:, -1]
b = b.reshape(y.shape)
fpr, tpr, th = roc_curve(y, b)
optimal_idx = np.argmax(tpr - fpr)
optimal_threshold = th[optimal_idx]
y_pred = np.where(clf.predict_proba(x)[:, 1] > optimal_threshold, 1, 0)
y_pred_val = np.where(clf.predict_proba(x_val)[:, 1] > optimal_threshold, 1, 0)
clf = RandomForestClassifier(max_depth=3, random_state=0)
clf.fit(x, y)
y_pred_p = clf.predict_proba(x)
b = y_pred_p[:, -1]
b = b.reshape(y.shape)
fpr, tpr, th = roc_curve(y, b)
optimal_idx = np.argmax(tpr - fpr)
optimal_threshold = th[optimal_idx]
y_pred = np.where(clf.predict_proba(x)[:, 1] > optimal_threshold, 1, 0)
y_pred_val = np.where(clf.predict_proba(x_val)[:, 1] > optimal_threshold, 1, 0)
txt_input = np.load('/content/drive/MyDrive/HMD_project/new/nmf_tfidf_train_text.npy')
txt = txt_input
img_txt = np.concatenate((img, txt), axis=1)
x = img_txt
y = label
print(y.shape)
x.shape | code |
129010000/cell_17 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import roc_curve
import matplotlib.pyplot as plt
import numpy as np
img_label = np.load('/content/drive/MyDrive/HMD_project/new/embedding_train_img_norm.npy')
txt_input = np.load('/content/drive/MyDrive/HMD_project/new/lsa_bow_train_text.npy')
img = img_label[:, 0:-1]
label = img_label[:, -1]
txt = txt_input
img_txt = np.concatenate((img, txt), axis=1)
x = img_txt
y = label
y.shape
img_label_val = np.load('/content/drive/MyDrive/HMD_project/new/embedding_val_img_norm.npy')
txt_input_val = np.load('/content/drive/MyDrive/HMD_project/new/lsa_bow_val_text.npy')
img_val = img_label_val[:, 0:-1]
label_val = img_label_val[:, -1]
txt_val = txt_input_val
img_txt_val = np.concatenate((img_val, txt_val), axis=1)
x_val = img_txt_val
y_val = label_val
x_val.shape
clf = LogisticRegression(max_iter=1000, C=0.1, penalty='l2')
clf.fit(x, y)
y_pred_p = clf.predict_proba(x)
b = y_pred_p[:, -1]
b = b.reshape(y.shape)
fpr, tpr, th = roc_curve(y, b)
optimal_idx = np.argmax(tpr - fpr)
optimal_threshold = th[optimal_idx]
y_pred = np.where(clf.predict_proba(x)[:, 1] > optimal_threshold, 1, 0)
y_pred_val = np.where(clf.predict_proba(x_val)[:, 1] > optimal_threshold, 1, 0)
clf = RandomForestClassifier(max_depth=3, random_state=0)
clf.fit(x, y)
y_pred_p = clf.predict_proba(x)
b = y_pred_p[:, -1]
b = b.reshape(y.shape)
fpr, tpr, th = roc_curve(y, b)
optimal_idx = np.argmax(tpr - fpr)
optimal_threshold = th[optimal_idx]
y_pred = np.where(clf.predict_proba(x)[:, 1] > optimal_threshold, 1, 0)
y_pred_val = np.where(clf.predict_proba(x_val)[:, 1] > optimal_threshold, 1, 0)
txt_input = np.load('/content/drive/MyDrive/HMD_project/new/nmf_tfidf_train_text.npy')
txt = txt_input
img_txt = np.concatenate((img, txt), axis=1)
x = img_txt
y = label
x.shape
txt_input_val = np.load('/content/drive/MyDrive/HMD_project/new/nmf_tfidf_val_text.npy')
txt_val = txt_input_val
img_txt_val = np.concatenate((img_val, txt_val), axis=1)
x_val = img_txt_val
y_val = label_val
print(x_val.shape)
y_val.shape | code |
129010000/cell_10 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import roc_curve
import matplotlib.pyplot as plt
import numpy as np
img_label = np.load('/content/drive/MyDrive/HMD_project/new/embedding_train_img_norm.npy')
txt_input = np.load('/content/drive/MyDrive/HMD_project/new/lsa_bow_train_text.npy')
img = img_label[:, 0:-1]
label = img_label[:, -1]
txt = txt_input
img_txt = np.concatenate((img, txt), axis=1)
x = img_txt
y = label
y.shape
img_label_val = np.load('/content/drive/MyDrive/HMD_project/new/embedding_val_img_norm.npy')
txt_input_val = np.load('/content/drive/MyDrive/HMD_project/new/lsa_bow_val_text.npy')
img_val = img_label_val[:, 0:-1]
label_val = img_label_val[:, -1]
txt_val = txt_input_val
img_txt_val = np.concatenate((img_val, txt_val), axis=1)
x_val = img_txt_val
y_val = label_val
x_val.shape
clf = LogisticRegression(max_iter=1000, C=0.1, penalty='l2')
clf.fit(x, y)
y_pred_p = clf.predict_proba(x)
b = y_pred_p[:, -1]
b = b.reshape(y.shape)
fpr, tpr, th = roc_curve(y, b)
optimal_idx = np.argmax(tpr - fpr)
optimal_threshold = th[optimal_idx]
y_pred = np.where(clf.predict_proba(x)[:, 1] > optimal_threshold, 1, 0)
print(classification_report(y, y_pred))
print(matthews_corrcoef(y, y_pred))
print('\n\nvalidation')
y_pred_val = np.where(clf.predict_proba(x_val)[:, 1] > optimal_threshold, 1, 0)
print(classification_report(y_val, y_pred_val))
print(matthews_corrcoef(y_val, y_pred_val)) | code |
129010000/cell_12 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import roc_curve
import matplotlib.pyplot as plt
import numpy as np
img_label = np.load('/content/drive/MyDrive/HMD_project/new/embedding_train_img_norm.npy')
txt_input = np.load('/content/drive/MyDrive/HMD_project/new/lsa_bow_train_text.npy')
img = img_label[:, 0:-1]
label = img_label[:, -1]
txt = txt_input
img_txt = np.concatenate((img, txt), axis=1)
x = img_txt
y = label
y.shape
img_label_val = np.load('/content/drive/MyDrive/HMD_project/new/embedding_val_img_norm.npy')
txt_input_val = np.load('/content/drive/MyDrive/HMD_project/new/lsa_bow_val_text.npy')
img_val = img_label_val[:, 0:-1]
label_val = img_label_val[:, -1]
txt_val = txt_input_val
img_txt_val = np.concatenate((img_val, txt_val), axis=1)
x_val = img_txt_val
y_val = label_val
x_val.shape
clf = LogisticRegression(max_iter=1000, C=0.1, penalty='l2')
clf.fit(x, y)
y_pred_p = clf.predict_proba(x)
b = y_pred_p[:, -1]
b = b.reshape(y.shape)
fpr, tpr, th = roc_curve(y, b)
optimal_idx = np.argmax(tpr - fpr)
optimal_threshold = th[optimal_idx]
y_pred = np.where(clf.predict_proba(x)[:, 1] > optimal_threshold, 1, 0)
y_pred_val = np.where(clf.predict_proba(x_val)[:, 1] > optimal_threshold, 1, 0)
clf = RandomForestClassifier(max_depth=3, random_state=0)
clf.fit(x, y)
y_pred_p = clf.predict_proba(x)
b = y_pred_p[:, -1]
b = b.reshape(y.shape)
fpr, tpr, th = roc_curve(y, b)
plt.figure(figsize=(7, 5))
plt.plot(fpr, tpr, label='LSA-RF', linewidth=2)
plt.grid()
plt.title('ROC')
plt.xlabel('False positive rate-------->')
plt.ylabel('True positive rate--------->')
plt.legend(loc='lower right')
optimal_idx = np.argmax(tpr - fpr)
optimal_threshold = th[optimal_idx]
print('optimal_threshold', optimal_threshold, '\n') | code |
73061941/cell_13 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from PIL import Image
from torch.utils.data import Dataset, DataLoader
import numpy as np # linear algebra
import os
import torch
import torch.nn as nn
import pandas as pd
class EncoderBlock(nn.Module):
def __init__(self, in_channels, out_channels, dropout=0, pool=True) -> None:
super(EncoderBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=(3, 3), padding=1)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=(3, 3), padding=1)
self.relu = nn.ReLU()
if pool:
self.pool = nn.MaxPool2d(kernel_size=2)
else:
self.pool = None
if dropout > 0:
self.drop = nn.Dropout(dropout)
else:
self.drop = None
def forward(self, x):
skip = self.relu(self.conv2(self.relu(self.conv1(x))))
out = skip
if self.pool:
out = self.pool(out)
if self.drop:
out = self.drop(out)
return [out, skip]
class DecoderBlock(nn.Module):
def __init__(self, in_channels, out_channels) -> None:
super(DecoderBlock, self).__init__()
self.trans_conv = nn.ConvTranspose2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=2, padding=1, output_padding=1)
self.conv1 = nn.Conv2d(out_channels * 2, out_channels, padding=1, kernel_size=3)
self.conv2 = nn.Conv2d(out_channels, out_channels, padding=1, kernel_size=3)
self.relu = nn.ReLU()
def forward(self, x, skip):
x = self.trans_conv(x)
x = torch.cat((skip, x), dim=1)
x = self.relu(self.conv2(self.relu(self.conv1(x))))
return x
class Unet(nn.Module):
def __init__(self, in_channels, classes, n_filters=32) -> None:
super(Unet, self).__init__()
self.in_channels = in_channels
self.n_filters = n_filters
self.classes = classes
self.e1 = EncoderBlock(in_channels=self.in_channels, out_channels=self.n_filters, dropout=0.2)
self.e2 = EncoderBlock(in_channels=self.n_filters, out_channels=self.n_filters * 2, dropout=0.2)
self.e3 = EncoderBlock(in_channels=self.n_filters * 2, out_channels=self.n_filters * 4, dropout=0.2)
self.e4 = EncoderBlock(in_channels=self.n_filters * 4, out_channels=self.n_filters * 8, dropout=0.2)
self.e5 = EncoderBlock(in_channels=self.n_filters * 8, out_channels=self.n_filters * 16, dropout=0.2, pool=False)
self.d6 = DecoderBlock(in_channels=self.n_filters * 16, out_channels=self.n_filters * 8)
self.d7 = DecoderBlock(in_channels=self.n_filters * 8, out_channels=self.n_filters * 4)
self.d8 = DecoderBlock(in_channels=self.n_filters * 4, out_channels=self.n_filters * 2)
self.d9 = DecoderBlock(in_channels=self.n_filters * 2, out_channels=self.n_filters)
self.conv1 = nn.Conv2d(in_channels=self.n_filters, out_channels=self.n_filters, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(in_channels=self.n_filters, out_channels=classes, kernel_size=1)
self.relu = nn.ReLU()
self.softmax = nn.Softmax(dim=1)
def forward(self, x):
x1 = self.e1(x)
x2 = self.e2(x1[0])
x3 = self.e3(x2[0])
x4 = self.e4(x3[0])
x5 = self.e5(x4[0])
x6 = self.d6(x5[0], x4[1])
x7 = self.d7(x6, x3[1])
x8 = self.d8(x7, x2[1])
x9 = self.d9(x8, x1[1])
x10 = self.relu(self.conv1(x9))
x11 = self.conv2(x10)
return x11
class UnetDataset(Dataset):
def __init__(self, img_path, mask_path, transform=None) -> None:
super(UnetDataset, self).__init__()
self.img_path = img_path
self.mask_path = mask_path
self.transform = transform
def __len__(self):
return len(os.listdir(self.img_path))
def __getitem__(self, index):
img = Image.open(self.img_path + '/' + os.listdir(self.img_path)[index])
mask = Image.open(self.mask_path + '/' + os.listdir(self.mask_path)[index])
if self.transform:
img = self.transform(img)
mask = self.transform(mask)
mask = torch.max(mask, dim=0)
mask = mask[0].type(torch.LongTensor)
return (img, mask)
img = Image.open('../input/lyft-udacity-challenge/dataA/dataA/CameraRGB/02_00_000.png').resize((512, 512))
# HWC uint8 image -> NCHW float tensor (transpose, not reshape, so channels stay intact)
img = torch.tensor(np.asarray(img).transpose(2, 0, 1), dtype=torch.float32).unsqueeze(0) | code |
73061941/cell_12 | [
"application_vnd.jupyter.stderr_output_3.png",
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
class EncoderBlock(nn.Module):
def __init__(self, in_channels, out_channels, dropout=0, pool=True) -> None:
super(EncoderBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=(3, 3), padding=1)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=(3, 3), padding=1)
self.relu = nn.ReLU()
if pool:
self.pool = nn.MaxPool2d(kernel_size=2)
else:
self.pool = None
if dropout > 0:
self.drop = nn.Dropout(dropout)
else:
self.drop = None
def forward(self, x):
skip = self.relu(self.conv2(self.relu(self.conv1(x))))
out = skip
if self.pool:
out = self.pool(out)
if self.drop:
out = self.drop(out)
return [out, skip]
class DecoderBlock(nn.Module):
def __init__(self, in_channels, out_channels) -> None:
super(DecoderBlock, self).__init__()
self.trans_conv = nn.ConvTranspose2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=2, padding=1, output_padding=1)
self.conv1 = nn.Conv2d(out_channels * 2, out_channels, padding=1, kernel_size=3)
self.conv2 = nn.Conv2d(out_channels, out_channels, padding=1, kernel_size=3)
self.relu = nn.ReLU()
def forward(self, x, skip):
x = self.trans_conv(x)
x = torch.cat((skip, x), dim=1)
x = self.relu(self.conv2(self.relu(self.conv1(x))))
return x
class Unet(nn.Module):
def __init__(self, in_channels, classes, n_filters=32) -> None:
super(Unet, self).__init__()
self.in_channels = in_channels
self.n_filters = n_filters
self.classes = classes
self.e1 = EncoderBlock(in_channels=self.in_channels, out_channels=self.n_filters, dropout=0.2)
self.e2 = EncoderBlock(in_channels=self.n_filters, out_channels=self.n_filters * 2, dropout=0.2)
self.e3 = EncoderBlock(in_channels=self.n_filters * 2, out_channels=self.n_filters * 4, dropout=0.2)
self.e4 = EncoderBlock(in_channels=self.n_filters * 4, out_channels=self.n_filters * 8, dropout=0.2)
self.e5 = EncoderBlock(in_channels=self.n_filters * 8, out_channels=self.n_filters * 16, dropout=0.2, pool=False)
self.d6 = DecoderBlock(in_channels=self.n_filters * 16, out_channels=self.n_filters * 8)
self.d7 = DecoderBlock(in_channels=self.n_filters * 8, out_channels=self.n_filters * 4)
self.d8 = DecoderBlock(in_channels=self.n_filters * 4, out_channels=self.n_filters * 2)
self.d9 = DecoderBlock(in_channels=self.n_filters * 2, out_channels=self.n_filters)
self.conv1 = nn.Conv2d(in_channels=self.n_filters, out_channels=self.n_filters, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(in_channels=self.n_filters, out_channels=classes, kernel_size=1)
self.relu = nn.ReLU()
self.softmax = nn.Softmax(dim=1)
def forward(self, x):
x1 = self.e1(x)
x2 = self.e2(x1[0])
x3 = self.e3(x2[0])
x4 = self.e4(x3[0])
x5 = self.e5(x4[0])
x6 = self.d6(x5[0], x4[1])
x7 = self.d7(x6, x3[1])
x8 = self.d8(x7, x2[1])
x9 = self.d9(x8, x1[1])
x10 = self.relu(self.conv1(x9))
x11 = self.conv2(x10)
return x11
myTrans = transforms.Compose([transforms.ToTensor(), transforms.Resize((512, 512))])
batch = 8
unet = Unet(3, 13)
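# NOTE: UnetDataset is defined in the companion cell 73061941/cell_13 above.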
seg = UnetDataset('../input/lyft-udacity-challenge/dataA/dataA/CameraRGB', '../input/lyft-udacity-challenge/dataA/dataA/CameraSeg', transform=myTrans)
loader = DataLoader(seg, batch_size=batch, pin_memory=True)
lr = 0.0001
n_epochs = 1
lossfn = nn.CrossEntropyLoss()
optimizer = optim.Adam(unet.parameters(), lr=lr)
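# Minimal single-epoch training loop; as written everything stays on CPU (no .to(device) calls).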
def train():
loop = tqdm(loader)
running_loss = 0
for b_id, (x, y) in enumerate(loop):
pred = unet(x)
loss = lossfn(pred, y)
loss_val = loss.item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss += loss_val
loop.set_postfix_str(f'loss: {loss_val}')
for epoch in range(n_epochs):
train() | code |
72112151/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train.head() | code |
72112151/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
y = train['target']
features = train.drop(['target'], axis=1)
features.head() | code |
72112151/cell_11 | [
"text_html_output_1.png"
] | from xgboost import XGBRegressor
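# X_train, X_valid, y_train and y_valid come from a train_test_split cell not included in this extract;
# early_stopping_rounds=5 halts boosting once the validation score stops improving for 5 rounds.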
model = XGBRegressor(n_estimators=1000, learning_rate=0.01, n_jobs=4)
model.fit(X_train, y_train, early_stopping_rounds=5, eval_set=[(X_valid, y_valid)], verbose=False) | code |
72112151/cell_8 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import OrdinalEncoder
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
y = train['target']
features = train.drop(['target'], axis=1)
object_cols = [col for col in features.columns if 'cat' in col]
X = features.copy()
X_test = test.copy()
ordinal_encoder = OrdinalEncoder()
X[object_cols] = ordinal_encoder.fit_transform(features[object_cols])
X_test[object_cols] = ordinal_encoder.transform(test[object_cols])
X.head() | code |
72112151/cell_12 | [
"text_plain_output_1.png"
] | from sklearn.metrics import mean_squared_error
from xgboost import XGBRegressor
model = XGBRegressor(n_estimators=1000, learning_rate=0.01, n_jobs=4)
model.fit(X_train, y_train, early_stopping_rounds=5, eval_set=[(X_valid, y_valid)], verbose=False)
preds_valid = model.predict(X_valid)
print(mean_squared_error(y_valid, preds_valid, squared=False)) | code |
72112151/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train.info() | code |
88086160/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('./train.csv')
train.index = train['image'].copy()
train['image'] = train['image'].str[:-3] + 'bmp'
train['bbox'] = pd.read_csv('../input/happywhale-boundingbox-yolov5/train.csv', index_col='image')['bbox']
train['bbox'] = train['bbox'].fillna('[]').map(eval)
train | code |
88086160/cell_7 | [
"text_plain_output_1.png"
] | import cv2
import os
import pandas as pd
IMAGE_SIZE = 224
train = pd.read_csv('./train.csv')
train.index = train['image'].copy()
train['image'] = train['image'].str[:-3] + 'bmp'
train['bbox'] = pd.read_csv('../input/happywhale-boundingbox-yolov5/train.csv', index_col='image')['bbox']
train['bbox'] = train['bbox'].fillna('[]').map(eval)
train
sample_submission = pd.read_csv('./sample_submission.csv')
sample_submission.index = sample_submission['image'].copy()
sample_submission['inference_image'] = sample_submission['image'].str[:-3] + 'bmp'
sample_submission['bbox'] = pd.read_csv('../input/happywhale-boundingbox-yolov5/test.csv', index_col='image')['bbox']
sample_submission['bbox'] = sample_submission['bbox'].fillna('[]').map(eval)
sample_submission
train.to_csv('./train.csv', index=False)
sample_submission.to_csv('./sample_submission.csv', index=False)
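# Crop each image to its first YOLOv5 bounding box (when present), resize to IMAGE_SIZE x IMAGE_SIZE, and save as .bmp.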
def copy_dir(dirname, base_path='../input/happy-whale-and-dolphin/'):
bboxes = train['bbox'] if dirname == 'train_images' else sample_submission['bbox']
print('Copying', dirname)
path = os.path.join(base_path, dirname)
images = list(os.listdir(path))
n = len(images)
for i, f in enumerate(images):
print(f'{i}/{n}', end='\r')
image_path = os.path.join(path, f)
image = cv2.imread(image_path)
if len(bboxes[f]):
bbox = bboxes[f][0]
xmin, ymin, xmax, ymax = bbox
image = image[ymin:ymax, xmin:xmax]
image = cv2.resize(image, (IMAGE_SIZE, IMAGE_SIZE), interpolation=cv2.INTER_CUBIC)
new_path = os.path.join('./', dirname, f.split('.')[0] + '.bmp')
cv2.imwrite(new_path, image)
copy_dir('train_images')
copy_dir('test_images') | code |
88086160/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('./train.csv')
train.index = train['image'].copy()
train['image'] = train['image'].str[:-3] + 'bmp'
train['bbox'] = pd.read_csv('../input/happywhale-boundingbox-yolov5/train.csv', index_col='image')['bbox']
train['bbox'] = train['bbox'].fillna('[]').map(eval)
train
sample_submission = pd.read_csv('./sample_submission.csv')
sample_submission.index = sample_submission['image'].copy()
sample_submission['inference_image'] = sample_submission['image'].str[:-3] + 'bmp'
sample_submission['bbox'] = pd.read_csv('../input/happywhale-boundingbox-yolov5/test.csv', index_col='image')['bbox']
sample_submission['bbox'] = sample_submission['bbox'].fillna('[]').map(eval)
sample_submission | code |
17116992/cell_13 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.ensemble import IsolationForest
from sklearn.metrics import classification_report, accuracy_score
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/creditcard.csv')
fraud_ratio = float(data['Class'][data['Class'] == 1].shape[0]) / data.shape[0]
X = data.iloc[:, :-1]
Y = data['Class']
(Y.shape, X.shape)
classifiers = {'Isolation Forest': IsolationForest(n_estimators=100, max_samples=len(X))}
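# Only the Isolation Forest entry is populated; the LOF and SVM branches below are leftovers from a
# broader comparison in the source notebook. n_outliers matches the 492 known frauds in the dataset.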
n_outliers = 492
for i, (clf_name, clf) in enumerate(classifiers.items()):
if clf_name == 'Local Outlier Factor':
y_pred = clf.fit_predict(X)
scores_prediction = clf.negative_outlier_factor_
elif clf_name == 'Support Vector Machine':
clf.fit(X)
y_pred = clf.predict(X)
else:
clf.fit(X)
scores_prediction = clf.decision_function(X)
y_pred = clf.predict(X)  # no held-out split in this cell, so evaluate on the data the model was fit on
y_pred[y_pred == 1] = 0
y_pred[y_pred == -1] = 1
n_errors = (y_pred != Y).sum()
print('{}: {}'.format(clf_name, n_errors))
print('Accuracy Score :')
print(accuracy_score(Y, y_pred))
print('Classification Report :')
print(classification_report(Y, y_pred)) | code |
17116992/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/creditcard.csv')
sns.countplot(x=data['Class']) | code |
17116992/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/creditcard.csv')
fraud_ratio = float(data['Class'][data['Class'] == 1].shape[0]) / data.shape[0]
data.head() | code |
17116992/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/creditcard.csv')
fraud_ratio = float(data['Class'][data['Class'] == 1].shape[0]) / data.shape[0]
X = data.iloc[:, :-1]
Y = data['Class']
(Y.shape, X.shape) | code |
17116992/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
print(os.listdir('../input')) | code |
17116992/cell_7 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/creditcard.csv')
fraud_ratio = float(data['Class'][data['Class'] == 1].shape[0]) / data.shape[0]
data['Class'].value_counts() | code |
17116992/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/creditcard.csv')
data.info() | code |
1005390/cell_6 | [
"text_plain_output_1.png"
] | from sklearn import svm
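# train_images / train_labels / test_images / test_labels are created in an earlier cell of the source notebook (not shown here).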
clf = svm.SVC(kernel='rbf', gamma=0.01, C=10)
clf.fit(train_images, train_labels.values.ravel())
clf.score(test_images, test_labels) | code |
1005390/cell_2 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import matplotlib.pyplot as plt, matplotlib.image as mpimg
from sklearn.model_selection import train_test_split
from sklearn import svm
import numpy as np | code |
106205797/cell_21 | [
"text_plain_output_1.png"
] | import numpy as np
import time
x = np.random.random(100000000)
start = time.time()
sum(x) / len(x)
start = time.time()
np.mean(x)
a = np.array([1, 2, 3, 4, 5])
b = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
c = np.array(['Hello', 'World'])
print(c.dtype) | code |
106205797/cell_4 | [
"text_plain_output_1.png"
] | !pip install numpy | code |
106205797/cell_23 | [
"text_plain_output_1.png"
] | import numpy as np
import time
x = np.random.random(100000000)
start = time.time()
sum(x) / len(x)
start = time.time()
np.mean(x)
a = np.array([1, 2, 3, 4, 5])
b = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
c = np.array(['Hello', 'World'])
d = np.array([1, 2, 3, 'Hello'])
e = np.array([1, 2.3, 5])
print(e.dtype) | code |
106205797/cell_30 | [
"text_plain_output_1.png"
] | import numpy as np
import time
x = np.random.random(100000000)
start = time.time()
sum(x) / len(x)
start = time.time()
np.mean(x)
a = np.array([1, 2, 3, 4, 5])
b = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
c = np.array(['Hello', 'World'])
d = np.array([1, 2, 3, 'Hello'])
e = np.array([1, 2.3, 5])
f = np.array([1, 2.3, 4], dtype=np.int64)
x = np.array([1, 2, 3, 4, 5])
np.save('my_array', x)
y = np.load('my_array.npy')
X = np.zeros((3, 4))
Y = np.ones((3, 4))
Z = np.full((3, 4), 7)
print(Z) | code |
106205797/cell_33 | [
"text_plain_output_1.png"
] | import numpy as np
import time
x = np.random.random(100000000)
start = time.time()
sum(x) / len(x)
start = time.time()
np.mean(x)
a = np.array([1, 2, 3, 4, 5])
b = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
c = np.array(['Hello', 'World'])
d = np.array([1, 2, 3, 'Hello'])
e = np.array([1, 2.3, 5])
f = np.array([1, 2.3, 4], dtype=np.int64)
x = np.array([1, 2, 3, 4, 5])
np.save('my_array', x)
y = np.load('my_array.npy')
X = np.zeros((3, 4))
Y = np.ones((3, 4))
Z = np.full((3, 4), 7)
I = np.eye(5)
D = np.diag([1, 2, 3, 4])
N = np.arange(10)
M = np.arange(10, 30)
O = np.arange(10, 30, 3)
print(N)
print(M)
print(O) | code |
106205797/cell_20 | [
"text_plain_output_1.png"
] | import numpy as np
import time
x = np.random.random(100000000)
start = time.time()
sum(x) / len(x)
start = time.time()
np.mean(x)
a = np.array([1, 2, 3, 4, 5])
b = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
print('rank: ', b.ndim) | code |
106205797/cell_29 | [
"text_plain_output_1.png"
] | import numpy as np
import time
x = np.random.random(100000000)
start = time.time()
sum(x) / len(x)
start = time.time()
np.mean(x)
a = np.array([1, 2, 3, 4, 5])
b = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
c = np.array(['Hello', 'World'])
d = np.array([1, 2, 3, 'Hello'])
e = np.array([1, 2.3, 5])
f = np.array([1, 2.3, 4], dtype=np.int64)
x = np.array([1, 2, 3, 4, 5])
np.save('my_array', x)
y = np.load('my_array.npy')
X = np.zeros((3, 4))
Y = np.ones((3, 4))
print(Y) | code |
106205797/cell_26 | [
"text_plain_output_1.png"
] | import numpy as np
import time
x = np.random.random(100000000)
start = time.time()
sum(x) / len(x)
start = time.time()
np.mean(x)
a = np.array([1, 2, 3, 4, 5])
b = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
c = np.array(['Hello', 'World'])
d = np.array([1, 2, 3, 'Hello'])
e = np.array([1, 2.3, 5])
f = np.array([1, 2.3, 4], dtype=np.int64)
x = np.array([1, 2, 3, 4, 5])
np.save('my_array', x)
y = np.load('my_array.npy')
print(y) | code |
106205797/cell_19 | [
"text_plain_output_1.png"
] | import numpy as np
import time
x = np.random.random(100000000)
start = time.time()
sum(x) / len(x)
start = time.time()
np.mean(x)
a = np.array([1, 2, 3, 4, 5])
b = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
print(b.shape) | code |
106205797/cell_18 | [
"text_plain_output_1.png"
] | import numpy as np
import time
x = np.random.random(100000000)
start = time.time()
sum(x) / len(x)
start = time.time()
np.mean(x)
a = np.array([1, 2, 3, 4, 5])
b = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
print(b) | code |
106205797/cell_32 | [
"text_plain_output_1.png"
] | import numpy as np
import time
x = np.random.random(100000000)
start = time.time()
sum(x) / len(x)
start = time.time()
np.mean(x)
a = np.array([1, 2, 3, 4, 5])
b = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
c = np.array(['Hello', 'World'])
d = np.array([1, 2, 3, 'Hello'])
e = np.array([1, 2.3, 5])
f = np.array([1, 2.3, 4], dtype=np.int64)
x = np.array([1, 2, 3, 4, 5])
np.save('my_array', x)
y = np.load('my_array.npy')
X = np.zeros((3, 4))
Y = np.ones((3, 4))
Z = np.full((3, 4), 7)
I = np.eye(5)
D = np.diag([1, 2, 3, 4])
print(D) | code |
106205797/cell_28 | [
"text_plain_output_1.png"
] | import numpy as np
import time
x = np.random.random(100000000)
start = time.time()
sum(x) / len(x)
start = time.time()
np.mean(x)
a = np.array([1, 2, 3, 4, 5])
b = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
c = np.array(['Hello', 'World'])
d = np.array([1, 2, 3, 'Hello'])
e = np.array([1, 2.3, 5])
f = np.array([1, 2.3, 4], dtype=np.int64)
x = np.array([1, 2, 3, 4, 5])
np.save('my_array', x)
y = np.load('my_array.npy')
X = np.zeros((3, 4))
print(X) | code |
106205797/cell_15 | [
"text_plain_output_1.png"
] | import numpy as np
import time
x = np.random.random(100000000)
start = time.time()
sum(x) / len(x)
start = time.time()
np.mean(x)
a = np.array([1, 2, 3, 4, 5])
print('rank: ', a.ndim) | code |
106205797/cell_16 | [
"text_plain_output_1.png"
] | import numpy as np
import time
x = np.random.random(100000000)
start = time.time()
sum(x) / len(x)
start = time.time()
np.mean(x)
a = np.array([1, 2, 3, 4, 5])
print(a.shape) | code |
106205797/cell_17 | [
"text_plain_output_1.png"
] | import numpy as np
import time
x = np.random.random(100000000)
start = time.time()
sum(x) / len(x)
start = time.time()
np.mean(x)
a = np.array([1, 2, 3, 4, 5])
print(a.dtype) | code |
106205797/cell_31 | [
"text_plain_output_1.png"
] | import numpy as np
import time
x = np.random.random(100000000)
start = time.time()
sum(x) / len(x)
start = time.time()
np.mean(x)
a = np.array([1, 2, 3, 4, 5])
b = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
c = np.array(['Hello', 'World'])
d = np.array([1, 2, 3, 'Hello'])
e = np.array([1, 2.3, 5])
f = np.array([1, 2.3, 4], dtype=np.int64)
x = np.array([1, 2, 3, 4, 5])
np.save('my_array', x)
y = np.load('my_array.npy')
X = np.zeros((3, 4))
Y = np.ones((3, 4))
Z = np.full((3, 4), 7)
I = np.eye(5)
print(I) | code |
106205797/cell_24 | [
"text_plain_output_1.png"
] | import numpy as np
import time
x = np.random.random(100000000)
start = time.time()
sum(x) / len(x)
start = time.time()
np.mean(x)
a = np.array([1, 2, 3, 4, 5])
b = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
c = np.array(['Hello', 'World'])
d = np.array([1, 2, 3, 'Hello'])
e = np.array([1, 2.3, 5])
f = np.array([1, 2.3, 4], dtype=np.int64)
print(f.dtype) | code |
106205797/cell_14 | [
"text_plain_output_1.png"
] | import numpy as np
import time
x = np.random.random(100000000)
start = time.time()
sum(x) / len(x)
start = time.time()
np.mean(x)
a = np.array([1, 2, 3, 4, 5])
print(a)
print(type(a)) | code |
106205797/cell_22 | [
"text_plain_output_1.png"
] | import numpy as np
import time
x = np.random.random(100000000)
start = time.time()
sum(x) / len(x)
start = time.time()
np.mean(x)
a = np.array([1, 2, 3, 4, 5])
b = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
c = np.array(['Hello', 'World'])
d = np.array([1, 2, 3, 'Hello'])
print(d.dtype) | code |
106205797/cell_10 | [
"text_plain_output_1.png"
] | import numpy as np
import time
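# Time the mean of 1e8 random floats: built-in sum()/len() vs. vectorized np.mean.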
x = np.random.random(100000000)
start = time.time()
sum(x) / len(x)
print('using built-in python function: ', time.time() - start)
start = time.time()
np.mean(x)
print('using NumPy: ', time.time() - start) | code |
32071774/cell_21 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
test_df = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
pop_info = pd.read_csv('../input/population-by-country-2020/population_by_country_2020.csv')
pop_info.rename(columns={'Density (P/Km²)': 'Density'}, inplace=True)
pop_info.columns
country_lookup = pop_info[['Country (or dependency)', 'Population (2020)', 'Density', 'Med. Age', 'Urban Pop %']]
pd.DataFrame.from_dict(country_lookup)
train_df_pop = pd.merge(train_df, country_lookup, how='left', left_on='Country_Region', right_on='Country (or dependency)')
test_df_pop = pd.merge(test_df, country_lookup, how='left', left_on='Country_Region', right_on='Country (or dependency)')
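# Hand-entered population figures for regions whose names have no match in the lookup table.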
test_df_pop.loc[test_df_pop['Country_Region'] == 'US', ['Population (2020)']] = 331002651
test_df_pop.loc[test_df_pop['Country_Region'] == 'US', ['Density']] = 36
test_df_pop.loc[test_df_pop['Country_Region'] == 'US', ['Med. Age']] = 38
test_df_pop.loc[test_df_pop['Country_Region'] == 'US', ['Urban Pop %']] = '83%'
test_df_pop.loc[test_df_pop['Country_Region'] == 'Burma', ['Population (2020)']] = 54409800
test_df_pop.loc[test_df_pop['Country_Region'] == 'Burma', ['Density']] = 83
test_df_pop.loc[test_df_pop['Country_Region'] == 'Burma', ['Med. Age']] = 29
test_df_pop.loc[test_df_pop['Country_Region'] == 'Burma', ['Urban Pop %']] = '39%'
test_df_pop.loc[test_df_pop['Country_Region'] == 'Sao Tome and Principe', ['Population (2020)']] = 219159
test_df_pop.loc[test_df_pop['Country_Region'] == 'Sao Tome and Principe', ['Density']] = 228
test_df_pop.loc[test_df_pop['Country_Region'] == 'Sao Tome and Principe', ['Med. Age']] = 19
test_df_pop.loc[test_df_pop['Country_Region'] == 'Sao Tome and Principe', ['Urban Pop %']] = '74%'
test_df_pop.loc[test_df_pop['Country_Region'] == 'West Bank and Gaza', ['Population (2020)']] = 3340143
test_df_pop.loc[test_df_pop['Country_Region'] == 'West Bank and Gaza', ['Density']] = 759
test_df_pop.loc[test_df_pop['Country_Region'] == 'West Bank and Gaza', ['Med. Age']] = 17
test_df_pop.loc[test_df_pop['Country_Region'] == 'West Bank and Gaza', ['Urban Pop %']] = '76%'
test_df_pop.loc[test_df_pop['Country_Region'] == 'Kosovo', ['Population (2020)']] = 1810463
test_df_pop.loc[test_df_pop['Country_Region'] == 'Kosovo', ['Density']] = 159
test_df_pop.loc[test_df_pop['Country_Region'] == 'Kosovo', ['Med. Age']] = 29
test_df_pop.loc[test_df_pop['Country_Region'] == 'Kosovo', ['Urban Pop %']] = '55%'
test_df_pop.loc[test_df_pop['Country_Region'] == 'Korea, South', ['Population (2020)']] = 51269185
test_df_pop.loc[test_df_pop['Country_Region'] == 'Korea, South', ['Density']] = 527
test_df_pop.loc[test_df_pop['Country_Region'] == 'Korea, South', ['Med. Age']] = 44
test_df_pop.loc[test_df_pop['Country_Region'] == 'Korea, South', ['Urban Pop %']] = '82%'
test_df_pop.loc[test_df_pop['Country_Region'] == 'Czechia', ['Population (2020)']] = 10708981
test_df_pop.loc[test_df_pop['Country_Region'] == 'Czechia', ['Density']] = 139
test_df_pop.loc[test_df_pop['Country_Region'] == 'Czechia', ['Med. Age']] = 43
test_df_pop.loc[test_df_pop['Country_Region'] == 'Czechia', ['Urban Pop %']] = '74%'
test_df_pop.loc[test_df_pop['Country_Region'] == 'Taiwan*', ['Population (2020)']] = 23816775
test_df_pop.loc[test_df_pop['Country_Region'] == 'Taiwan*', ['Density']] = 673
test_df_pop.loc[test_df_pop['Country_Region'] == 'Taiwan*', ['Med. Age']] = 42
test_df_pop.loc[test_df_pop['Country_Region'] == 'Taiwan*', ['Urban Pop %']] = '79%'
test_df_pop.loc[test_df_pop['Country_Region'] == 'Congo (Kinshasa)', ['Population (2020)']] = 89561403
test_df_pop.loc[test_df_pop['Country_Region'] == 'Congo (Kinshasa)', ['Density']] = 40
test_df_pop.loc[test_df_pop['Country_Region'] == 'Congo (Kinshasa)', ['Med. Age']] = 17
test_df_pop.loc[test_df_pop['Country_Region'] == 'Congo (Kinshasa)', ['Urban Pop %']] = '46%'
test_df_pop.loc[test_df_pop['Country_Region'] == 'Congo (Brazzaville)', ['Population (2020)']] = 5518087
test_df_pop.loc[test_df_pop['Country_Region'] == 'Congo (Brazzaville)', ['Density']] = 16
test_df_pop.loc[test_df_pop['Country_Region'] == 'Congo (Brazzaville)', ['Med. Age']] = 19
test_df_pop.loc[test_df_pop['Country_Region'] == 'Congo (Brazzaville)', ['Urban Pop %']] = '70%'
test_df_pop.loc[test_df_pop['Country_Region'] == "Cote d'Ivoire", ['Population (2020)']] = 26378274
test_df_pop.loc[test_df_pop['Country_Region'] == "Cote d'Ivoire", ['Density']] = 83
test_df_pop.loc[test_df_pop['Country_Region'] == "Cote d'Ivoire", ['Med. Age']] = 19
test_df_pop.loc[test_df_pop['Country_Region'] == "Cote d'Ivoire", ['Urban Pop %']] = '51%'
test_df_pop.loc[test_df_pop['Country_Region'] == 'Saint Kitts and Nevis', ['Population (2020)']] = 53199
test_df_pop.loc[test_df_pop['Country_Region'] == 'Saint Kitts and Nevis', ['Density']] = 205
test_df_pop.loc[test_df_pop['Country_Region'] == 'Saint Kitts and Nevis', ['Med. Age']] = 36
test_df_pop.loc[test_df_pop['Country_Region'] == 'Saint Kitts and Nevis', ['Urban Pop %']] = '33%'
test_df_pop.loc[test_df_pop['Country_Region'] == 'Saint Vincent and the Grenadines', ['Population (2020)']] = 110940
test_df_pop.loc[test_df_pop['Country_Region'] == 'Saint Vincent and the Grenadines', ['Density']] = 284
test_df_pop.loc[test_df_pop['Country_Region'] == 'Saint Vincent and the Grenadines', ['Med. Age']] = 33
test_df_pop.loc[test_df_pop['Country_Region'] == 'Saint Vincent and the Grenadines', ['Urban Pop %']] = '53%'
test_df_pop.loc[test_df_pop['Country_Region'] == 'Diamond Princess', ['Population (2020)']] = 3770
test_df_pop.loc[test_df_pop['Country_Region'] == 'Diamond Princess', ['Density']] = 3770
test_df_pop.loc[test_df_pop['Country_Region'] == 'Diamond Princess', ['Med. Age']] = 62
test_df_pop.loc[test_df_pop['Country_Region'] == 'Diamond Princess', ['Urban Pop %']] = '100%'
test_df_pop.loc[test_df_pop['Country_Region'] == 'MS Zaandam', ['Population (2020)']] = 1432
test_df_pop.loc[test_df_pop['Country_Region'] == 'MS Zaandam', ['Density']] = 1432
test_df_pop.loc[test_df_pop['Country_Region'] == 'MS Zaandam', ['Med. Age']] = 65
test_df_pop.loc[test_df_pop['Country_Region'] == 'MS Zaandam', ['Urban Pop %']] = '100%'
test_df_pop.isnull().sum() | code |
32071774/cell_20 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
test_df = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
pop_info = pd.read_csv('../input/population-by-country-2020/population_by_country_2020.csv')
pop_info.rename(columns={'Density (P/Km²)': 'Density'}, inplace=True)
pop_info.columns
country_lookup = pop_info[['Country (or dependency)', 'Population (2020)', 'Density', 'Med. Age', 'Urban Pop %']]
pd.DataFrame.from_dict(country_lookup)
train_df_pop = pd.merge(train_df, country_lookup, how='left', left_on='Country_Region', right_on='Country (or dependency)')
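# Hand-entered population figures for regions whose names have no match in the lookup table.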
train_df_pop.loc[train_df_pop['Country_Region'] == 'US', ['Population (2020)']] = 331002651
train_df_pop.loc[train_df_pop['Country_Region'] == 'US', ['Density']] = 36
train_df_pop.loc[train_df_pop['Country_Region'] == 'US', ['Med. Age']] = 38
train_df_pop.loc[train_df_pop['Country_Region'] == 'US', ['Urban Pop %']] = '83%'
train_df_pop.loc[train_df_pop['Country_Region'] == 'Burma', ['Population (2020)']] = 54409800
train_df_pop.loc[train_df_pop['Country_Region'] == 'Burma', ['Density']] = 83
train_df_pop.loc[train_df_pop['Country_Region'] == 'Burma', ['Med. Age']] = 29
train_df_pop.loc[train_df_pop['Country_Region'] == 'Burma', ['Urban Pop %']] = '39%'
train_df_pop.loc[train_df_pop['Country_Region'] == 'Sao Tome and Principe', ['Population (2020)']] = 219159
train_df_pop.loc[train_df_pop['Country_Region'] == 'Sao Tome and Principe', ['Density']] = 228
train_df_pop.loc[train_df_pop['Country_Region'] == 'Sao Tome and Principe', ['Med. Age']] = 19
train_df_pop.loc[train_df_pop['Country_Region'] == 'Sao Tome and Principe', ['Urban Pop %']] = '74%'
train_df_pop.loc[train_df_pop['Country_Region'] == 'West Bank and Gaza', ['Population (2020)']] = 3340143
train_df_pop.loc[train_df_pop['Country_Region'] == 'West Bank and Gaza', ['Density']] = 759
train_df_pop.loc[train_df_pop['Country_Region'] == 'West Bank and Gaza', ['Med. Age']] = 17
train_df_pop.loc[train_df_pop['Country_Region'] == 'West Bank and Gaza', ['Urban Pop %']] = '76%'
train_df_pop.loc[train_df_pop['Country_Region'] == 'Kosovo', ['Population (2020)']] = 1810463
train_df_pop.loc[train_df_pop['Country_Region'] == 'Kosovo', ['Density']] = 159
train_df_pop.loc[train_df_pop['Country_Region'] == 'Kosovo', ['Med. Age']] = 29
train_df_pop.loc[train_df_pop['Country_Region'] == 'Kosovo', ['Urban Pop %']] = '55%'
train_df_pop.loc[train_df_pop['Country_Region'] == 'Korea, South', ['Population (2020)']] = 51269185
train_df_pop.loc[train_df_pop['Country_Region'] == 'Korea, South', ['Density']] = 527
train_df_pop.loc[train_df_pop['Country_Region'] == 'Korea, South', ['Med. Age']] = 44
train_df_pop.loc[train_df_pop['Country_Region'] == 'Korea, South', ['Urban Pop %']] = '82%'
train_df_pop.loc[train_df_pop['Country_Region'] == 'Czechia', ['Population (2020)']] = 10708981
train_df_pop.loc[train_df_pop['Country_Region'] == 'Czechia', ['Density']] = 139
train_df_pop.loc[train_df_pop['Country_Region'] == 'Czechia', ['Med. Age']] = 43
train_df_pop.loc[train_df_pop['Country_Region'] == 'Czechia', ['Urban Pop %']] = '74%'
train_df_pop.loc[train_df_pop['Country_Region'] == 'Taiwan*', ['Population (2020)']] = 23816775
train_df_pop.loc[train_df_pop['Country_Region'] == 'Taiwan*', ['Density']] = 673
train_df_pop.loc[train_df_pop['Country_Region'] == 'Taiwan*', ['Med. Age']] = 42
train_df_pop.loc[train_df_pop['Country_Region'] == 'Taiwan*', ['Urban Pop %']] = '79%'
train_df_pop.loc[train_df_pop['Country_Region'] == 'Congo (Kinshasa)', ['Population (2020)']] = 89561403
train_df_pop.loc[train_df_pop['Country_Region'] == 'Congo (Kinshasa)', ['Density']] = 40
train_df_pop.loc[train_df_pop['Country_Region'] == 'Congo (Kinshasa)', ['Med. Age']] = 17
train_df_pop.loc[train_df_pop['Country_Region'] == 'Congo (Kinshasa)', ['Urban Pop %']] = '46%'
train_df_pop.loc[train_df_pop['Country_Region'] == 'Congo (Brazzaville)', ['Population (2020)']] = 5518087
train_df_pop.loc[train_df_pop['Country_Region'] == 'Congo (Brazzaville)', ['Density']] = 16
train_df_pop.loc[train_df_pop['Country_Region'] == 'Congo (Brazzaville)', ['Med. Age']] = 19
train_df_pop.loc[train_df_pop['Country_Region'] == 'Congo (Brazzaville)', ['Urban Pop %']] = '70%'
train_df_pop.loc[train_df_pop['Country_Region'] == "Cote d'Ivoire", ['Population (2020)']] = 26378274
train_df_pop.loc[train_df_pop['Country_Region'] == "Cote d'Ivoire", ['Density']] = 83
train_df_pop.loc[train_df_pop['Country_Region'] == "Cote d'Ivoire", ['Med. Age']] = 19
train_df_pop.loc[train_df_pop['Country_Region'] == "Cote d'Ivoire", ['Urban Pop %']] = '51%'
train_df_pop.loc[train_df_pop['Country_Region'] == 'Saint Kitts and Nevis', ['Population (2020)']] = 53199
train_df_pop.loc[train_df_pop['Country_Region'] == 'Saint Kitts and Nevis', ['Density']] = 205
train_df_pop.loc[train_df_pop['Country_Region'] == 'Saint Kitts and Nevis', ['Med. Age']] = 36
train_df_pop.loc[train_df_pop['Country_Region'] == 'Saint Kitts and Nevis', ['Urban Pop %']] = '33%'
train_df_pop.loc[train_df_pop['Country_Region'] == 'Saint Vincent and the Grenadines', ['Population (2020)']] = 110940
train_df_pop.loc[train_df_pop['Country_Region'] == 'Saint Vincent and the Grenadines', ['Density']] = 284
train_df_pop.loc[train_df_pop['Country_Region'] == 'Saint Vincent and the Grenadines', ['Med. Age']] = 33
train_df_pop.loc[train_df_pop['Country_Region'] == 'Saint Vincent and the Grenadines', ['Urban Pop %']] = '53%'
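# Cruise ships have no entry in the population table; fill them manually (values approximate onboard counts).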
train_df_pop.loc[train_df_pop['Country_Region'] == 'Diamond Princess', ['Population (2020)']] = 3770
train_df_pop.loc[train_df_pop['Country_Region'] == 'Diamond Princess', ['Density']] = 3770
train_df_pop.loc[train_df_pop['Country_Region'] == 'Diamond Princess', ['Med. Age']] = 62
train_df_pop.loc[train_df_pop['Country_Region'] == 'Diamond Princess', ['Urban Pop %']] = '100%'
train_df_pop.loc[train_df_pop['Country_Region'] == 'MS Zaandam', ['Population (2020)']] = 1432
train_df_pop.loc[train_df_pop['Country_Region'] == 'MS Zaandam', ['Density']] = 1432
train_df_pop.loc[train_df_pop['Country_Region'] == 'MS Zaandam', ['Med. Age']] = 65
train_df_pop.loc[train_df_pop['Country_Region'] == 'MS Zaandam', ['Urban Pop %']] = '100%'
train_df_pop.isnull().sum() | code |
32071774/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
test_df = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
train_df.info() | code |
32071774/cell_2 | [
"text_plain_output_1.png"
] | !pip install pycountry_convert | code |
32071774/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
32071774/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
test_df = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
test_df.info() | code |
32071774/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
test_df = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
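# Check how the train and test date windows line up.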
print('Min train date: ', train_df['Date'].min())
print('Max train date: ', train_df['Date'].max())
print('Min test date: ', test_df['Date'].min())
print('Max test date: ', test_df['Date'].max()) | code |
32071774/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
test_df = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
pop_info = pd.read_csv('../input/population-by-country-2020/population_by_country_2020.csv')
pop_info.rename(columns={'Density (P/Km²)': 'Density'}, inplace=True)
pop_info.columns
country_lookup = pop_info[['Country (or dependency)', 'Population (2020)', 'Density', 'Med. Age', 'Urban Pop %']]
pd.DataFrame.from_dict(country_lookup)
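# Left-join the population features onto every training row by country name; unmatched countries get NaN.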
train_df_pop = pd.merge(train_df, country_lookup, how='left', left_on='Country_Region', right_on='Country (or dependency)')
train_df_pop.info() | code |
32071774/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
test_df = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
pop_info = pd.read_csv('../input/population-by-country-2020/population_by_country_2020.csv')
pop_info.rename(columns={'Density (P/Km²)': 'Density'}, inplace=True)
pop_info.columns
country_lookup = pop_info[['Country (or dependency)', 'Population (2020)', 'Density', 'Med. Age', 'Urban Pop %']]
country_lookup.head() | code |
32071774/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
test_df = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
pop_info = pd.read_csv('../input/population-by-country-2020/population_by_country_2020.csv')
pop_info.head() | code |
32071774/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
test_df = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
pop_info = pd.read_csv('../input/population-by-country-2020/population_by_country_2020.csv')
pop_info.rename(columns={'Density (P/Km²)': 'Density'}, inplace=True)
pop_info.columns | code |
32071774/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
test_df = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
train_df.head() | code |
49125428/cell_21 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/kaggle-survey-2020/kaggle_survey_2020_responses.csv')
data.shape
def datainfo(df):
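    # One row per column: name, unique count, missing count, dtype, and the first five unique values.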
return pd.DataFrame([(col, df[col].nunique(), df[col].isna().sum(), df[col].dtype, df[col].unique()[:5]) for col in df.columns], columns=['name', 'nunique', 'missing', 'dtype', 'values :5'])
question = data.iloc[0, :]
data = data.iloc[1:, :]
fig, ax = plt.subplots(1, 1, figsize=(8, 5), dpi=200)
dataQ1 = data['Q1'].value_counts().sort_index()
ax.bar(dataQ1.index, dataQ1)
plt.show()
dataQ2 = data['Q2'].apply(lambda x: 'x' if x not in ['Man', 'Woman'] else x)
fig, ax = plt.subplots(1, 1, figsize=(8, 5), dpi=200)
dataQ2 = dataQ2.loc[dataQ2 != 'x'].value_counts()
ax.bar(dataQ2.index, dataQ2)
plt.show()
data['Q2'] = data['Q2'].apply(lambda x: 'x' if x not in ['Man', 'Woman'] else x)
gender = data[data['Q2'] != 'x'].groupby('Q2')['Q1'].value_counts().unstack()
man = gender.loc['Man']
woman = gender.loc['Woman']
fig, ax = plt.subplots(1, 1, figsize=(8, 6), dpi=200)
ax.bar(man.index, man)
ax.bar(woman.index, woman)
plt.show()
dataQ3 = data['Q3'].value_counts()[:10]
fig, ax = plt.subplots(1, 1, figsize=(8, 6), dpi=200)
ax.barh(dataQ3.index, dataQ3)
plt.show()
dataQ4 = data['Q4'].value_counts()[:10]
fig, ax = plt.subplots(1, 1, figsize=(8, 6), dpi=200)
ax.barh(dataQ4.index, dataQ4)
plt.show()
pd.options.display.float_format = lambda x: '{:.0f}'.format(x) if int(x) == x else '{:,.2f}'.format(x)
data.groupby('Q3')['Q5'].value_counts().unstack().style.background_gradient(cmap='Blues')
# pd.DataFrame(question).iloc[6]
dataQ6 = data['Q6'][data['Q6'].notnull()].value_counts()
fig, ax = plt.subplots(1, 1, figsize=(8, 5), dpi=200)
ax.bar(dataQ6.index, dataQ6)
ax.set_xticklabels(dataQ6.index, rotation=59)
plt.show()
fig, ax = plt.subplots(1, 1, figsize=(8, 6), dpi=200)
sns.heatmap(data[data['Q6'].notnull()].groupby('Q6')['Q4'].value_counts().unstack(), ax=ax, square=True, annot=True, fmt='d', cmap='Blues')
plt.show() | code |
49125428/cell_13 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('../input/kaggle-survey-2020/kaggle_survey_2020_responses.csv')
data.shape
question = data.iloc[0, :]
data = data.iloc[1:, :]
fig, ax = plt.subplots(1, 1, figsize=(8, 5), dpi=200)
dataQ1 = data['Q1'].value_counts().sort_index()
ax.bar(dataQ1.index, dataQ1)
plt.show()
dataQ2 = data['Q2'].apply(lambda x: 'x' if x not in ['Man', 'Woman'] else x)
fig, ax = plt.subplots(1, 1, figsize=(8, 5), dpi=200)
dataQ2 = dataQ2.loc[dataQ2 != 'x'].value_counts()
ax.bar(dataQ2.index, dataQ2)
plt.show() | code |
49125428/cell_4 | [
"image_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/kaggle-survey-2020/kaggle_survey_2020_responses.csv')
data.shape | code |
49125428/cell_23 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/kaggle-survey-2020/kaggle_survey_2020_responses.csv')
data.shape
def datainfo(df):
return pd.DataFrame([(col, df[col].nunique(), df[col].isna().sum(), df[col].dtype, df[col].unique()[:5]) for col in df.columns], columns=['name', 'nunique', 'missing', 'dtype', 'values :5'])
question = data.iloc[0, :]
data = data.iloc[1:, :]
fig, ax = plt.subplots(1, 1, figsize=(8, 5), dpi=200)
dataQ1 = data['Q1'].value_counts().sort_index()
ax.bar(dataQ1.index, dataQ1)
plt.show()
dataQ2 = data['Q2'].apply(lambda x: 'x' if x not in ['Man', 'Woman'] else x)
fig, ax = plt.subplots(1, 1, figsize=(8, 5), dpi=200)
dataQ2 = dataQ2.loc[dataQ2 != 'x'].value_counts()
ax.bar(dataQ2.index, dataQ2)
plt.show()
data['Q2'] = data['Q2'].apply(lambda x: 'x' if x not in ['Man', 'Woman'] else x)
gender = data[data['Q2'] != 'x'].groupby('Q2')['Q1'].value_counts().unstack()
man = gender.loc['Man']
woman = gender.loc['Woman']
fig, ax = plt.subplots(1, 1, figsize=(8, 6), dpi=200)
ax.bar(man.index, man)
ax.bar(woman.index, woman)
plt.show()
dataQ3 = data['Q3'].value_counts()[:10]
fig, ax = plt.subplots(1, 1, figsize=(8, 6), dpi=200)
ax.barh(dataQ3.index, dataQ3)
plt.show()
dataQ4 = data['Q4'].value_counts()[:10]
fig, ax = plt.subplots(1, 1, figsize=(8, 6), dpi=200)
ax.barh(dataQ4.index, dataQ4)
plt.show()
pd.options.display.float_format = lambda x: '{:.0f}'.format(x) if int(x) == x else '{:,.2f}'.format(x)
data.groupby('Q3')['Q5'].value_counts().unstack().style.background_gradient(cmap='Blues')
# pd.DataFrame(question).iloc[6]
dataQ6 = data['Q6'][data['Q6'].notnull()].value_counts()
fig, ax = plt.subplots(1, 1, figsize=(8, 5), dpi=200)
ax.bar(dataQ6.index, dataQ6)
ax.set_xticklabels(dataQ6.index, rotation=59)
plt.show()
fig, ax = plt.subplots(1, 1, figsize=(8, 6), dpi=200)
sns.heatmap(data[data['Q6'].notnull()].groupby('Q6')['Q4'].value_counts().unstack(),
            ax=ax, square=True, annot=True, fmt='d', cmap='Blues')
plt.show()
data.loc[data['Q7_Part_1'].notnull() & data['Q7_Part_2'].notnull() & data['Q7_Part_3'].notnull()].loc[:, ['Q7_Part_1', 'Q7_Part_2', 'Q7_Part_3']].value_counts()
dataQ8 = data['Q8'].value_counts()
fig, ax = plt.subplots(1, 1, figsize=(8, 6), dpi=200)
ax.barh(dataQ8.index, dataQ8)
plt.show() | code |
49125428/cell_20 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('../input/kaggle-survey-2020/kaggle_survey_2020_responses.csv')
data.shape
def datainfo(df):
return pd.DataFrame([(col, df[col].nunique(), df[col].isna().sum(), df[col].dtype, df[col].unique()[:5]) for col in df.columns], columns=['name', 'nunique', 'missing', 'dtype', 'values :5'])
question = data.iloc[0, :]
data = data.iloc[1:, :]
fig, ax = plt.subplots(1, 1, figsize=(8, 5), dpi=200)
dataQ1 = data['Q1'].value_counts().sort_index()
ax.bar(dataQ1.index, dataQ1)
plt.show()
dataQ2 = data['Q2'].apply(lambda x: 'x' if x not in ['Man', 'Woman'] else x)
fig, ax = plt.subplots(1, 1, figsize=(8, 5), dpi=200)
dataQ2 = dataQ2.loc[dataQ2 != 'x'].value_counts()
ax.bar(dataQ2.index, dataQ2)
plt.show()
data['Q2'] = data['Q2'].apply(lambda x: 'x' if x not in ['Man', 'Woman'] else x)
gender = data[data['Q2'] != 'x'].groupby('Q2')['Q1'].value_counts().unstack()
man = gender.loc['Man']
woman = gender.loc['Woman']
fig, ax = plt.subplots(1, 1, figsize=(8, 6), dpi=200)
ax.bar(man.index, man)
ax.bar(woman.index, woman)
plt.show()
dataQ3 = data['Q3'].value_counts()[:10]
fig, ax = plt.subplots(1, 1, figsize=(8, 6), dpi=200)
ax.barh(dataQ3.index, dataQ3)
plt.show()
dataQ4 = data['Q4'].value_counts()[:10]
fig, ax = plt.subplots(1, 1, figsize=(8, 6), dpi=200)
ax.barh(dataQ4.index, dataQ4)
plt.show()
pd.options.display.float_format = lambda x: '{:.0f}'.format(x) if int(x) == x else '{:,.2f}'.format(x)
data.groupby('Q3')['Q5'].value_counts().unstack().style.background_gradient(cmap='Blues')
dataQ6 = data['Q6'][data['Q6'].notnull()].value_counts()
fig, ax = plt.subplots(1, 1, figsize=(8, 5), dpi=200)
ax.bar(dataQ6.index, dataQ6)
ax.set_xticklabels(dataQ6.index, rotation=59)
plt.show() | code |
49125428/cell_6 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/kaggle-survey-2020/kaggle_survey_2020_responses.csv')
data.shape
def datainfo(df):
return pd.DataFrame([(col, df[col].nunique(), df[col].isna().sum(), df[col].dtype, df[col].unique()[:5]) for col in df.columns], columns=['name', 'nunique', 'missing', 'dtype', 'values :5'])
datainfo(data) | code |
49125428/cell_2 | [
"image_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/kaggle-survey-2020/kaggle_survey_2020_responses.csv') | code |
49125428/cell_11 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('../input/kaggle-survey-2020/kaggle_survey_2020_responses.csv')
data.shape
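# The first row of the responses file holds the question text; split it off before analysis.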
question = data.iloc[0, :]
data = data.iloc[1:, :]
fig, ax = plt.subplots(1, 1, figsize=(8, 5), dpi=200)
dataQ1 = data['Q1'].value_counts().sort_index()
ax.bar(dataQ1.index, dataQ1)
plt.show() | code |
49125428/cell_19 | [
"image_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/kaggle-survey-2020/kaggle_survey_2020_responses.csv')
data.shape
def datainfo(df):
return pd.DataFrame([(col, df[col].nunique(), df[col].isna().sum(), df[col].dtype, df[col].unique()[:5]) for col in df.columns], columns=['name', 'nunique', 'missing', 'dtype', 'values :5'])
question = data.iloc[0, :]
data = data.iloc[1:, :]
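# Render whole numbers without decimals and everything else with two decimal places.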
pd.options.display.float_format = lambda x: '{:.0f}'.format(x) if int(x) == x else '{:,.2f}'.format(x)
data.groupby('Q3')['Q5'].value_counts().unstack().style.background_gradient(cmap='Blues') | code |
49125428/cell_7 | [
"image_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/kaggle-survey-2020/kaggle_survey_2020_responses.csv')
data.shape
data.head() | code |
49125428/cell_18 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('../input/kaggle-survey-2020/kaggle_survey_2020_responses.csv')
data.shape
question = data.iloc[0, :]
data = data.iloc[1:, :]
fig, ax = plt.subplots(1, 1, figsize=(8, 5), dpi=200)
dataQ1 = data['Q1'].value_counts().sort_index()
ax.bar(dataQ1.index, dataQ1)
plt.show()
dataQ2 = data['Q2'].apply(lambda x: 'x' if x not in ['Man', 'Woman'] else x)
fig, ax = plt.subplots(1, 1, figsize=(8, 5), dpi=200)
dataQ2 = dataQ2.loc[dataQ2 != 'x'].value_counts()
ax.bar(dataQ2.index, dataQ2)
plt.show()
data['Q2'] = data['Q2'].apply(lambda x: 'x' if x not in ['Man', 'Woman'] else x)
gender = data[data['Q2'] != 'x'].groupby('Q2')['Q1'].value_counts().unstack()
man = gender.loc['Man']
woman = gender.loc['Woman']
fig, ax = plt.subplots(1, 1, figsize=(8, 6), dpi=200)
ax.bar(man.index, man)
ax.bar(woman.index, woman)
plt.show()
dataQ3 = data['Q3'].value_counts()[:10]
fig, ax = plt.subplots(1, 1, figsize=(8, 6), dpi=200)
ax.barh(dataQ3.index, dataQ3)
plt.show()
dataQ4 = data['Q4'].value_counts()[:10]
fig, ax = plt.subplots(1, 1, figsize=(8, 6), dpi=200)
ax.barh(dataQ4.index, dataQ4)
plt.show() | code |
49125428/cell_16 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('../input/kaggle-survey-2020/kaggle_survey_2020_responses.csv')
data.shape
question = data.iloc[0, :]
data = data.iloc[1:, :]
fig, ax = plt.subplots(1, 1, figsize=(8, 5), dpi=200)
dataQ1 = data['Q1'].value_counts().sort_index()
ax.bar(dataQ1.index, dataQ1)
plt.show()
dataQ2 = data['Q2'].apply(lambda x: 'x' if x not in ['Man', 'Woman'] else x)
fig, ax = plt.subplots(1, 1, figsize=(8, 5), dpi=200)
dataQ2 = dataQ2.loc[dataQ2 != 'x'].value_counts()
ax.bar(dataQ2.index, dataQ2)
plt.show()
data['Q2'] = data['Q2'].apply(lambda x: 'x' if x not in ['Man', 'Woman'] else x)
gender = data[data['Q2'] != 'x'].groupby('Q2')['Q1'].value_counts().unstack()
man = gender.loc['Man']
woman = gender.loc['Woman']
fig, ax = plt.subplots(1, 1, figsize=(8, 6), dpi=200)
ax.bar(man.index, man)
ax.bar(woman.index, woman)
plt.show()
dataQ3 = data['Q3'].value_counts()[:10]
fig, ax = plt.subplots(1, 1, figsize=(8, 6), dpi=200)
ax.barh(dataQ3.index, dataQ3)
plt.show() | code |
49125428/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/kaggle-survey-2020/kaggle_survey_2020_responses.csv')
data.head() | code |
49125428/cell_24 | [
"image_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/kaggle-survey-2020/kaggle_survey_2020_responses.csv')
data.shape
def datainfo(df):
return pd.DataFrame([(col, df[col].nunique(), df[col].isna().sum(), df[col].dtype, df[col].unique()[:5]) for col in df.columns], columns=['name', 'nunique', 'missing', 'dtype', 'values :5'])
question = data.iloc[0, :]
data = data.iloc[1:, :]
pd.options.display.float_format = lambda x: '{:.0f}'.format(x) if int(x) == x else '{:,.2f}'.format(x)
data.groupby('Q3')['Q5'].value_counts().unstack().style.background_gradient(cmap='Blues')
data.loc[data['Q7_Part_1'].notnull() & data['Q7_Part_2'].notnull() & data['Q7_Part_3'].notnull()].loc[:, ['Q7_Part_1', 'Q7_Part_2', 'Q7_Part_3']].value_counts()
dataQ9 = data[[col for col in data.columns if 'Q9' in col]]
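# Each Q9_Part_* column holds one answer option; count how many respondents picked each option.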
dataQ9_reverse_dummies = pd.Series(dtype='int64')
for col in dataQ9.columns:
dataQ9_reverse_dummies[dataQ9[col].value_counts().index[0].strip()] = dataQ9[col].value_counts().values[0] | code |
49125428/cell_14 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('../input/kaggle-survey-2020/kaggle_survey_2020_responses.csv')
data.shape
question = data.iloc[0, :]
data = data.iloc[1:, :]
fig, ax = plt.subplots(1, 1, figsize=(8, 5), dpi=200)
dataQ1 = data['Q1'].value_counts().sort_index()
ax.bar(dataQ1.index, dataQ1)
plt.show()
dataQ2 = data['Q2'].apply(lambda x: 'x' if x not in ['Man', 'Woman'] else x)
fig, ax = plt.subplots(1, 1, figsize=(8, 5), dpi=200)
dataQ2 = dataQ2.loc[dataQ2 != 'x'].value_counts()
ax.bar(dataQ2.index, dataQ2)
plt.show()
data['Q2'] = data['Q2'].apply(lambda x: 'x' if x not in ['Man', 'Woman'] else x)
gender = data[data['Q2'] != 'x'].groupby('Q2')['Q1'].value_counts().unstack()
man = gender.loc['Man']
woman = gender.loc['Woman']
fig, ax = plt.subplots(1, 1, figsize=(8, 6), dpi=200)
ax.bar(man.index, man)
ax.bar(woman.index, woman)
plt.show() | code |
49125428/cell_22 | [
"image_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/kaggle-survey-2020/kaggle_survey_2020_responses.csv')
data.shape
def datainfo(df):
return pd.DataFrame([(col, df[col].nunique(), df[col].isna().sum(), df[col].dtype, df[col].unique()[:5]) for col in df.columns], columns=['name', 'nunique', 'missing', 'dtype', 'values :5'])
question = data.iloc[0, :]
data = data.iloc[1:, :]
pd.options.display.float_format = lambda x: '{:.0f}'.format(x) if int(x) == x else '{:,.2f}'.format(x)
data.groupby('Q3')['Q5'].value_counts().unstack().style.background_gradient(cmap='Blues')
data.loc[data['Q7_Part_1'].notnull() & data['Q7_Part_2'].notnull() & data['Q7_Part_3'].notnull()].loc[:, ['Q7_Part_1', 'Q7_Part_2', 'Q7_Part_3']].value_counts() | code |
50234958/cell_4 | [
"text_html_output_1.png"
] | from transformers import BertModel, BertTokenizer, AdamW, get_linear_schedule_with_warmup
bert_version = 'bert-base-uncased'
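# Download (or load from cache) the WordPiece tokenizer matching the chosen BERT checkpoint.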
tokenizer = BertTokenizer.from_pretrained(bert_version) | code |
50234958/cell_6 | [
"text_plain_output_3.png",
"text_plain_output_2.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/quora-question-pairs/train.csv.zip')
submission = pd.read_csv('/kaggle/input/quora-question-pairs/sample_submission.csv.zip')
submission.head() | code |
50234958/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
50234958/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/quora-question-pairs/train.csv.zip')
train_df.head() | code |
2016683/cell_4 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
INPUT_PATH = '../input/'
childPref = pd.read_csv(INPUT_PATH + 'child_wishlist.csv', header=None).to_numpy()[:, 1:]
santaPref = pd.read_csv(INPUT_PATH + 'gift_goodkids.csv', header=None).to_numpy()[:, 1:]
numChildren = childPref.shape[0]
numGifts = santaPref.shape[0]
numGiftsPerChild = numChildren // numGifts
numTwins = 4000
print('creating child vs. gift value matrix...')
childPreferenceMatrix = -1 * np.ones((numChildren, numGifts), np.float32)
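# -1 marks gifts missing from a child's wishlist; a listed gift scores 2 * (10 - wishlist_position).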
for childID in range(numChildren):
for giftOrder, giftID in enumerate(childPref[childID, :]):
childPreferenceMatrix[childID, giftID] = 2 * (10 - giftOrder)
santaPreferenceMatrix = -1 * np.ones((numChildren, numGifts), np.float32)
for giftID in range(numGifts):
for childOrder, childID in enumerate(santaPref[giftID, :]):
santaPreferenceMatrix[childID, giftID] = 2 * (1000 - childOrder)
child_vs_gift_matrix = childPreferenceMatrix / (20.0 * numChildren)
child_vs_gift_matrix += santaPreferenceMatrix / (2000000.0 * numGifts)
child_vs_gift_matrix = child_vs_gift_matrix.astype(np.float32)
del childPreferenceMatrix
del santaPreferenceMatrix | code |
2016683/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd
print('loading data...')
INPUT_PATH = '../input/'
childPref = pd.read_csv(INPUT_PATH + 'child_wishlist.csv', header=None).to_numpy()[:, 1:]
santaPref = pd.read_csv(INPUT_PATH + 'gift_goodkids.csv', header=None).to_numpy()[:, 1:] | code |
2016683/cell_7 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import time
INPUT_PATH = '../input/'
childPref = pd.read_csv(INPUT_PATH + 'child_wishlist.csv', header=None).to_numpy()[:, 1:]
santaPref = pd.read_csv(INPUT_PATH + 'gift_goodkids.csv', header=None).to_numpy()[:, 1:]
numChildren = childPref.shape[0]
numGifts = santaPref.shape[0]
numGiftsPerChild = numChildren // numGifts
numTwins = 4000
childPreferenceMatrix = -1 * np.ones((numChildren, numGifts), np.float32)
for childID in range(numChildren):
for giftOrder, giftID in enumerate(childPref[childID, :]):
childPreferenceMatrix[childID, giftID] = 2 * (10 - giftOrder)
santaPreferenceMatrix = -1 * np.ones((numChildren, numGifts), np.float32)
for giftID in range(numGifts):
for childOrder, childID in enumerate(santaPref[giftID, :]):
santaPreferenceMatrix[childID, giftID] = 2 * (1000 - childOrder)
child_vs_gift_matrix = childPreferenceMatrix / (20.0 * numChildren)
child_vs_gift_matrix += santaPreferenceMatrix / (2000000.0 * numGifts)
child_vs_gift_matrix = child_vs_gift_matrix.astype(np.float32)
del childPreferenceMatrix
del santaPreferenceMatrix
def calculateTotalHapiness(pred):
totalHapiness = 0
for i in range(0, numTwins, 2):
child_id = i
gift_id = pred[i]
totalHapiness += child_vs_gift_matrix[child_id, gift_id]
for i in range(numTwins, numChildren):
child_id = i
gift_id = pred[i]
totalHapiness += child_vs_gift_matrix[child_id, gift_id]
return totalHapiness
def AssignGifts_GreedyChildren_Adaptive(child_vs_gift_selection_matrix=child_vs_gift_matrix, numPasses=15):
child_vs_gift_selection_matrix = child_vs_gift_selection_matrix.copy()
giftAssignment = -np.ones(numChildren, dtype=np.int32)
giftCount = np.zeros(numGifts, dtype=np.int32)
startTime = time.time()
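    # Twins occupy consecutive ids (2k, 2k+1) and must receive the same gift, so place them first in pairs.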
sortedTwins = 2 * child_vs_gift_selection_matrix[0:numTwins:2].max(axis=1).argsort()
for childInd in sortedTwins:
selectedGift = child_vs_gift_selection_matrix[childInd, :].argmax()
if giftCount[selectedGift] < numGiftsPerChild and giftAssignment[childInd] == -1:
giftAssignment[childInd] = selectedGift
giftAssignment[childInd + 1] = selectedGift
giftCount[selectedGift] += 2
childrenPerPass = int(1 + (numChildren - numTwins) / (numPasses + 1.0))
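    # Adaptive passes: each round admits the next slice of children and gives each its best remaining gift.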
for k in range(numPasses + 1):
maxValuePerChild = child_vs_gift_selection_matrix.max(axis=1)
sortedChildren = (numTwins + maxValuePerChild[numTwins:].argsort())[::-1]
thresholdChildInd = min(numChildren - numTwins - 1, int(numTwins + (k + 1) * childrenPerPass))
assignmentThreshold = maxValuePerChild[thresholdChildInd]
numAssignedSoFar = (giftAssignment > -1).sum()
if numAssignedSoFar > 0.99 * numChildren or k >= numPasses:
assignmentThreshold = child_vs_gift_selection_matrix.min() - 1.0
thresholdChildInd = len(sortedChildren)
if numAssignedSoFar >= numChildren:
break
for childInd in sortedChildren[:thresholdChildInd]:
if giftAssignment[childInd] == -1 and child_vs_gift_selection_matrix[childInd, :].max() >= assignmentThreshold:
selectedGift = child_vs_gift_selection_matrix[childInd, :].argmax()
giftAssignment[childInd] = selectedGift
giftCount[selectedGift] += 1
if giftCount[selectedGift] >= numGiftsPerChild:
child_vs_gift_selection_matrix[:, selectedGift] = -1.0
assignmentScore = calculateTotalHapiness(giftAssignment)
return (giftAssignment, assignmentScore)
print('normalizing rows and columns of the value matrix')
startingMatrix = child_vs_gift_matrix.copy()
startingMatrix -= startingMatrix.min()
startingMatrix /= startingMatrix.max()
print('heuristic #1: boost unpopular gifts before greedy assignment')
giftVariability = startingMatrix.std(axis=0)
for giftInd, giftVar in enumerate(giftVariability):
startingMatrix[:, giftInd] /= giftVar
print('heuristic #2: penalize good kids before greedy assignment')
childFavorability = startingMatrix.mean(axis=1)
for childInd, childFavor in enumerate(childFavorability):
startingMatrix[childInd, :] /= childFavor
print('assign gifts to children in a child centered greedy fashion')
pred, score = AssignGifts_GreedyChildren_Adaptive(startingMatrix, numPasses=16)
print('predicted score = %.8f' % score) | code |
2016683/cell_8 | [
"application_vnd.jupyter.stderr_output_1.png"
with open('submission_heuristic.csv', 'w') as out:
    out.write('ChildId,GiftId\n')
    for i in range(len(pred)):
        out.write(f'{i},{pred[i]}\n') | code |
90122454/cell_21 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data_test = pd.read_csv(dirname + '/' + filenames[1])
data_train = pd.read_csv(dirname + '/' + filenames[0])
data_train.Name.describe() | code |
90122454/cell_25 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data_test = pd.read_csv(dirname + '/' + filenames[1])
data_train = pd.read_csv(dirname + '/' + filenames[0])
data_train['Sex'].value_counts() | code |
90122454/cell_30 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data_test = pd.read_csv(dirname + '/' + filenames[1])
data_train = pd.read_csv(dirname + '/' + filenames[0])
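# Kurtosis and skewness summarize the shape of the age distribution before plotting it.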
data_train.Age.kurt()
data_train.Age.skew()
sns.displot(data=data_train.Age, kde=True) | code |