path
stringlengths 13
17
| screenshot_names
sequencelengths 1
873
| code
stringlengths 0
40.4k
| cell_type
stringclasses 1
value |
---|---|---|---|
18143144/cell_8 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np # linear algebra
np.round(342 * 100 / 891) | code |
18143144/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_set = pd.read_csv('../input/train.csv')
test_set = pd.read_csv('../input/test.csv')
train_set.isnull().sum()
train_set.Survived.value_counts()
train_set[['Sex', 'Survived']].groupby(['Sex']).mean() | code |
18143144/cell_3 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_set = pd.read_csv('../input/train.csv')
test_set = pd.read_csv('../input/test.csv')
train_set.info() | code |
18143144/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_set = pd.read_csv('../input/train.csv')
test_set = pd.read_csv('../input/test.csv')
train_set.isnull().sum()
train_set.Survived.value_counts()
train_set[['Sex', 'Survived']].groupby(['Sex']).mean().plot(kind='bar') | code |
18143144/cell_35 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_set = pd.read_csv('../input/train.csv')
test_set = pd.read_csv('../input/test.csv')
train_set.isnull().sum()
train_set.Survived.value_counts()
np.round(342 * 100 / 891)
np.round(549 * 100 / 891)
g = sns.FacetGrid(train_set, col='Survived')
g.map(plt.hist, 'Age', bins=20)
mean = np.round(train_set.Age.mean())
train_set['Age'] = train_set.Age.fillna(mean)
train_set.Age.isnull().sum()
grid = sns.FacetGrid(train_set, col='Survived')
grid.map(plt.hist, 'Fare')
grid = sns.FacetGrid(train_set, row='Embarked', col='Survived', size=2.2, aspect=1.6)
grid.map(plt.bar, 'Sex', 'Fare')
grid = sns.FacetGrid(train_set, col='Survived', row='Pclass')
grid.map(plt.hist, 'Age')
grid = sns.FacetGrid(train_set, col='Survived', row='Embarked')
grid.map(plt.bar, 'Sex', 'Fare') | code |
18143144/cell_31 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_set = pd.read_csv('../input/train.csv')
test_set = pd.read_csv('../input/test.csv')
train_set.isnull().sum()
train_set.Survived.value_counts()
np.round(342 * 100 / 891)
np.round(549 * 100 / 891)
mean = np.round(train_set.Age.mean())
train_set['Age'] = train_set.Age.fillna(mean)
train_set.Age.isnull().sum()
train_set[['Survived', 'Embarked']].groupby('Embarked').mean() | code |
18143144/cell_46 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_set = pd.read_csv('../input/train.csv')
test_set = pd.read_csv('../input/test.csv')
train_set.isnull().sum()
train_set.Survived.value_counts()
np.round(342 * 100 / 891)
np.round(549 * 100 / 891)
mean = np.round(train_set.Age.mean())
train_set['Age'] = train_set.Age.fillna(mean)
train_set.Age.isnull().sum()
X_train = train_set.drop(['Survived', 'PassengerId', 'Name', 'Parch', 'Ticket', 'Cabin'], axis=1)
X_train = X_train.values
labelEncoder = LabelEncoder()
X_train[:, 5] = labelEncoder.fit_transform(X_train[:, 5])
print(X_train) | code |
18143144/cell_24 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_set = pd.read_csv('../input/train.csv')
test_set = pd.read_csv('../input/test.csv')
train_set.isnull().sum()
train_set.Survived.value_counts()
np.round(342 * 100 / 891)
np.round(549 * 100 / 891)
mean = np.round(train_set.Age.mean())
train_set['Age'] = train_set.Age.fillna(mean)
train_set.Age.isnull().sum()
train_set[['SibSp', 'Survived']].groupby(['SibSp'], as_index=False).mean().sort_values(by='Survived', ascending=False) | code |
18143144/cell_22 | [
"text_html_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_set = pd.read_csv('../input/train.csv')
test_set = pd.read_csv('../input/test.csv')
train_set.isnull().sum()
train_set.Survived.value_counts()
np.round(342 * 100 / 891)
np.round(549 * 100 / 891)
mean = np.round(train_set.Age.mean())
train_set['Age'] = train_set.Age.fillna(mean)
train_set.Age.isnull().sum() | code |
18143144/cell_10 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
np.round(342 * 100 / 891)
np.round(549 * 100 / 891) | code |
18143144/cell_27 | [
"text_html_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_set = pd.read_csv('../input/train.csv')
test_set = pd.read_csv('../input/test.csv')
train_set.isnull().sum()
train_set.Survived.value_counts()
np.round(342 * 100 / 891)
np.round(549 * 100 / 891)
mean = np.round(train_set.Age.mean())
train_set['Age'] = train_set.Age.fillna(mean)
train_set.Age.isnull().sum()
train_set[['Parch', 'Survived']].groupby(['Parch']).mean().sort_values(by='Survived', ascending=False) | code |
18143144/cell_37 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_set = pd.read_csv('../input/train.csv')
test_set = pd.read_csv('../input/test.csv')
train_set.isnull().sum()
train_set.Survived.value_counts()
np.round(342 * 100 / 891)
np.round(549 * 100 / 891)
mean = np.round(train_set.Age.mean())
train_set['Age'] = train_set.Age.fillna(mean)
train_set.Age.isnull().sum()
train_set.head() | code |
18143144/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_set = pd.read_csv('../input/train.csv')
test_set = pd.read_csv('../input/test.csv')
train_set.isnull().sum()
train_set.Survived.value_counts()
train_set[['Pclass', 'Survived']].groupby(['Pclass']).mean().sort_values(by='Survived', ascending=False) | code |
18143144/cell_5 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_set = pd.read_csv('../input/train.csv')
test_set = pd.read_csv('../input/test.csv')
train_set.isnull().sum() | code |
105178941/cell_4 | [
"text_plain_output_1.png"
] | first_name = ['adnan', 'afnan', 'affan']
last_name = ['k', 's', 'd']
l = len(first_name)
name = []
for i in range(l):
x = first_name[i] + last_name[i]
name.append(x)
first_name = ['adnan', 'afnan']
last_name = ['k', 'd']
l = len(first_name)
name = []
for i in range(l):
x = first_name[i] + ' ' + last_name[i]
name.append(x)
print(name) | code |
105178941/cell_1 | [
"text_plain_output_1.png"
] | first_name = ['adnan', 'afnan', 'affan']
last_name = ['k', 's', 'd']
l = len(first_name)
name = []
for i in range(l):
x = first_name[i] + last_name[i]
name.append(x)
print(name) | code |
105178941/cell_8 | [
"application_vnd.jupyter.stderr_output_1.png"
] | b = [1, 2, 3, 4, 5, 6, 7]
num1 = int(input('enter the number to remove in the list'))
for i in b:
if i == num1:
b.remove(i)
print(b) | code |
73087591/cell_13 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
nRowsRead = 1000
df = pd.read_csv('../input/cusersmarildownloadsgermancsv/german.csv', delimiter=';', encoding='ISO-8859-2', nrows=nRowsRead)
df.dataframeName = 'german.csv'
nRow, nCol = df.shape
y = df['Creditability']
X = df.iloc[:, :-1]
label = y
train = X
(print('train shape: ', train.shape), print('label shape: ', label.shape))
x_train, x_test, y_train, y_test = train_test_split(train, label, test_size=0.3, random_state=42)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
x_train = x_train.reshape(-1, 10, 10, 1)
x_test = x_test.reshape(-1, 10, 10, 1)
print('x_train shape: ', x_train.shape)
print('y_train shape: ', y_train.shape)
print('x_test shape: ', x_test.shape)
print('y_test shape: ', y_test.shape) | code |
73087591/cell_20 | [
"text_plain_output_1.png"
] | from keras.layers.normalization import BatchNormalization
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.layers import Dense, Dropout, Flatten
from tensorflow.keras.models import Sequential
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
import tensorflow as tf
nRowsRead = 1000
df = pd.read_csv('../input/cusersmarildownloadsgermancsv/german.csv', delimiter=';', encoding='ISO-8859-2', nrows=nRowsRead)
df.dataframeName = 'german.csv'
nRow, nCol = df.shape
y = df['Creditability']
X = df.iloc[:, :-1]
label = y
train = X
(print('train shape: ', train.shape), print('label shape: ', label.shape))
x_train, x_test, y_train, y_test = train_test_split(train, label, test_size=0.3, random_state=42)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
x_train = x_train.reshape(-1, 10, 10, 1)
x_test = x_test.reshape(-1, 10, 10, 1)
batch_size = 128
epochs = 8
learning_rate = 0.01
activation = 'relu'
Fully_connected_layer_nodes = 86
input_shape = (10, 10, 1)
img_rows = 10
img_cols = 10
if K.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
model = Sequential()
model.add(Conv2D(8, kernel_size=(1, 1), activation=activation, padding='SAME', input_shape=input_shape))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(1, 1)))
model.add(BatchNormalization())
model.add(Conv2D(36, (3, 3), activation=activation, padding='SAME'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(Fully_connected_layer_nodes, activation=activation))
model.add(BatchNormalization())
model.add(Dropout(0.2))
model.add(Dense(Fully_connected_layer_nodes, activation=activation))
model.add(BatchNormalization())
model.add(Dense(1, activation='linear'))
adam = tensorflow.keras.optimizers.Adam(lr=learning_rate)
callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=5)
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mse'])
model.summary() | code |
73087591/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
nRowsRead = 1000
df = pd.read_csv('../input/cusersmarildownloadsgermancsv/german.csv', delimiter=';', encoding='ISO-8859-2', nrows=nRowsRead)
df.dataframeName = 'german.csv'
nRow, nCol = df.shape
y = df['Creditability']
X = df.iloc[:, :-1]
label = y
train = X
(print('train shape: ', train.shape), print('label shape: ', label.shape)) | code |
73087591/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
nRowsRead = 1000
df = pd.read_csv('../input/cusersmarildownloadsgermancsv/german.csv', delimiter=';', encoding='ISO-8859-2', nrows=nRowsRead)
df.dataframeName = 'german.csv'
nRow, nCol = df.shape
print(f'There are {nRow} rows and {nCol} columns')
df.head() | code |
73087591/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
73087591/cell_18 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
nRowsRead = 1000
df = pd.read_csv('../input/cusersmarildownloadsgermancsv/german.csv', delimiter=';', encoding='ISO-8859-2', nrows=nRowsRead)
df.dataframeName = 'german.csv'
nRow, nCol = df.shape
y = df['Creditability']
X = df.iloc[:, :-1]
label = y
train = X
(print('train shape: ', train.shape), print('label shape: ', label.shape))
x_train, x_test, y_train, y_test = train_test_split(train, label, test_size=0.3, random_state=42)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
x_train = x_train.reshape(-1, 10, 10, 1)
x_test = x_test.reshape(-1, 10, 10, 1)
batch_size = 128
epochs = 8
learning_rate = 0.01
activation = 'relu'
Fully_connected_layer_nodes = 86
input_shape = (10, 10, 1)
img_rows = 10
img_cols = 10
if K.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
print('x_train shape: ', x_train.shape)
print('y_train shape: ', y_train.shape)
print('x_test shape: ', x_test.shape)
print('y_test shape: ', y_test.shape)
print('input_shape: ', input_shape) | code |
73087591/cell_8 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
nRowsRead = 1000
df = pd.read_csv('../input/cusersmarildownloadsgermancsv/german.csv', delimiter=';', encoding='ISO-8859-2', nrows=nRowsRead)
df.dataframeName = 'german.csv'
nRow, nCol = df.shape
y = df['Creditability']
X = df.iloc[:, :-1]
label = y
train = X
(print('train shape: ', train.shape), print('label shape: ', label.shape))
x_train, x_test, y_train, y_test = train_test_split(train, label, test_size=0.3, random_state=42)
print('x_train shape: ', x_train.shape)
print('y_train shape: ', y_train.shape)
print('x_test shape: ', x_test.shape)
print('y_test shape: ', y_test.shape) | code |
73087591/cell_15 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
nRowsRead = 1000
df = pd.read_csv('../input/cusersmarildownloadsgermancsv/german.csv', delimiter=';', encoding='ISO-8859-2', nrows=nRowsRead)
df.dataframeName = 'german.csv'
nRow, nCol = df.shape
y = df['Creditability']
X = df.iloc[:, :-1]
label = y
train = X
(print('train shape: ', train.shape), print('label shape: ', label.shape))
x_train, x_test, y_train, y_test = train_test_split(train, label, test_size=0.3, random_state=42)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
x_train = x_train.reshape(-1, 10, 10, 1)
x_test = x_test.reshape(-1, 10, 10, 1)
fig = plt.figure(figsize=(21, 22))
label_index = y_train[:25].to_list()
for i in range(25):
plt.subplot(5, 5, 1 + i)
plt.title('Image Belongs to the label: ' + ' ' + str(label_index[i]), fontname='Times New Roman', fontweight='bold')
plt.imshow(x_train[i, :, :, 0], cmap=plt.get_cmap('gray'))
plt.show() | code |
73087591/cell_22 | [
"image_output_1.png"
] | from keras.layers.normalization import BatchNormalization
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.layers import Dense, Dropout, Flatten
from tensorflow.keras.models import Sequential
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
import tensorflow as tf
nRowsRead = 1000
df = pd.read_csv('../input/cusersmarildownloadsgermancsv/german.csv', delimiter=';', encoding='ISO-8859-2', nrows=nRowsRead)
df.dataframeName = 'german.csv'
nRow, nCol = df.shape
y = df['Creditability']
X = df.iloc[:, :-1]
label = y
train = X
(print('train shape: ', train.shape), print('label shape: ', label.shape))
x_train, x_test, y_train, y_test = train_test_split(train, label, test_size=0.3, random_state=42)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
x_train = x_train.reshape(-1, 10, 10, 1)
x_test = x_test.reshape(-1, 10, 10, 1)
batch_size = 128
epochs = 8
learning_rate = 0.01
activation = 'relu'
Fully_connected_layer_nodes = 86
input_shape = (10, 10, 1)
img_rows = 10
img_cols = 10
if K.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
model = Sequential()
model.add(Conv2D(8, kernel_size=(1, 1), activation=activation, padding='SAME', input_shape=input_shape))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(1, 1)))
model.add(BatchNormalization())
model.add(Conv2D(36, (3, 3), activation=activation, padding='SAME'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(Fully_connected_layer_nodes, activation=activation))
model.add(BatchNormalization())
model.add(Dropout(0.2))
model.add(Dense(Fully_connected_layer_nodes, activation=activation))
model.add(BatchNormalization())
model.add(Dense(1, activation='linear'))
adam = tensorflow.keras.optimizers.Adam(lr=learning_rate)
callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=5)
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mse'])
model.summary()
history = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, callbacks=[callback], verbose=1, validation_data=(x_test, y_test), shuffle=False)
val_loss, val_acc = model.evaluate(x_test, y_test)
print('validation loss: ', val_loss)
print('<3 ')
print('validation accuracy: ', val_acc)
print('learn rate: ', learning_rate, 'epochs: ', epochs, 'activation: ', activation, "Fully Connected Layer's Node Number :", Fully_connected_layer_nodes) | code |
73087591/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
nRowsRead = 1000
df = pd.read_csv('../input/cusersmarildownloadsgermancsv/german.csv', delimiter=';', encoding='ISO-8859-2', nrows=nRowsRead)
df.dataframeName = 'german.csv'
nRow, nCol = df.shape
print('df shape is: ', df.shape)
y = df['Creditability']
X = df.iloc[:, :-1]
print(type(X), type(y))
print(X.shape, y.shape) | code |
1004511/cell_9 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/fivethirtyeight_ncaa_forecasts.csv')
matchups = [[str(x + 1), str(16 - x)] for x in range(8)]
df = df[df.gender == 'mens']
pre = df[df.playin_flag]
data = []
for region in pre.team_region.unique():
data.append([])
for _, row in pre[pre.team_region == region].iterrows():
data[-1].extend([row.team_rating, row.rd1_win])
post = df[df.playin_flag == False]
for region in post.team_region.unique():
for matchup in matchups:
res = post[(post.team_region == region) & post.team_seed.isin(matchup)]
if res.shape[0] > 1:
data.append([])
for _, row in res.iterrows():
data[-1].extend([row.team_rating, row.rd2_win])
match = pd.DataFrame(data, columns=['Team1_Rating', 'Team1_Prob', 'Team2_Rating', 'Team2_Prob']) | code |
1004511/cell_3 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/fivethirtyeight_ncaa_forecasts.csv')
df.head() | code |
1004511/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/fivethirtyeight_ncaa_forecasts.csv')
matchups = [[str(x + 1), str(16 - x)] for x in range(8)]
df = df[df.gender == 'mens']
pre = df[df.playin_flag]
data = []
for region in pre.team_region.unique():
data.append([])
for _, row in pre[pre.team_region == region].iterrows():
data[-1].extend([row.team_rating, row.rd1_win])
post = df[df.playin_flag == False]
for region in post.team_region.unique():
for matchup in matchups:
res = post[(post.team_region == region) & post.team_seed.isin(matchup)]
if res.shape[0] > 1:
data.append([])
for _, row in res.iterrows():
data[-1].extend([row.team_rating, row.rd2_win])
match = pd.DataFrame(data, columns=['Team1_Rating', 'Team1_Prob', 'Team2_Rating', 'Team2_Prob'])
match | code |
2040742/cell_13 | [
"text_plain_output_1.png"
] | import numpy as np
images = np.load('../input/olivetti_faces.npy')
images.shape
targets = np.load('../input/olivetti_faces_target.npy')
targets.shape
data = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
data.shape
train = data[targets < 30]
train.shape
n_pixels = data.shape[1]
n_pixels
X_train = train[:, :(n_pixels + 1) // 2]
X_train | code |
2040742/cell_9 | [
"image_output_1.png"
] | import numpy as np
images = np.load('../input/olivetti_faces.npy')
images.shape
targets = np.load('../input/olivetti_faces_target.npy')
targets.shape
data = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
data.shape
test = data[targets >= 30]
test.shape
n_faces = test.shape[0] // 10
n_faces | code |
2040742/cell_4 | [
"text_plain_output_1.png"
] | from skimage.io import imshow
import numpy as np
images = np.load('../input/olivetti_faces.npy')
images.shape
firstImage = images[0]
imshow(firstImage) | code |
2040742/cell_6 | [
"image_output_1.png"
] | import numpy as np
images = np.load('../input/olivetti_faces.npy')
images.shape
targets = np.load('../input/olivetti_faces_target.npy')
targets.shape
targets < 30 | code |
2040742/cell_2 | [
"text_plain_output_1.png"
] | import numpy as np
images = np.load('../input/olivetti_faces.npy')
images.shape | code |
2040742/cell_11 | [
"text_plain_output_1.png"
] | import numpy as np
images = np.load('../input/olivetti_faces.npy')
images.shape
targets = np.load('../input/olivetti_faces_target.npy')
targets.shape
data = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
data.shape
test = data[targets >= 30]
test.shape
n_faces = test.shape[0] // 10
n_faces
face_ids = np.random.randint(0, 100, size=n_faces)
face_ids
test = test[face_ids, :]
test.shape | code |
2040742/cell_19 | [
"text_plain_output_1.png"
] | from skimage.io import imshow
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
from sklearn.neighbors import KNeighborsRegressor
import matplotlib.pyplot as plt
import numpy as np
images = np.load('../input/olivetti_faces.npy')
images.shape
targets = np.load('../input/olivetti_faces_target.npy')
targets.shape
data = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
data.shape
train = data[targets < 30]
train.shape
test = data[targets >= 30]
test.shape
n_faces = test.shape[0] // 10
n_faces
face_ids = np.random.randint(0, 100, size=n_faces)
face_ids
test = test[face_ids, :]
test.shape
n_pixels = data.shape[1]
n_pixels
X_train = train[:, :(n_pixels + 1) // 2]
X_train
y_train = train[:, n_pixels // 2:]
y_train
X_test = test[:, :(n_pixels + 1) // 2]
y_test = test[:, n_pixels // 2:]
ESTIMATORS = {'Extra trees': ExtraTreesRegressor(n_estimators=10, max_features=32, random_state=0), 'K-nn': KNeighborsRegressor(), 'Linear regression': LinearRegression(), 'Ridge': RidgeCV(), 'RandomForestRegressor': RandomForestRegressor()}
y_test_predict = dict()
for name, estimator in ESTIMATORS.items():
estimator.fit(X_train, y_train)
y_test_predict[name] = estimator.predict(X_test)
y_test_predict['RandomForestRegressor'].shape
image_shape = (64, 64)
j = 0
for i in range(n_faces):
actual_face = test[i].reshape(image_shape)
completed_face = np.hstack((X_test[i], y_test_predict['Ridge'][i]))
j = j + 1
y = actual_face.reshape(image_shape)
x = completed_face.reshape(image_shape)
j = j + 1
x = completed_face.reshape(image_shape)
j = 0
for i in range(n_faces):
actual_face = test[i].reshape(image_shape)
completed_face = np.hstack((X_test[i], y_test_predict['Extra trees'][i]))
j = j + 1
y = actual_face.reshape(image_shape)
x = completed_face.reshape(image_shape)
j = j + 1
x = completed_face.reshape(image_shape)
plt.figure(figsize=(2 * n_faces * 2, 5))
j = 0
for i in range(n_faces):
actual_face = test[i].reshape(image_shape)
completed_face = np.hstack((X_test[i], y_test_predict['Linear regression'][i]))
j = j + 1
plt.subplot(4, 5, j)
y = actual_face.reshape(image_shape)
x = completed_face.reshape(image_shape)
imshow(x)
j = j + 1
plt.subplot(4, 5, j)
x = completed_face.reshape(image_shape)
imshow(y)
plt.show() | code |
2040742/cell_7 | [
"image_output_1.png"
] | import numpy as np
images = np.load('../input/olivetti_faces.npy')
images.shape
targets = np.load('../input/olivetti_faces_target.npy')
targets.shape
data = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
data.shape
train = data[targets < 30]
train.shape | code |
2040742/cell_18 | [
"text_plain_output_1.png"
] | from skimage.io import imshow
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
from sklearn.neighbors import KNeighborsRegressor
import matplotlib.pyplot as plt
import numpy as np
images = np.load('../input/olivetti_faces.npy')
images.shape
targets = np.load('../input/olivetti_faces_target.npy')
targets.shape
data = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
data.shape
train = data[targets < 30]
train.shape
test = data[targets >= 30]
test.shape
n_faces = test.shape[0] // 10
n_faces
face_ids = np.random.randint(0, 100, size=n_faces)
face_ids
test = test[face_ids, :]
test.shape
n_pixels = data.shape[1]
n_pixels
X_train = train[:, :(n_pixels + 1) // 2]
X_train
y_train = train[:, n_pixels // 2:]
y_train
X_test = test[:, :(n_pixels + 1) // 2]
y_test = test[:, n_pixels // 2:]
ESTIMATORS = {'Extra trees': ExtraTreesRegressor(n_estimators=10, max_features=32, random_state=0), 'K-nn': KNeighborsRegressor(), 'Linear regression': LinearRegression(), 'Ridge': RidgeCV(), 'RandomForestRegressor': RandomForestRegressor()}
y_test_predict = dict()
for name, estimator in ESTIMATORS.items():
estimator.fit(X_train, y_train)
y_test_predict[name] = estimator.predict(X_test)
y_test_predict['RandomForestRegressor'].shape
image_shape = (64, 64)
j = 0
for i in range(n_faces):
actual_face = test[i].reshape(image_shape)
completed_face = np.hstack((X_test[i], y_test_predict['Ridge'][i]))
j = j + 1
y = actual_face.reshape(image_shape)
x = completed_face.reshape(image_shape)
j = j + 1
x = completed_face.reshape(image_shape)
plt.figure(figsize=(2 * n_faces * 2, 5))
j = 0
for i in range(n_faces):
actual_face = test[i].reshape(image_shape)
completed_face = np.hstack((X_test[i], y_test_predict['Extra trees'][i]))
j = j + 1
plt.subplot(4, 5, j)
y = actual_face.reshape(image_shape)
x = completed_face.reshape(image_shape)
imshow(x)
j = j + 1
plt.subplot(4, 5, j)
x = completed_face.reshape(image_shape)
imshow(y)
plt.show() | code |
2040742/cell_8 | [
"image_output_1.png"
] | import numpy as np
images = np.load('../input/olivetti_faces.npy')
images.shape
targets = np.load('../input/olivetti_faces_target.npy')
targets.shape
data = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
data.shape
test = data[targets >= 30]
test.shape | code |
2040742/cell_15 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
from sklearn.neighbors import KNeighborsRegressor
import numpy as np
images = np.load('../input/olivetti_faces.npy')
images.shape
targets = np.load('../input/olivetti_faces_target.npy')
targets.shape
data = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
data.shape
train = data[targets < 30]
train.shape
test = data[targets >= 30]
test.shape
n_faces = test.shape[0] // 10
n_faces
face_ids = np.random.randint(0, 100, size=n_faces)
face_ids
test = test[face_ids, :]
test.shape
n_pixels = data.shape[1]
n_pixels
X_train = train[:, :(n_pixels + 1) // 2]
X_train
y_train = train[:, n_pixels // 2:]
y_train
X_test = test[:, :(n_pixels + 1) // 2]
y_test = test[:, n_pixels // 2:]
ESTIMATORS = {'Extra trees': ExtraTreesRegressor(n_estimators=10, max_features=32, random_state=0), 'K-nn': KNeighborsRegressor(), 'Linear regression': LinearRegression(), 'Ridge': RidgeCV(), 'RandomForestRegressor': RandomForestRegressor()}
y_test_predict = dict()
for name, estimator in ESTIMATORS.items():
estimator.fit(X_train, y_train)
y_test_predict[name] = estimator.predict(X_test)
y_test_predict['RandomForestRegressor'].shape | code |
2040742/cell_3 | [
"text_plain_output_1.png"
] | import numpy as np
images = np.load('../input/olivetti_faces.npy')
images.shape
targets = np.load('../input/olivetti_faces_target.npy')
targets.shape | code |
2040742/cell_17 | [
"text_plain_output_1.png"
] | from skimage.io import imshow
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
from sklearn.neighbors import KNeighborsRegressor
import matplotlib.pyplot as plt
import numpy as np
# Face-completion experiment plus visualization of the results:
# predict the second half of each flattened Olivetti face from the first half,
# then draw completed faces next to the originals.
images = np.load('../input/olivetti_faces.npy')
images.shape
targets = np.load('../input/olivetti_faces_target.npy')
targets.shape
# One flattened pixel row per image.
data = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
data.shape
# Subjects with id < 30 form the training pool, the rest the test pool.
train = data[targets < 30]
train.shape
test = data[targets >= 30]
test.shape
n_faces = test.shape[0] // 10
n_faces
# Unseeded random sample of test rows (assumes 100 test rows -- TODO confirm).
face_ids = np.random.randint(0, 100, size=n_faces)
face_ids
test = test[face_ids, :]
test.shape
n_pixels = data.shape[1]
n_pixels
# First half of the pixels is the input, second half the regression target.
X_train = train[:, :(n_pixels + 1) // 2]
X_train
y_train = train[:, n_pixels // 2:]
y_train
X_test = test[:, :(n_pixels + 1) // 2]
y_test = test[:, n_pixels // 2:]
ESTIMATORS = {'Extra trees': ExtraTreesRegressor(n_estimators=10, max_features=32, random_state=0), 'K-nn': KNeighborsRegressor(), 'Linear regression': LinearRegression(), 'Ridge': RidgeCV(), 'RandomForestRegressor': RandomForestRegressor()}
y_test_predict = dict()
for name, estimator in ESTIMATORS.items():
    estimator.fit(X_train, y_train)
    y_test_predict[name] = estimator.predict(X_test)
y_test_predict['RandomForestRegressor'].shape
# Render each sampled face: Ridge-completed version beside the actual face.
image_shape = (64, 64)
plt.figure(figsize=(2 * n_faces * 2, 5))
j = 0
for i in range(n_faces):
    actual_face = test[i].reshape(image_shape)
    # Stitch the known first half onto the Ridge prediction of the second half.
    completed_face = np.hstack((X_test[i], y_test_predict['Ridge'][i]))
    j = j + 1
    plt.subplot(4, 5, j)
    # NOTE(review): actual_face is already (64, 64); this reshape is a no-op.
    y = actual_face.reshape(image_shape)
    x = completed_face.reshape(image_shape)
    imshow(x)
    j = j + 1
    plt.subplot(4, 5, j)
    # NOTE(review): x is recomputed here but only y is displayed below.
    x = completed_face.reshape(image_shape)
    imshow(y)
plt.show() | code |
2040742/cell_14 | [
"text_plain_output_1.png"
] | import numpy as np
images = np.load('../input/olivetti_faces.npy')
images.shape
targets = np.load('../input/olivetti_faces_target.npy')
targets.shape
data = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
data.shape
train = data[targets < 30]
train.shape
n_pixels = data.shape[1]
n_pixels
y_train = train[:, n_pixels // 2:]
y_train | code |
2040742/cell_10 | [
"text_plain_output_1.png"
] | import numpy as np
images = np.load('../input/olivetti_faces.npy')
images.shape
targets = np.load('../input/olivetti_faces_target.npy')
targets.shape
data = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
data.shape
test = data[targets >= 30]
test.shape
n_faces = test.shape[0] // 10
n_faces
face_ids = np.random.randint(0, 100, size=n_faces)
face_ids | code |
2040742/cell_12 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np
images = np.load('../input/olivetti_faces.npy')
images.shape
data = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
data.shape
n_pixels = data.shape[1]
n_pixels | code |
2040742/cell_5 | [
"text_plain_output_1.png"
] | import numpy as np
images = np.load('../input/olivetti_faces.npy')
images.shape
data = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
data.shape | code |
1005671/cell_15 | [
"text_html_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier()
clf.fit(X_train, y_train)
predictions = clf.predict(X_test)
from sklearn.metrics import accuracy_score
accuracy = accuracy_score(y_test, predictions)
print(accuracy) | code |
1005671/cell_17 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import pandas as pd
import pandas as pd
# End-to-end Titanic pipeline: label-encode every column, evaluate a random
# forest on a holdout, then refit on all rows and write the submission file.
dfTrain = pd.read_csv('../input/train.csv')
dfTest = pd.read_csv('../input/test.csv')
from sklearn.preprocessing import LabelEncoder
# Fit each encoder on train+test combined so both frames share one mapping.
# NOTE(review): this encodes *every* column, including numeric ones, and the
# concat leaves NaN in test-only-missing columns such as Survived -- confirm
# this is intentional before reusing.
dfCombined = pd.concat([dfTrain, dfTest])
for feature in list(dfCombined):
    le = LabelEncoder()
    le.fit(dfCombined[feature])
    if feature in dfTrain:
        dfTrain[feature] = le.transform(dfTrain[feature])
    if feature in dfTest:
        dfTest[feature] = le.transform(dfTest[feature])
from sklearn.model_selection import train_test_split
# 80/20 train/validation split with a fixed seed.
X = dfTrain.drop(['Survived'], axis=1)
y = dfTrain['Survived']
num_test = 0.2
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=num_test, random_state=23)
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier()
clf.fit(X_train, y_train)
predictions = clf.predict(X_test)
# Refit a fresh forest on the full training set for the final predictions.
clf = RandomForestClassifier()
clf.fit(X, y)
dfTestPredictions = clf.predict(dfTest)
results = pd.DataFrame({'PassengerId': dfTest['PassengerId'], 'Survived': dfTestPredictions})
results.to_csv('results.csv', index=False)
results.head() | code |
1005671/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
dfTrain = pd.read_csv('../input/train.csv')
dfTest = pd.read_csv('../input/test.csv')
dfTrain.head() | code |
89125510/cell_13 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/wine-quality-dataset/WineQT.csv')
data.corr()
data.columns
data = data.drop(['Id'], axis=1)
f,ax=plt.subplots(figsize=(18,9))
sns.heatmap(data.corr(),annot=True,linewidths=5,fmt='.1f',ax=ax,linecolor='black')
data.rename(columns={'fixed acidity': 'fixed_acidity', 'volatile acidity': 'volatile_acidity', 'citric acid': 'citric_acid', 'residual sugar': 'residual_sugar', 'free sulfur dioxide': 'free_sulfur_dioxide', 'total sulfur dioxide': 'total_sulfur_dioxide'}, inplace=True)
data.columns
data[(data['alcohol'] > 12) & (data['quality'] > 7)] | code |
89125510/cell_9 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/wine-quality-dataset/WineQT.csv')
data.corr()
data.columns
data = data.drop(['Id'], axis=1)
f, ax = plt.subplots(figsize=(18, 9))
sns.heatmap(data.corr(), annot=True, linewidths=5, fmt='.1f', ax=ax, linecolor='black') | code |
89125510/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/wine-quality-dataset/WineQT.csv')
data.head() | code |
89125510/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/wine-quality-dataset/WineQT.csv')
data.corr()
data.columns | code |
89125510/cell_11 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/wine-quality-dataset/WineQT.csv')
data.corr()
data.columns
data = data.drop(['Id'], axis=1)
f,ax=plt.subplots(figsize=(18,9))
sns.heatmap(data.corr(),annot=True,linewidths=5,fmt='.1f',ax=ax,linecolor='black')
data.rename(columns={'fixed acidity': 'fixed_acidity', 'volatile acidity': 'volatile_acidity', 'citric acid': 'citric_acid', 'residual sugar': 'residual_sugar', 'free sulfur dioxide': 'free_sulfur_dioxide', 'total sulfur dioxide': 'total_sulfur_dioxide'}, inplace=True)
data.columns
data.plot(kind='scatter', x='citric_acid', y='fixed_acidity', alpha=0.7, color='red') | code |
89125510/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
89125510/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/wine-quality-dataset/WineQT.csv')
data.corr()
data.columns
data = data.drop(['Id'], axis=1)
data.describe() | code |
89125510/cell_15 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/wine-quality-dataset/WineQT.csv')
data.corr()
data.columns
data = data.drop(['Id'], axis=1)
f,ax=plt.subplots(figsize=(18,9))
sns.heatmap(data.corr(),annot=True,linewidths=5,fmt='.1f',ax=ax,linecolor='black')
data.rename(columns={'fixed acidity': 'fixed_acidity', 'volatile acidity': 'volatile_acidity', 'citric acid': 'citric_acid', 'residual sugar': 'residual_sugar', 'free sulfur dioxide': 'free_sulfur_dioxide', 'total sulfur dioxide': 'total_sulfur_dioxide'}, inplace=True)
data.columns
data['evaluation'] = ['Bad' if i < 3 else 'Good' if i < 7 else 'High quality' for i in data.quality]
data.loc[:15, ['quality', 'evaluation']] | code |
89125510/cell_16 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/wine-quality-dataset/WineQT.csv')
data.corr()
data.columns
data = data.drop(['Id'], axis=1)
f,ax=plt.subplots(figsize=(18,9))
sns.heatmap(data.corr(),annot=True,linewidths=5,fmt='.1f',ax=ax,linecolor='black')
data.rename(columns={'fixed acidity': 'fixed_acidity', 'volatile acidity': 'volatile_acidity', 'citric acid': 'citric_acid', 'residual sugar': 'residual_sugar', 'free sulfur dioxide': 'free_sulfur_dioxide', 'total sulfur dioxide': 'total_sulfur_dioxide'}, inplace=True)
data.columns
data['evaluation'] = ['Bad' if i < 3 else 'Good' if i < 7 else 'High quality' for i in data.quality]
data.loc[:15, ['quality', 'evaluation']]
data['free_sulfur_dioxide'].value_counts() | code |
89125510/cell_3 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/wine-quality-dataset/WineQT.csv')
data.info() | code |
89125510/cell_17 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
# Wine-quality EDA: correlation heatmap, a coarse quality label, and a bar
# plot of a per-quality-group average.
data = pd.read_csv('../input/wine-quality-dataset/WineQT.csv')
data.corr()
data.columns
# Drop the synthetic row id before computing correlations.
data = data.drop(['Id'], axis=1)
f,ax=plt.subplots(figsize=(18,9))
sns.heatmap(data.corr(),annot=True,linewidths=5,fmt='.1f',ax=ax,linecolor='black')
# Normalize column names to snake_case.
data.rename(columns={'fixed acidity': 'fixed_acidity', 'volatile acidity': 'volatile_acidity', 'citric acid': 'citric_acid', 'residual sugar': 'residual_sugar', 'free sulfur dioxide': 'free_sulfur_dioxide', 'total sulfur dioxide': 'total_sulfur_dioxide'}, inplace=True)
data.columns
# Coarse label from the quality score: <3 Bad, 3..6 Good, >=7 High quality.
data['evaluation'] = ['Bad' if i < 3 else 'Good' if i < 7 else 'High quality' for i in data.quality]
data.loc[:15, ['quality', 'evaluation']]
quality_list = list(data.quality.unique())
sulfur = []
# NOTE(review): despite the names, this averages the 'quality' column within
# each quality group (so each value equals the group's quality itself); it
# never reads a sulfur column.  Confirm whether x.free_sulfur_dioxide (or
# similar) was intended.
for i in quality_list:
    x = data[data.quality == i]
    sulfur_rate = sum(x.quality) / len(x)
    sulfur.append(sulfur_rate)
dataFrame = pd.DataFrame({'quality_list': quality_list, 'sulfur_ratio': sulfur})
# Order the bars by descending ratio.
new_index = dataFrame['sulfur_ratio'].sort_values(ascending=False).index.values
sorted_data2 = dataFrame.reindex(new_index)
plt.figure(figsize=(15, 10))
sns.barplot(x=sorted_data2['quality_list'], y=sorted_data2['sulfur_ratio'])
plt.xticks(rotation=90)
plt.xlabel('quality')
plt.ylabel('sulfur rate')
plt.show() | code |
89125510/cell_14 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/wine-quality-dataset/WineQT.csv')
data.corr()
data.columns
data = data.drop(['Id'], axis=1)
f,ax=plt.subplots(figsize=(18,9))
sns.heatmap(data.corr(),annot=True,linewidths=5,fmt='.1f',ax=ax,linecolor='black')
data.rename(columns={'fixed acidity': 'fixed_acidity', 'volatile acidity': 'volatile_acidity', 'citric acid': 'citric_acid', 'residual sugar': 'residual_sugar', 'free sulfur dioxide': 'free_sulfur_dioxide', 'total sulfur dioxide': 'total_sulfur_dioxide'}, inplace=True)
data.columns
data.alcohol.plot(kind='hist', bins=50, figsize=(15, 15), color='red') | code |
89125510/cell_10 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/wine-quality-dataset/WineQT.csv')
data.corr()
data.columns
data = data.drop(['Id'], axis=1)
f,ax=plt.subplots(figsize=(18,9))
sns.heatmap(data.corr(),annot=True,linewidths=5,fmt='.1f',ax=ax,linecolor='black')
data.rename(columns={'fixed acidity': 'fixed_acidity', 'volatile acidity': 'volatile_acidity', 'citric acid': 'citric_acid', 'residual sugar': 'residual_sugar', 'free sulfur dioxide': 'free_sulfur_dioxide', 'total sulfur dioxide': 'total_sulfur_dioxide'}, inplace=True)
data.columns | code |
89125510/cell_12 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/wine-quality-dataset/WineQT.csv')
data.corr()
data.columns
data = data.drop(['Id'], axis=1)
f,ax=plt.subplots(figsize=(18,9))
sns.heatmap(data.corr(),annot=True,linewidths=5,fmt='.1f',ax=ax,linecolor='black')
data.rename(columns={'fixed acidity': 'fixed_acidity', 'volatile acidity': 'volatile_acidity', 'citric acid': 'citric_acid', 'residual sugar': 'residual_sugar', 'free sulfur dioxide': 'free_sulfur_dioxide', 'total sulfur dioxide': 'total_sulfur_dioxide'}, inplace=True)
data.columns
data.alcohol.plot(kind='line', color='red', label='sulphates', linewidth=1, alpha=0.5, grid=True, linestyle=':')
data.quality.plot(color='g', label='quality', linewidth=1, alpha=0.5, grid=True, linestyle='-.')
plt.legend(loc='upper right')
plt.title('Line Plot')
plt.show() | code |
89125510/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/wine-quality-dataset/WineQT.csv')
data.corr() | code |
2041701/cell_21 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('../input/train.csv')
df_train['sex_female'] = df_train['Sex'].apply(lambda x: 1 if x == 'female' else 0)
df_train['age_snr'] = df_train['Age'].apply(lambda x: 1 if x >= 50 else 0)
df_train['age_mid'] = df_train['Age'].apply(lambda x: 1 if x > 10 and x < 50 else 0)
df_train['age_jnr'] = df_train['Age'].apply(lambda x: 1 if x <= 10 else 0)
df_train['known_age'] = df_train['Age'].apply(lambda x: 0 if pd.isnull(x) else 1)
df_train.loc[df_train['known_age'] == 0, 'age_mid'] = 1
df_test = pd.read_csv('../input/test.csv')
print(df_test.info())
print(df_test.head()) | code |
2041701/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('../input/train.csv')
df_train['sex_female'] = df_train['Sex'].apply(lambda x: 1 if x == 'female' else 0)
df_train['age_snr'] = df_train['Age'].apply(lambda x: 1 if x >= 50 else 0)
df_train['age_mid'] = df_train['Age'].apply(lambda x: 1 if x > 10 and x < 50 else 0)
df_train['age_jnr'] = df_train['Age'].apply(lambda x: 1 if x <= 10 else 0)
df_train['known_age'] = df_train['Age'].apply(lambda x: 0 if pd.isnull(x) else 1)
df_train.loc[df_train['known_age'] == 0, 'age_mid'] = 1
print(df_train.head()) | code |
2041701/cell_25 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler
import pandas as pd
# Train a logistic regression on engineered Titanic features and write the
# first submission file.
df_train = pd.read_csv('../input/train.csv')
# Binary indicators: sex plus three age buckets; unknown ages default to mid.
df_train['sex_female'] = df_train['Sex'].apply(lambda x: 1 if x == 'female' else 0)
df_train['age_snr'] = df_train['Age'].apply(lambda x: 1 if x >= 50 else 0)
df_train['age_mid'] = df_train['Age'].apply(lambda x: 1 if x > 10 and x < 50 else 0)
df_train['age_jnr'] = df_train['Age'].apply(lambda x: 1 if x <= 10 else 0)
df_train['known_age'] = df_train['Age'].apply(lambda x: 0 if pd.isnull(x) else 1)
df_train.loc[df_train['known_age'] == 0, 'age_mid'] = 1
from sklearn.preprocessing import StandardScaler
# NOTE(review): X_train / X_valid / y_train are not defined in this cell --
# they come from the train_test_split cell, which must run first.
sc = StandardScaler()
sc.fit(X_train)
X_train_std = sc.transform(X_train)
X_valid_std = sc.transform(X_valid)
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
# Weakly regularized logistic regression (large C), fixed seed.
lr = LogisticRegression(C=1000.0, random_state=0)
lr.fit(X_train_std, y_train)
y_pred = lr.predict(X_valid_std)
# Apply the identical feature engineering to the competition test file.
df_test = pd.read_csv('../input/test.csv')
df_test['sex_female'] = df_test['Sex'].apply(lambda x: 1 if x == 'female' else 0)
df_test['age_snr'] = df_test['Age'].apply(lambda x: 1 if x >= 50 else 0)
df_test['age_mid'] = df_test['Age'].apply(lambda x: 1 if x > 10 and x < 50 else 0)
df_test['age_jnr'] = df_test['Age'].apply(lambda x: 1 if x <= 10 else 0)
df_test['known_age'] = df_test['Age'].apply(lambda x: 0 if pd.isnull(x) else 1)
df_test.loc[df_test['known_age'] == 0, 'age_mid'] = 1
# Score the test rows with the scaler and model fitted above, then save.
test = df_test.loc[:, ['PassengerId', 'Pclass', 'sex_female', 'age_jnr', 'known_age']]
test_std = sc.transform(test)
yy_test = lr.predict(test_std)
df_submission = pd.DataFrame({'PassengerId': test['PassengerId'], 'Survived': yy_test})
print(df_submission.head())
df_submission.to_csv('accountant_titanic_01.csv', index=False) | code |
2041701/cell_33 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
sc.fit(X_train)
X_train_std = sc.transform(X_train)
X_valid_std = sc.transform(X_valid)
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
lr = LogisticRegression(C=1000.0, random_state=0)
lr.fit(X_train_std, y_train)
y_pred = lr.predict(X_valid_std)
def classif_func(data_train, label_train, data_valid, label_valid, classif):
classif.fit(data_train, label_train)
y_pred = classif.predict(data_valid)
return ((label_valid != y_pred).sum(), accuracy_score(label_valid, y_pred))
def classif_Kne(data_train, label_train, data_valid, label_valid, k):
classif = KNeighborsClassifier(k)
classif.fit(data_train, label_train)
y_pred = classif.predict(data_valid)
return ((label_valid != y_pred).sum(), accuracy_score(label_valid, y_pred))
for i in range(1, 30):
print(i, classif_Kne(X_train_std, y_train, X_valid_std, y_valid, i)) | code |
2041701/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Load the Titanic training data and preview the first rows.
df_train = pd.read_csv('../input/train.csv')
print(df_train.head()) | code |
2041701/cell_19 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
sc.fit(X_train)
X_train_std = sc.transform(X_train)
X_valid_std = sc.transform(X_valid)
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
lr = LogisticRegression(C=1000.0, random_state=0)
lr.fit(X_train_std, y_train)
y_pred = lr.predict(X_valid_std)
print((y_valid != y_pred).sum())
print(accuracy_score(y_valid, y_pred)) | code |
2041701/cell_15 | [
"text_plain_output_1.png"
] | from sklearn.cross_validation import train_test_split
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df_train = pd.read_csv('../input/train.csv')
df_train['sex_female'] = df_train['Sex'].apply(lambda x: 1 if x == 'female' else 0)
df_train['age_snr'] = df_train['Age'].apply(lambda x: 1 if x >= 50 else 0)
df_train['age_mid'] = df_train['Age'].apply(lambda x: 1 if x > 10 and x < 50 else 0)
df_train['age_jnr'] = df_train['Age'].apply(lambda x: 1 if x <= 10 else 0)
df_train['known_age'] = df_train['Age'].apply(lambda x: 0 if pd.isnull(x) else 1)
df_train.loc[df_train['known_age'] == 0, 'age_mid'] = 1
train = df_train.drop(['PassengerId', 'Name', 'Sex', 'Ticket', 'Cabin', 'Embarked', 'Age'], axis=1)
fig = plt.subplots(figsize=(20,10))
sns.heatmap(train.astype(float).corr(), annot=True, cmap='plasma') # my daugther favourite's color
from sklearn.cross_validation import train_test_split
X = df_train.loc[:, ['PassengerId', 'Pclass', 'sex_female', 'age_jnr', 'known_age']]
y = df_train['Survived']
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.3, random_state=0) | code |
2041701/cell_3 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
2041701/cell_35 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
sc.fit(X_train)
X_train_std = sc.transform(X_train)
X_valid_std = sc.transform(X_valid)
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
lr = LogisticRegression(C=1000.0, random_state=0)
lr.fit(X_train_std, y_train)
y_pred = lr.predict(X_valid_std)
def classif_func(data_train, label_train, data_valid, label_valid, classif):
classif.fit(data_train, label_train)
y_pred = classif.predict(data_valid)
return ((label_valid != y_pred).sum(), accuracy_score(label_valid, y_pred))
def classif_Kne(data_train, label_train, data_valid, label_valid, k):
classif = KNeighborsClassifier(k)
classif.fit(data_train, label_train)
y_pred = classif.predict(data_valid)
return ((label_valid != y_pred).sum(), accuracy_score(label_valid, y_pred))
def classif_SVC(data_train, label_train, data_valid, label_valid, k):
classif = SVC(gamma=2, C=k)
classif.fit(data_train, label_train)
y_pred = classif.predict(data_valid)
return ((label_valid != y_pred).sum(), accuracy_score(label_valid, y_pred))
for i in range(1, 30):
print(i, classif_SVC(X_train_std, y_train, X_valid_std, y_valid, i)) | code |
2041701/cell_31 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
sc.fit(X_train)
X_train_std = sc.transform(X_train)
X_valid_std = sc.transform(X_valid)
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
lr = LogisticRegression(C=1000.0, random_state=0)
lr.fit(X_train_std, y_train)
y_pred = lr.predict(X_valid_std)
classif_list = [SVC(kernel='linear', C=0.025), SVC(gamma=2, C=1), KNeighborsClassifier(10), RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1), AdaBoostClassifier(), GaussianNB()]
def classif_func(data_train, label_train, data_valid, label_valid, classif):
classif.fit(data_train, label_train)
y_pred = classif.predict(data_valid)
return ((label_valid != y_pred).sum(), accuracy_score(label_valid, y_pred))
for classif in classif_list:
print(classif, classif_func(X_train_std, y_train, X_valid_std, y_valid, classif)) | code |
2041701/cell_22 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('../input/train.csv')
df_train['sex_female'] = df_train['Sex'].apply(lambda x: 1 if x == 'female' else 0)
df_train['age_snr'] = df_train['Age'].apply(lambda x: 1 if x >= 50 else 0)
df_train['age_mid'] = df_train['Age'].apply(lambda x: 1 if x > 10 and x < 50 else 0)
df_train['age_jnr'] = df_train['Age'].apply(lambda x: 1 if x <= 10 else 0)
df_train['known_age'] = df_train['Age'].apply(lambda x: 0 if pd.isnull(x) else 1)
df_train.loc[df_train['known_age'] == 0, 'age_mid'] = 1
df_test = pd.read_csv('../input/test.csv')
df_test['sex_female'] = df_test['Sex'].apply(lambda x: 1 if x == 'female' else 0)
df_test['age_snr'] = df_test['Age'].apply(lambda x: 1 if x >= 50 else 0)
df_test['age_mid'] = df_test['Age'].apply(lambda x: 1 if x > 10 and x < 50 else 0)
df_test['age_jnr'] = df_test['Age'].apply(lambda x: 1 if x <= 10 else 0)
df_test['known_age'] = df_test['Age'].apply(lambda x: 0 if pd.isnull(x) else 1)
df_test.loc[df_test['known_age'] == 0, 'age_mid'] = 1
print(df_test.head()) | code |
2041701/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('../input/train.csv')
df_train['sex_female'] = df_train['Sex'].apply(lambda x: 1 if x == 'female' else 0)
df_train['age_snr'] = df_train['Age'].apply(lambda x: 1 if x >= 50 else 0)
df_train['age_mid'] = df_train['Age'].apply(lambda x: 1 if x > 10 and x < 50 else 0)
df_train['age_jnr'] = df_train['Age'].apply(lambda x: 1 if x <= 10 else 0)
df_train['known_age'] = df_train['Age'].apply(lambda x: 0 if pd.isnull(x) else 1)
df_train.loc[df_train['known_age'] == 0, 'age_mid'] = 1
print(df_train.describe()) | code |
2041701/cell_37 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('../input/train.csv')
df_train['sex_female'] = df_train['Sex'].apply(lambda x: 1 if x == 'female' else 0)
df_train['age_snr'] = df_train['Age'].apply(lambda x: 1 if x >= 50 else 0)
df_train['age_mid'] = df_train['Age'].apply(lambda x: 1 if x > 10 and x < 50 else 0)
df_train['age_jnr'] = df_train['Age'].apply(lambda x: 1 if x <= 10 else 0)
df_train['known_age'] = df_train['Age'].apply(lambda x: 0 if pd.isnull(x) else 1)
df_train.loc[df_train['known_age'] == 0, 'age_mid'] = 1
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
sc.fit(X_train)
X_train_std = sc.transform(X_train)
X_valid_std = sc.transform(X_valid)
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
lr = LogisticRegression(C=1000.0, random_state=0)
lr.fit(X_train_std, y_train)
y_pred = lr.predict(X_valid_std)
df_test = pd.read_csv('../input/test.csv')
df_test['sex_female'] = df_test['Sex'].apply(lambda x: 1 if x == 'female' else 0)
df_test['age_snr'] = df_test['Age'].apply(lambda x: 1 if x >= 50 else 0)
df_test['age_mid'] = df_test['Age'].apply(lambda x: 1 if x > 10 and x < 50 else 0)
df_test['age_jnr'] = df_test['Age'].apply(lambda x: 1 if x <= 10 else 0)
df_test['known_age'] = df_test['Age'].apply(lambda x: 0 if pd.isnull(x) else 1)
df_test.loc[df_test['known_age'] == 0, 'age_mid'] = 1
test = df_test.loc[:, ['PassengerId', 'Pclass', 'sex_female', 'age_jnr', 'known_age']]
test_std = sc.transform(test)
yy_test = lr.predict(test_std)
df_submission = pd.DataFrame({'PassengerId': test['PassengerId'], 'Survived': yy_test})
df_submission.to_csv('accountant_titanic_01.csv', index=False)
def classif_func(data_train, label_train, data_valid, label_valid, classif):
classif.fit(data_train, label_train)
y_pred = classif.predict(data_valid)
return ((label_valid != y_pred).sum(), accuracy_score(label_valid, y_pred))
def classif_Kne(data_train, label_train, data_valid, label_valid, k):
classif = KNeighborsClassifier(k)
classif.fit(data_train, label_train)
y_pred = classif.predict(data_valid)
return ((label_valid != y_pred).sum(), accuracy_score(label_valid, y_pred))
def classif_SVC(data_train, label_train, data_valid, label_valid, k):
classif = SVC(gamma=2, C=k)
classif.fit(data_train, label_train)
y_pred = classif.predict(data_valid)
return ((label_valid != y_pred).sum(), accuracy_score(label_valid, y_pred))
classif = KNeighborsClassifier(20)
classif.fit(X_train_std, y_train)
yy_test = classif.predict(test_std)
df_submission = pd.DataFrame({'PassengerId': test['PassengerId'], 'Survived': yy_test})
df_submission.to_csv('accountant_titanic_02.csv', index=False)
classif = SVC(gamma=2, C=3)
classif.fit(X_train_std, y_train)
yy_test = classif.predict(test_std)
df_submission = pd.DataFrame({'PassengerId': test['PassengerId'], 'Survived': yy_test})
print(df_submission.head())
df_submission.to_csv('accountant_titanic_03.csv', index=False) | code |
2041701/cell_12 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df_train = pd.read_csv('../input/train.csv')
df_train['sex_female'] = df_train['Sex'].apply(lambda x: 1 if x == 'female' else 0)
df_train['age_snr'] = df_train['Age'].apply(lambda x: 1 if x >= 50 else 0)
df_train['age_mid'] = df_train['Age'].apply(lambda x: 1 if x > 10 and x < 50 else 0)
df_train['age_jnr'] = df_train['Age'].apply(lambda x: 1 if x <= 10 else 0)
df_train['known_age'] = df_train['Age'].apply(lambda x: 0 if pd.isnull(x) else 1)
df_train.loc[df_train['known_age'] == 0, 'age_mid'] = 1
train = df_train.drop(['PassengerId', 'Name', 'Sex', 'Ticket', 'Cabin', 'Embarked', 'Age'], axis=1)
fig = plt.subplots(figsize=(20, 10))
sns.heatmap(train.astype(float).corr(), annot=True, cmap='plasma') | code |
2041701/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Load the Titanic training data and print its schema / non-null summary.
df_train = pd.read_csv('../input/train.csv')
print(df_train.info()) | code |
2041701/cell_36 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('../input/train.csv')
df_train['sex_female'] = df_train['Sex'].apply(lambda x: 1 if x == 'female' else 0)
df_train['age_snr'] = df_train['Age'].apply(lambda x: 1 if x >= 50 else 0)
df_train['age_mid'] = df_train['Age'].apply(lambda x: 1 if x > 10 and x < 50 else 0)
df_train['age_jnr'] = df_train['Age'].apply(lambda x: 1 if x <= 10 else 0)
df_train['known_age'] = df_train['Age'].apply(lambda x: 0 if pd.isnull(x) else 1)
df_train.loc[df_train['known_age'] == 0, 'age_mid'] = 1
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
sc.fit(X_train)
X_train_std = sc.transform(X_train)
X_valid_std = sc.transform(X_valid)
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
lr = LogisticRegression(C=1000.0, random_state=0)
lr.fit(X_train_std, y_train)
y_pred = lr.predict(X_valid_std)
df_test = pd.read_csv('../input/test.csv')
df_test['sex_female'] = df_test['Sex'].apply(lambda x: 1 if x == 'female' else 0)
df_test['age_snr'] = df_test['Age'].apply(lambda x: 1 if x >= 50 else 0)
df_test['age_mid'] = df_test['Age'].apply(lambda x: 1 if x > 10 and x < 50 else 0)
df_test['age_jnr'] = df_test['Age'].apply(lambda x: 1 if x <= 10 else 0)
df_test['known_age'] = df_test['Age'].apply(lambda x: 0 if pd.isnull(x) else 1)
df_test.loc[df_test['known_age'] == 0, 'age_mid'] = 1
test = df_test.loc[:, ['PassengerId', 'Pclass', 'sex_female', 'age_jnr', 'known_age']]
test_std = sc.transform(test)
yy_test = lr.predict(test_std)
df_submission = pd.DataFrame({'PassengerId': test['PassengerId'], 'Survived': yy_test})
df_submission.to_csv('accountant_titanic_01.csv', index=False)
def classif_func(data_train, label_train, data_valid, label_valid, classif):
classif.fit(data_train, label_train)
y_pred = classif.predict(data_valid)
return ((label_valid != y_pred).sum(), accuracy_score(label_valid, y_pred))
def classif_Kne(data_train, label_train, data_valid, label_valid, k):
classif = KNeighborsClassifier(k)
classif.fit(data_train, label_train)
y_pred = classif.predict(data_valid)
return ((label_valid != y_pred).sum(), accuracy_score(label_valid, y_pred))
def classif_SVC(data_train, label_train, data_valid, label_valid, k):
classif = SVC(gamma=2, C=k)
classif.fit(data_train, label_train)
y_pred = classif.predict(data_valid)
return ((label_valid != y_pred).sum(), accuracy_score(label_valid, y_pred))
classif = KNeighborsClassifier(20)
classif.fit(X_train_std, y_train)
yy_test = classif.predict(test_std)
df_submission = pd.DataFrame({'PassengerId': test['PassengerId'], 'Survived': yy_test})
print(df_submission.head())
df_submission.to_csv('accountant_titanic_02.csv', index=False) | code |
90124505/cell_13 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
pop = pd.read_csv('../input/vietnam-population-dgp-education-data/pop.csv')
edu = pd.read_csv('../input/vietnam-population-dgp-education-data/Vietnamstudent.csv')
gdp = pd.read_csv('../input/vietnam-population-dgp-education-data/GDPcompare.csv')
pisa = pd.read_csv('../input/vietnam-population-dgp-education-data/Pisa_GDP.csv')
sns.set_style('whitegrid')
gdp = gdp.query('Year >= 1990')
fig,ax = plt.subplots(figsize = (10,7))
rapid_year = [2015,2010,2019]
gdp['color_cats'] = ['blue' if x in rapid_year else 'red' for x in gdp['Year']]
ax.bar (gdp['Year'],gdp['Vietnam'], color = gdp['color_cats'], alpha = 0.5)
ax2 = ax.twinx()
ax2.plot( gdp['Year'], gdp['Vietnam GDP PPP'])
ax.set_ylabel('GDP - Hundered Billion USD')
ax2.set_ylabel('GDP per person - USD')
ax.set_title('Vietnam GDP and GDP PPP 1990 - 2019', size =14)
fig, ax = plt.subplots(figsize =(10,7))
ax.plot(gdp['Year'], gdp['Indonesia %'], color = 'Blue', alpha = 0.3)
ax.plot(gdp['Year'], gdp['Malatsia %'], color = 'Green', alpha = 0.3)
ax.plot(gdp['Year'], gdp['Thailand %'], color = 'Gray', alpha = 0.3)
ax.plot(gdp['Year'], gdp['Singapore %'], color = 'Yellow', alpha = 0.3)
ax.plot(gdp['Year'], gdp['Vietnam %'], color = 'Red')
ax.set_xlabel('Year')
ax.set_ylabel('GDP Growth %')
ax.set_title('Vietnam and some other countries in SEA GDP growth rate', y = 1.1, size = 14)
ax.legend(['Indonesia','Malaysia','Thailand','Singapore','Vietnam'])
plt.show()
X = np.array([pop['Newborn']])
Y = pop.index.tolist()
P = []
K = []
pop.shape
pg = pop.query('Year >= 1990')
fig, ax = plt.subplots(figsize = (10,7))
ax.bar(pg['Year'], pg['Urbanpop'], alpha = 0.4)
ax.bar(pg['Year'], pg['Rural Population'], bottom = pg['Urbanpop'], color = 'Green', alpha = 0.4)
ax.legend(['Urbanpop','Ruralpop'])
ax.set_xlabel('Year')
ax.set_ylabel('Total population x 100 millions')
fig, ax = plt.subplots(figsize=(10, 7))
ax.bar(pg['Year'], pg['Urbanpop'], alpha=0.4)
ax.bar(pg['Year'], pg['Rural Population'], bottom=pg['Urbanpop'], color='Gray', alpha=0.4)
ax.legend(['Urbanpop', 'Ruralpop'])
ax.set_xlabel('Year')
ax.set_ylabel('Total population x 100 millions')
ax2 = ax.twinx()
ax2.plot(pg['Year'], pg['Population growth %'])
ax2.set_ylabel('% Population Growth')
plt.show() | code |
90124505/cell_4 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
pop = pd.read_csv('../input/vietnam-population-dgp-education-data/pop.csv')
edu = pd.read_csv('../input/vietnam-population-dgp-education-data/Vietnamstudent.csv')
gdp = pd.read_csv('../input/vietnam-population-dgp-education-data/GDPcompare.csv')
pisa = pd.read_csv('../input/vietnam-population-dgp-education-data/Pisa_GDP.csv')
sns.set_style('whitegrid')
gdp = gdp.query('Year >= 1990')
fig,ax = plt.subplots(figsize = (10,7))
rapid_year = [2015,2010,2019]
gdp['color_cats'] = ['blue' if x in rapid_year else 'red' for x in gdp['Year']]
ax.bar (gdp['Year'],gdp['Vietnam'], color = gdp['color_cats'], alpha = 0.5)
ax2 = ax.twinx()
ax2.plot( gdp['Year'], gdp['Vietnam GDP PPP'])
ax.set_ylabel('GDP - Hundered Billion USD')
ax2.set_ylabel('GDP per person - USD')
ax.set_title('Vietnam GDP and GDP PPP 1990 - 2019', size =14)
fig, ax = plt.subplots(figsize=(10, 7))
ax.plot(gdp['Year'], gdp['Indonesia %'], color='Blue', alpha=0.3)
ax.plot(gdp['Year'], gdp['Malatsia %'], color='Green', alpha=0.3)
ax.plot(gdp['Year'], gdp['Thailand %'], color='Gray', alpha=0.3)
ax.plot(gdp['Year'], gdp['Singapore %'], color='Yellow', alpha=0.3)
ax.plot(gdp['Year'], gdp['Vietnam %'], color='Red')
ax.set_xlabel('Year')
ax.set_ylabel('GDP Growth %')
ax.set_title('Vietnam and some other countries in SEA GDP growth rate', y=1.1, size=14)
ax.legend(['Indonesia', 'Malaysia', 'Thailand', 'Singapore', 'Vietnam'])
plt.show() | code |
90124505/cell_20 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
pop = pd.read_csv('../input/vietnam-population-dgp-education-data/pop.csv')
edu = pd.read_csv('../input/vietnam-population-dgp-education-data/Vietnamstudent.csv')
gdp = pd.read_csv('../input/vietnam-population-dgp-education-data/GDPcompare.csv')
pisa = pd.read_csv('../input/vietnam-population-dgp-education-data/Pisa_GDP.csv')
sns.set_style('whitegrid')
gdp = gdp.query('Year >= 1990')
fig,ax = plt.subplots(figsize = (10,7))
rapid_year = [2015,2010,2019]
gdp['color_cats'] = ['blue' if x in rapid_year else 'red' for x in gdp['Year']]
ax.bar (gdp['Year'],gdp['Vietnam'], color = gdp['color_cats'], alpha = 0.5)
ax2 = ax.twinx()
ax2.plot( gdp['Year'], gdp['Vietnam GDP PPP'])
ax.set_ylabel('GDP - Hundered Billion USD')
ax2.set_ylabel('GDP per person - USD')
ax.set_title('Vietnam GDP and GDP PPP 1990 - 2019', size =14)
fig, ax = plt.subplots(figsize =(10,7))
ax.plot(gdp['Year'], gdp['Indonesia %'], color = 'Blue', alpha = 0.3)
ax.plot(gdp['Year'], gdp['Malatsia %'], color = 'Green', alpha = 0.3)
ax.plot(gdp['Year'], gdp['Thailand %'], color = 'Gray', alpha = 0.3)
ax.plot(gdp['Year'], gdp['Singapore %'], color = 'Yellow', alpha = 0.3)
ax.plot(gdp['Year'], gdp['Vietnam %'], color = 'Red')
ax.set_xlabel('Year')
ax.set_ylabel('GDP Growth %')
ax.set_title('Vietnam and some other countries in SEA GDP growth rate', y = 1.1, size = 14)
ax.legend(['Indonesia','Malaysia','Thailand','Singapore','Vietnam'])
plt.show()
X = np.array([pop['Newborn']])
Y = pop.index.tolist()
P = []
K = []
pop.shape
pg = pop.query('Year >= 1990')
fig, ax = plt.subplots(figsize = (10,7))
ax.bar(pg['Year'], pg['Urbanpop'], alpha = 0.4)
ax.bar(pg['Year'], pg['Rural Population'], bottom = pg['Urbanpop'], color = 'Green', alpha = 0.4)
ax.legend(['Urbanpop','Ruralpop'])
ax.set_xlabel('Year')
ax.set_ylabel('Total population x 100 millions')
fig, ax = plt.subplots(figsize = (10,7))
ax.bar(pg['Year'], pg['Urbanpop'], alpha = 0.4)
ax.bar(pg['Year'], pg['Rural Population'], bottom = pg['Urbanpop'], color = 'Gray', alpha = 0.4)
ax.legend(['Urbanpop','Ruralpop'])
ax.set_xlabel('Year')
ax.set_ylabel('Total population x 100 millions')
ax2 = ax.twinx()
ax2.plot(pg['Year'], pg['Population growth %'])
ax2.set_ylabel ('% Population Growth')
plt.show()
pdu = pd.merge(pg, edu, on='Year')
pdu1 = pdu.query('Year>=2002')
fig, ax = plt.subplots(figsize = (10, 7))
ax.bar(pdu1['Year'], pdu1['6_10yo'], alpha = 0.4)
ax.bar(pdu1['Year'], pdu1['Primary'], alpha = 0.4)
ax.set_ylabel('Millions people')
ax.set_title('Number of 6-10 years old children and Primary students in Vietnam', y = 1.1, size = 14)
ax.legend(['6-10 years old','Primary student'])
fig, ax = plt.subplots(figsize=(10, 7))
ax.bar(pdu1['Year'], pdu1['6_18yo'], alpha=0.4)
ax.bar(pdu1['Year'], pdu1['K-12'], alpha=0.4)
ax.set_ylabel('Millions people')
ax.set_title('Population in 16 - 18 years old and K-12 students', y=1.1, size=14)
ax.legend(['6-18 yo', 'K-12'])
plt.show() | code |
90124505/cell_11 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np
import pandas as pd
pop = pd.read_csv('../input/vietnam-population-dgp-education-data/pop.csv')
edu = pd.read_csv('../input/vietnam-population-dgp-education-data/Vietnamstudent.csv')
gdp = pd.read_csv('../input/vietnam-population-dgp-education-data/GDPcompare.csv')
pisa = pd.read_csv('../input/vietnam-population-dgp-education-data/Pisa_GDP.csv')
X = np.array([pop['Newborn']])
Y = pop.index.tolist()
P = []
K = []
pop.shape
pg = pop.query('Year >= 1990')
pop.head() | code |
90124505/cell_19 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
pop = pd.read_csv('../input/vietnam-population-dgp-education-data/pop.csv')
edu = pd.read_csv('../input/vietnam-population-dgp-education-data/Vietnamstudent.csv')
gdp = pd.read_csv('../input/vietnam-population-dgp-education-data/GDPcompare.csv')
pisa = pd.read_csv('../input/vietnam-population-dgp-education-data/Pisa_GDP.csv')
sns.set_style('whitegrid')
gdp = gdp.query('Year >= 1990')
fig,ax = plt.subplots(figsize = (10,7))
rapid_year = [2015,2010,2019]
gdp['color_cats'] = ['blue' if x in rapid_year else 'red' for x in gdp['Year']]
ax.bar (gdp['Year'],gdp['Vietnam'], color = gdp['color_cats'], alpha = 0.5)
ax2 = ax.twinx()
ax2.plot( gdp['Year'], gdp['Vietnam GDP PPP'])
ax.set_ylabel('GDP - Hundered Billion USD')
ax2.set_ylabel('GDP per person - USD')
ax.set_title('Vietnam GDP and GDP PPP 1990 - 2019', size =14)
fig, ax = plt.subplots(figsize =(10,7))
ax.plot(gdp['Year'], gdp['Indonesia %'], color = 'Blue', alpha = 0.3)
ax.plot(gdp['Year'], gdp['Malatsia %'], color = 'Green', alpha = 0.3)
ax.plot(gdp['Year'], gdp['Thailand %'], color = 'Gray', alpha = 0.3)
ax.plot(gdp['Year'], gdp['Singapore %'], color = 'Yellow', alpha = 0.3)
ax.plot(gdp['Year'], gdp['Vietnam %'], color = 'Red')
ax.set_xlabel('Year')
ax.set_ylabel('GDP Growth %')
ax.set_title('Vietnam and some other countries in SEA GDP growth rate', y = 1.1, size = 14)
ax.legend(['Indonesia','Malaysia','Thailand','Singapore','Vietnam'])
plt.show()
X = np.array([pop['Newborn']])
Y = pop.index.tolist()
P = []
K = []
pop.shape
pg = pop.query('Year >= 1990')
fig, ax = plt.subplots(figsize = (10,7))
ax.bar(pg['Year'], pg['Urbanpop'], alpha = 0.4)
ax.bar(pg['Year'], pg['Rural Population'], bottom = pg['Urbanpop'], color = 'Green', alpha = 0.4)
ax.legend(['Urbanpop','Ruralpop'])
ax.set_xlabel('Year')
ax.set_ylabel('Total population x 100 millions')
fig, ax = plt.subplots(figsize = (10,7))
ax.bar(pg['Year'], pg['Urbanpop'], alpha = 0.4)
ax.bar(pg['Year'], pg['Rural Population'], bottom = pg['Urbanpop'], color = 'Gray', alpha = 0.4)
ax.legend(['Urbanpop','Ruralpop'])
ax.set_xlabel('Year')
ax.set_ylabel('Total population x 100 millions')
ax2 = ax.twinx()
ax2.plot(pg['Year'], pg['Population growth %'])
ax2.set_ylabel ('% Population Growth')
plt.show()
pdu = pd.merge(pg, edu, on='Year')
pdu1 = pdu.query('Year>=2002')
fig, ax = plt.subplots(figsize=(10, 7))
ax.bar(pdu1['Year'], pdu1['6_10yo'], alpha=0.4)
ax.bar(pdu1['Year'], pdu1['Primary'], alpha=0.4)
ax.set_ylabel('Millions people')
ax.set_title('Number of 6-10 years old children and Primary students in Vietnam', y=1.1, size=14)
ax.legend(['6-10 years old', 'Primary student']) | code |
90124505/cell_7 | [
"image_output_1.png"
] | import numpy as np
import pandas as pd
pop = pd.read_csv('../input/vietnam-population-dgp-education-data/pop.csv')
edu = pd.read_csv('../input/vietnam-population-dgp-education-data/Vietnamstudent.csv')
gdp = pd.read_csv('../input/vietnam-population-dgp-education-data/GDPcompare.csv')
pisa = pd.read_csv('../input/vietnam-population-dgp-education-data/Pisa_GDP.csv')
X = np.array([pop['Newborn']])
Y = pop.index.tolist()
P = []
K = []
pop.shape | code |
90124505/cell_18 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np
import pandas as pd
pop = pd.read_csv('../input/vietnam-population-dgp-education-data/pop.csv')
edu = pd.read_csv('../input/vietnam-population-dgp-education-data/Vietnamstudent.csv')
gdp = pd.read_csv('../input/vietnam-population-dgp-education-data/GDPcompare.csv')
pisa = pd.read_csv('../input/vietnam-population-dgp-education-data/Pisa_GDP.csv')
X = np.array([pop['Newborn']])
Y = pop.index.tolist()
P = []
K = []
pop.shape
pg = pop.query('Year >= 1990')
pdu = pd.merge(pg, edu, on='Year')
pdu1 = pdu.query('Year>=2002')
pdu1.head() | code |
90124505/cell_8 | [
"image_output_1.png"
] | import numpy as np
import pandas as pd
pop = pd.read_csv('../input/vietnam-population-dgp-education-data/pop.csv')
edu = pd.read_csv('../input/vietnam-population-dgp-education-data/Vietnamstudent.csv')
gdp = pd.read_csv('../input/vietnam-population-dgp-education-data/GDPcompare.csv')
pisa = pd.read_csv('../input/vietnam-population-dgp-education-data/Pisa_GDP.csv')
X = np.array([pop['Newborn']])
Y = pop.index.tolist()
P = []
K = []
for i in Y:
p = np.sum(X[0][i - 10:i - 5])
k = np.sum(X[0][i - 18:i - 5])
P.append(p)
K.append(k)
print(P)
print(K) | code |
90124505/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd
pop = pd.read_csv('../input/vietnam-population-dgp-education-data/pop.csv')
edu = pd.read_csv('../input/vietnam-population-dgp-education-data/Vietnamstudent.csv')
gdp = pd.read_csv('../input/vietnam-population-dgp-education-data/GDPcompare.csv')
pisa = pd.read_csv('../input/vietnam-population-dgp-education-data/Pisa_GDP.csv')
edu.head() | code |
90124505/cell_3 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
pop = pd.read_csv('../input/vietnam-population-dgp-education-data/pop.csv')
edu = pd.read_csv('../input/vietnam-population-dgp-education-data/Vietnamstudent.csv')
gdp = pd.read_csv('../input/vietnam-population-dgp-education-data/GDPcompare.csv')
pisa = pd.read_csv('../input/vietnam-population-dgp-education-data/Pisa_GDP.csv')
sns.set_style('whitegrid')
gdp = gdp.query('Year >= 1990')
fig, ax = plt.subplots(figsize=(10, 7))
rapid_year = [2015, 2010, 2019]
gdp['color_cats'] = ['blue' if x in rapid_year else 'red' for x in gdp['Year']]
ax.bar(gdp['Year'], gdp['Vietnam'], color=gdp['color_cats'], alpha=0.5)
ax2 = ax.twinx()
ax2.plot(gdp['Year'], gdp['Vietnam GDP PPP'])
ax.set_ylabel('GDP - Hundered Billion USD')
ax2.set_ylabel('GDP per person - USD')
ax.set_title('Vietnam GDP and GDP PPP 1990 - 2019', size=14) | code |
90124505/cell_17 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
pop = pd.read_csv('../input/vietnam-population-dgp-education-data/pop.csv')
edu = pd.read_csv('../input/vietnam-population-dgp-education-data/Vietnamstudent.csv')
gdp = pd.read_csv('../input/vietnam-population-dgp-education-data/GDPcompare.csv')
pisa = pd.read_csv('../input/vietnam-population-dgp-education-data/Pisa_GDP.csv')
X = np.array([pop['Newborn']])
Y = pop.index.tolist()
P = []
K = []
pop.shape
pg = pop.query('Year >= 1990')
pdu = pd.merge(pg, edu, on='Year')
pdu.head() | code |
90124505/cell_14 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
pop = pd.read_csv('../input/vietnam-population-dgp-education-data/pop.csv')
edu = pd.read_csv('../input/vietnam-population-dgp-education-data/Vietnamstudent.csv')
gdp = pd.read_csv('../input/vietnam-population-dgp-education-data/GDPcompare.csv')
pisa = pd.read_csv('../input/vietnam-population-dgp-education-data/Pisa_GDP.csv')
X = np.array([pop['Newborn']])
Y = pop.index.tolist()
P = []
K = []
pop.shape
pg = pop.query('Year >= 1990')
pg.head() | code |
90124505/cell_12 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
pop = pd.read_csv('../input/vietnam-population-dgp-education-data/pop.csv')
edu = pd.read_csv('../input/vietnam-population-dgp-education-data/Vietnamstudent.csv')
gdp = pd.read_csv('../input/vietnam-population-dgp-education-data/GDPcompare.csv')
pisa = pd.read_csv('../input/vietnam-population-dgp-education-data/Pisa_GDP.csv')
sns.set_style('whitegrid')
gdp = gdp.query('Year >= 1990')
fig,ax = plt.subplots(figsize = (10,7))
rapid_year = [2015,2010,2019]
gdp['color_cats'] = ['blue' if x in rapid_year else 'red' for x in gdp['Year']]
ax.bar (gdp['Year'],gdp['Vietnam'], color = gdp['color_cats'], alpha = 0.5)
ax2 = ax.twinx()
ax2.plot( gdp['Year'], gdp['Vietnam GDP PPP'])
ax.set_ylabel('GDP - Hundered Billion USD')
ax2.set_ylabel('GDP per person - USD')
ax.set_title('Vietnam GDP and GDP PPP 1990 - 2019', size =14)
fig, ax = plt.subplots(figsize =(10,7))
ax.plot(gdp['Year'], gdp['Indonesia %'], color = 'Blue', alpha = 0.3)
ax.plot(gdp['Year'], gdp['Malatsia %'], color = 'Green', alpha = 0.3)
ax.plot(gdp['Year'], gdp['Thailand %'], color = 'Gray', alpha = 0.3)
ax.plot(gdp['Year'], gdp['Singapore %'], color = 'Yellow', alpha = 0.3)
ax.plot(gdp['Year'], gdp['Vietnam %'], color = 'Red')
ax.set_xlabel('Year')
ax.set_ylabel('GDP Growth %')
ax.set_title('Vietnam and some other countries in SEA GDP growth rate', y = 1.1, size = 14)
ax.legend(['Indonesia','Malaysia','Thailand','Singapore','Vietnam'])
plt.show()
X = np.array([pop['Newborn']])
Y = pop.index.tolist()
P = []
K = []
pop.shape
pg = pop.query('Year >= 1990')
fig, ax = plt.subplots(figsize=(10, 7))
ax.bar(pg['Year'], pg['Urbanpop'], alpha=0.4)
ax.bar(pg['Year'], pg['Rural Population'], bottom=pg['Urbanpop'], color='Green', alpha=0.4)
ax.legend(['Urbanpop', 'Ruralpop'])
ax.set_xlabel('Year')
ax.set_ylabel('Total population x 100 millions') | code |
90124505/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
pop = pd.read_csv('../input/vietnam-population-dgp-education-data/pop.csv')
edu = pd.read_csv('../input/vietnam-population-dgp-education-data/Vietnamstudent.csv')
gdp = pd.read_csv('../input/vietnam-population-dgp-education-data/GDPcompare.csv')
pisa = pd.read_csv('../input/vietnam-population-dgp-education-data/Pisa_GDP.csv')
pop.head() | code |
16150255/cell_9 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
import pandas as pd # For loading and processing the dataset
df_train = pd.read_csv('../input/train.csv')
df_train = df_train.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1)
df_train = pd.concat([df_train, pd.get_dummies(df_train['Embarked'], prefix='Embarked')], axis=1)
df_train = df_train.drop('Embarked', axis=1)
X_train = df_train.drop('Survived', axis=1).as_matrix()
y_train = df_train['Survived'].as_matrix()
X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, test_size=0.2) | code |
16150255/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd # For loading and processing the dataset
df_train = pd.read_csv('../input/train.csv')
df_train.head(5) | code |
16150255/cell_11 | [
"text_html_output_1.png"
] | from sklearn.model_selection import train_test_split
import pandas as pd # For loading and processing the dataset
df_train = pd.read_csv('../input/train.csv')
df_train = df_train.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1)
df_train = pd.concat([df_train, pd.get_dummies(df_train['Embarked'], prefix='Embarked')], axis=1)
df_train = df_train.drop('Embarked', axis=1)
X_train = df_train.drop('Survived', axis=1).as_matrix()
y_train = df_train['Survived'].as_matrix()
X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, test_size=0.2)
print(X_train.shape, y_train.shape) | code |
16150255/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd # For loading and processing the dataset
df_train = pd.read_csv('../input/train.csv')
df_train = df_train.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1)
df_train = pd.concat([df_train, pd.get_dummies(df_train['Embarked'], prefix='Embarked')], axis=1)
df_train = df_train.drop('Embarked', axis=1)
print("Number of missing 'Age' values: {:d}".format(df_train['Age'].isnull().sum()))
df_train['Age'] = df_train['Age'].fillna(df_train['Age'].mean()) | code |
16150255/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # For loading and processing the dataset
df_train = pd.read_csv('../input/train.csv')
df_train = df_train.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1)
df_train = pd.concat([df_train, pd.get_dummies(df_train['Embarked'], prefix='Embarked')], axis=1)
df_train = df_train.drop('Embarked', axis=1)
df_train.head() | code |
Subsets and Splits