Dataset schema (one record per notebook cell: path, screenshot_names, code, cell_type):
  path             : string (length 13 to 17)
  screenshot_names : sequence of strings (1 to 873 items)
  code             : string (length 0 to 40.4k)
  cell_type        : string (1 distinct value)
129000049/cell_45
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
import numpy as np
import pandas as pd

wdf = pd.read_csv('/kaggle/input/historical-weather-data-for-indian-cities/jaipur.csv', parse_dates=['date_time'], index_col='date_time')
# NOTE: train_X, train_y, test_X and test_y are assumed to be built from wdf in earlier cells of this notebook.
train_X.shape
train_y.shape
model = LinearRegression()
model.fit(train_X, train_y)
prediction = model.predict(test_X)
mae = np.mean(np.absolute(prediction - test_y))
variance_score = model.score(test_X, test_y)
prediction = np.round(prediction, 2)
results = pd.DataFrame({'Actual': test_y, 'Prediction': prediction, 'Difference': test_y - prediction})
model = LogisticRegression()
model.fit(train_X, train_y)
prediction = model.predict(test_X)
results = pd.DataFrame({'Actual': test_y, 'Prediction': prediction, 'Difference': test_y - prediction})
print(results)
code
129000049/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd

wdf = pd.read_csv('/kaggle/input/historical-weather-data-for-indian-cities/jaipur.csv', parse_dates=['date_time'], index_col='date_time')
wdf.columns
wdf.shape
wdf.isnull().any()
wdf_num = wdf.loc[:, ['mintempC', 'tempC', 'HeatIndexC', 'pressure']]
wdf_num.shape
wdf_num.columns
weth = wdf_num['2019':'2020']
weth.head()
code
129000049/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd

wdf = pd.read_csv('/kaggle/input/historical-weather-data-for-indian-cities/jaipur.csv', parse_dates=['date_time'], index_col='date_time')
wdf.columns
wdf.shape
code
129000049/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd

wdf = pd.read_csv('/kaggle/input/historical-weather-data-for-indian-cities/jaipur.csv', parse_dates=['date_time'], index_col='date_time')
wdf.columns
wdf.shape
wdf.isnull().any()
wdf_num = wdf.loc[:, ['mintempC', 'tempC', 'HeatIndexC', 'pressure']]
wdf_num.shape
code
129000049/cell_38
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
import numpy as np
import pandas as pd

wdf = pd.read_csv('/kaggle/input/historical-weather-data-for-indian-cities/jaipur.csv', parse_dates=['date_time'], index_col='date_time')
# NOTE: train_X, train_y, test_X and test_y are assumed to be built from wdf in earlier cells of this notebook.
train_X.shape
train_y.shape
model = LinearRegression()
model.fit(train_X, train_y)
prediction = model.predict(test_X)
mae = np.mean(np.absolute(prediction - test_y))
variance_score = model.score(test_X, test_y)
prediction = np.round(prediction, 2)
results = pd.DataFrame({'Actual': test_y, 'Prediction': prediction, 'Difference': test_y - prediction})
model = LogisticRegression()
model.fit(train_X, train_y)
code
129000049/cell_17
[ "text_html_output_1.png" ]
import pandas as pd

wdf = pd.read_csv('/kaggle/input/historical-weather-data-for-indian-cities/jaipur.csv', parse_dates=['date_time'], index_col='date_time')
wdf.columns
wdf.shape
wdf.isnull().any()
wdf_num = wdf.loc[:, ['mintempC', 'tempC', 'HeatIndexC', 'pressure']]
wdf_num.shape
wdf_num.columns
code
129000049/cell_31
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression

# NOTE: train_X and train_y are assumed to be built in earlier cells of this notebook.
train_X.shape
train_y.shape
model = LinearRegression()
model.fit(train_X, train_y)
code
129000049/cell_22
[ "text_html_output_1.png" ]
train_X.shape
code
1008454/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

gTemp = pd.read_csv('../input/GlobalTemperatures.csv')
gTempCountry = pd.read_csv('../input/GlobalLandTemperaturesByCountry.csv')
gTempState = pd.read_csv('../input/GlobalLandTemperaturesByState.csv')
gTempMajorCity = pd.read_csv('../input/GlobalLandTemperaturesByMajorCity.csv')
gTempCity = pd.read_csv('../input/GlobalLandTemperaturesByCity.csv')
plt.figure(figsize=(12, 6))
gTemp.groupby(by='Year').mean()['LandAndOceanAverageTemperature'].dropna().plot()
code
1008454/cell_3
[ "text_html_output_1.png" ]
import pandas as pd

gTemp = pd.read_csv('../input/GlobalTemperatures.csv')
gTempCountry = pd.read_csv('../input/GlobalLandTemperaturesByCountry.csv')
gTempState = pd.read_csv('../input/GlobalLandTemperaturesByState.csv')
gTempMajorCity = pd.read_csv('../input/GlobalLandTemperaturesByMajorCity.csv')
gTempCity = pd.read_csv('../input/GlobalLandTemperaturesByCity.csv')
gTemp.head(5)
code
1008454/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

gTemp = pd.read_csv('../input/GlobalTemperatures.csv')
gTempCountry = pd.read_csv('../input/GlobalLandTemperaturesByCountry.csv')
gTempState = pd.read_csv('../input/GlobalLandTemperaturesByState.csv')
gTempMajorCity = pd.read_csv('../input/GlobalLandTemperaturesByMajorCity.csv')
gTempCity = pd.read_csv('../input/GlobalLandTemperaturesByCity.csv')
gTemp.info()
code
330673/cell_13
[ "text_plain_output_1.png" ]
from scipy.stats import chisquare
from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import pandas as pd

df = pd.read_csv('../input/people.csv')
chars = [i for i in df.columns.values if 'char_' in i]
flags = []
for feat in df[chars]:
    group = df[chars].groupby(feat)
    for otherfeat in df[chars].drop(feat, axis=1):
        summary = group[otherfeat].count()
        if chisquare(summary)[1] < 0.05:
            flags.append(feat)
            flags.append(otherfeat)
flags = set(flags)
dums = pd.get_dummies(df[chars])
scaledums = MinMaxScaler().fit_transform(dums)
pca = PCA(n_components=2)
featurecomponents = pca.fit_transform(scaledums)
components = {}
index = 0
for feature in dums.columns.values:
    components[feature] = [pca.components_[0][index]]
    index += 1
sortedcomps = pca.components_[0]
sortedcomps.sort()
maxcap = sortedcomps[-3]
mincap = sortedcomps[2]
# each value is a one-element list, so compare its scalar entry
components = {i: x for i, x in components.items() if x[0] >= maxcap or x[0] <= mincap}
components = pd.DataFrame(components)
featurecomponents = pd.DataFrame(featurecomponents, columns=['Principle Component 1', 'Principle Component 2'])
df['Principle Component 1'] = featurecomponents['Principle Component 1']
featurecomponents['group_1'] = df['group_1']
groupslist = list(set(featurecomponents['group_1'].tolist()))
group = featurecomponents[featurecomponents['group_1'] == groupslist[0]]
group.plot(kind='scatter', x='Principle Component 1', y='Principle Component 2', figsize=(3, 3))
print('There are {} data points in this group.'.format(len(group.index)))
code
330673/cell_9
[ "text_plain_output_1.png" ]
from scipy.stats import chisquare
from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import pandas as pd

df = pd.read_csv('../input/people.csv')
chars = [i for i in df.columns.values if 'char_' in i]
flags = []
for feat in df[chars]:
    group = df[chars].groupby(feat)
    for otherfeat in df[chars].drop(feat, axis=1):
        summary = group[otherfeat].count()
        if chisquare(summary)[1] < 0.05:
            flags.append(feat)
            flags.append(otherfeat)
flags = set(flags)
dums = pd.get_dummies(df[chars])
scaledums = MinMaxScaler().fit_transform(dums)
pca = PCA(n_components=2)
featurecomponents = pca.fit_transform(scaledums)
components = {}
index = 0
for feature in dums.columns.values:
    components[feature] = [pca.components_[0][index]]
    index += 1
sortedcomps = pca.components_[0]
sortedcomps.sort()
maxcap = sortedcomps[-3]
mincap = sortedcomps[2]
# each value is a one-element list, so compare its scalar entry
components = {i: x for i, x in components.items() if x[0] >= maxcap or x[0] <= mincap}
components = pd.DataFrame(components)
components.plot(kind='bar', figsize=(12, 4))
code
330673/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
from scipy.stats import chisquare
import pandas as pd

df = pd.read_csv('../input/people.csv')
chars = [i for i in df.columns.values if 'char_' in i]
flags = []
for feat in df[chars]:
    group = df[chars].groupby(feat)
    for otherfeat in df[chars].drop(feat, axis=1):
        summary = group[otherfeat].count()
        if chisquare(summary)[1] < 0.05:
            flags.append(feat)
            flags.append(otherfeat)
flags = set(flags)
print('It looks like {}% of the characteristics might be related to one another.'.format(len(flags) / len(chars) * 100))
code
330673/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
from scipy.stats import chisquare
import pandas as pd

df = pd.read_csv('../input/people.csv')
chars = [i for i in df.columns.values if 'char_' in i]
flags = []
for feat in df[chars]:
    group = df[chars].groupby(feat)
    for otherfeat in df[chars].drop(feat, axis=1):
        summary = group[otherfeat].count()
        if chisquare(summary)[1] < 0.05:
            flags.append(feat)
            flags.append(otherfeat)
flags = set(flags)
dums = pd.get_dummies(df[chars])
print('Before PCA the full size of the characteristics is {} features'.format(len(dums.columns.values)))
code
330673/cell_2
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/people.csv')
print(df.head())
code
330673/cell_11
[ "text_plain_output_1.png" ]
from scipy.stats import chisquare
from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import pandas as pd

df = pd.read_csv('../input/people.csv')
chars = [i for i in df.columns.values if 'char_' in i]
flags = []
for feat in df[chars]:
    group = df[chars].groupby(feat)
    for otherfeat in df[chars].drop(feat, axis=1):
        summary = group[otherfeat].count()
        if chisquare(summary)[1] < 0.05:
            flags.append(feat)
            flags.append(otherfeat)
flags = set(flags)
dums = pd.get_dummies(df[chars])
scaledums = MinMaxScaler().fit_transform(dums)
pca = PCA(n_components=2)
featurecomponents = pca.fit_transform(scaledums)
components = {}
index = 0
for feature in dums.columns.values:
    components[feature] = [pca.components_[0][index]]
    index += 1
sortedcomps = pca.components_[0]
sortedcomps.sort()
maxcap = sortedcomps[-3]
mincap = sortedcomps[2]
# each value is a one-element list, so compare its scalar entry
components = {i: x for i, x in components.items() if x[0] >= maxcap or x[0] <= mincap}
components = pd.DataFrame(components)
featurecomponents = pd.DataFrame(featurecomponents, columns=['Principle Component 1', 'Principle Component 2'])
df['Principle Component 1'] = featurecomponents['Principle Component 1']
featurecomponents.plot(kind='scatter', x='Principle Component 1', y='Principle Component 2', figsize=(12, 12), s=1)
code
330673/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
from scipy.stats import chisquare
from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler
import pandas as pd

df = pd.read_csv('../input/people.csv')
chars = [i for i in df.columns.values if 'char_' in i]
flags = []
for feat in df[chars]:
    group = df[chars].groupby(feat)
    for otherfeat in df[chars].drop(feat, axis=1):
        summary = group[otherfeat].count()
        if chisquare(summary)[1] < 0.05:
            flags.append(feat)
            flags.append(otherfeat)
flags = set(flags)
dums = pd.get_dummies(df[chars])
scaledums = MinMaxScaler().fit_transform(dums)
pca = PCA(n_components=2)
featurecomponents = pca.fit_transform(scaledums)
print(pca.explained_variance_ratio_)
code
330673/cell_15
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from scipy.stats import chisquare
from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import pandas as pd

df = pd.read_csv('../input/people.csv')
chars = [i for i in df.columns.values if 'char_' in i]
flags = []
for feat in df[chars]:
    group = df[chars].groupby(feat)
    for otherfeat in df[chars].drop(feat, axis=1):
        summary = group[otherfeat].count()
        if chisquare(summary)[1] < 0.05:
            flags.append(feat)
            flags.append(otherfeat)
flags = set(flags)
dums = pd.get_dummies(df[chars])
scaledums = MinMaxScaler().fit_transform(dums)
pca = PCA(n_components=2)
featurecomponents = pca.fit_transform(scaledums)
components = {}
index = 0
for feature in dums.columns.values:
    components[feature] = [pca.components_[0][index]]
    index += 1
sortedcomps = pca.components_[0]
sortedcomps.sort()
maxcap = sortedcomps[-3]
mincap = sortedcomps[2]
# each value is a one-element list, so compare its scalar entry
components = {i: x for i, x in components.items() if x[0] >= maxcap or x[0] <= mincap}
components = pd.DataFrame(components)
featurecomponents = pd.DataFrame(featurecomponents, columns=['Principle Component 1', 'Principle Component 2'])
df['Principle Component 1'] = featurecomponents['Principle Component 1']
featurecomponents['group_1'] = df['group_1']
groupslist = list(set(featurecomponents['group_1'].tolist()))
group = featurecomponents[featurecomponents['group_1'] == groupslist[0]]
group = featurecomponents[featurecomponents['group_1'] == groupslist[5]]
group = featurecomponents[featurecomponents['group_1'] == groupslist[6]]
group.plot(kind='scatter', x='Principle Component 1', y='Principle Component 2', figsize=(3, 3))
print('There are {} data points in this group.'.format(len(group.index)))
code
330673/cell_17
[ "text_plain_output_1.png", "image_output_1.png" ]
from scipy.stats import chisquare
from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import pandas as pd

df = pd.read_csv('../input/people.csv')
chars = [i for i in df.columns.values if 'char_' in i]
flags = []
for feat in df[chars]:
    group = df[chars].groupby(feat)
    for otherfeat in df[chars].drop(feat, axis=1):
        summary = group[otherfeat].count()
        if chisquare(summary)[1] < 0.05:
            flags.append(feat)
            flags.append(otherfeat)
flags = set(flags)
dums = pd.get_dummies(df[chars])
scaledums = MinMaxScaler().fit_transform(dums)
pca = PCA(n_components=2)
featurecomponents = pca.fit_transform(scaledums)
components = {}
index = 0
for feature in dums.columns.values:
    components[feature] = [pca.components_[0][index]]
    index += 1
sortedcomps = pca.components_[0]
sortedcomps.sort()
maxcap = sortedcomps[-3]
mincap = sortedcomps[2]
# each value is a one-element list, so compare its scalar entry
components = {i: x for i, x in components.items() if x[0] >= maxcap or x[0] <= mincap}
components = pd.DataFrame(components)
cares = [i / 100 for i in range(75, 100, 5)]
for i in range(20, len(dums.columns.values)):
    pca = PCA(n_components=i)
    pca.fit(scaledums)
    try:
        if pca.explained_variance_ratio_.sum() > cares[0]:
            print("To explain {0} of the variance you'll need {1} components".format(cares[0], i))
            cares = cares[1:]
    except:
        break
code
330673/cell_14
[ "text_plain_output_1.png" ]
from scipy.stats import chisquare
from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import pandas as pd

df = pd.read_csv('../input/people.csv')
chars = [i for i in df.columns.values if 'char_' in i]
flags = []
for feat in df[chars]:
    group = df[chars].groupby(feat)
    for otherfeat in df[chars].drop(feat, axis=1):
        summary = group[otherfeat].count()
        if chisquare(summary)[1] < 0.05:
            flags.append(feat)
            flags.append(otherfeat)
flags = set(flags)
dums = pd.get_dummies(df[chars])
scaledums = MinMaxScaler().fit_transform(dums)
pca = PCA(n_components=2)
featurecomponents = pca.fit_transform(scaledums)
components = {}
index = 0
for feature in dums.columns.values:
    components[feature] = [pca.components_[0][index]]
    index += 1
sortedcomps = pca.components_[0]
sortedcomps.sort()
maxcap = sortedcomps[-3]
mincap = sortedcomps[2]
# each value is a one-element list, so compare its scalar entry
components = {i: x for i, x in components.items() if x[0] >= maxcap or x[0] <= mincap}
components = pd.DataFrame(components)
featurecomponents = pd.DataFrame(featurecomponents, columns=['Principle Component 1', 'Principle Component 2'])
df['Principle Component 1'] = featurecomponents['Principle Component 1']
featurecomponents['group_1'] = df['group_1']
groupslist = list(set(featurecomponents['group_1'].tolist()))
group = featurecomponents[featurecomponents['group_1'] == groupslist[0]]
group = featurecomponents[featurecomponents['group_1'] == groupslist[5]]
group.plot(kind='scatter', x='Principle Component 1', y='Principle Component 2', figsize=(3, 3))
print('There are {} data points in this group.'.format(len(group.index)))
code
325017/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

masterDF = pd.read_csv('../input/emails.csv')
messageList = masterDF['message'].tolist()
bodyList = []
for message in messageList:
    firstSplit = message.split('X-FileName: ', 1)[1]
    secondSplit = firstSplit.split('.')
    if len(secondSplit) > 1:
        secondSplit = secondSplit[1]
    body = ''.join(secondSplit)[4:]
    bodyList.append(body)
code
16135671/cell_9
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt

def test_plot(x):
    pass

test_plot(x_train_re)
code
16135671/cell_6
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt

def test_plot(x):
    pass

test_plot(x_test)
code
16135671/cell_1
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
!pip install tensorflow-gpu
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
import keras
from keras.models import Model
from keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D, BatchNormalization, Activation
from keras.datasets import cifar10
code
16135671/cell_3
[ "image_output_1.png" ]
from keras.datasets import cifar10

def load_images():
    (x_train, _), (x_test, _) = cifar10.load_data()
    return (x_train, x_test)

x_train, x_test = load_images()
code
16135671/cell_14
[ "image_output_1.png" ]
from keras.datasets import cifar10
from keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D, BatchNormalization, Activation
from keras.models import Model
import keras
import matplotlib.pyplot as plt

def load_images():
    (x_train, _), (x_test, _) = cifar10.load_data()
    return (x_train, x_test)

def test_plot(x):
    pass

def normalize(x_train, x_test):
    x_train = keras.utils.normalize(x_train)
    x_test = keras.utils.normalize(x_test)
    return (x_train, x_test)

# NOTE: x_train is assumed to be defined in an earlier cell.
input_shape = x_train.shape[1:]
receptive_field = (3, 3)
pooling_field = (2, 2)

def CONVautoencoder(x_train_re, x_test_re, epochs=200):
    input_img = Input(shape=(32, 32, 3))
    x = Conv2D(64, (3, 3), padding='same')(input_img)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Conv2D(32, (3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Conv2D(16, (3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    encoded = MaxPooling2D((2, 2), padding='same')(x)
    x = Conv2D(16, (3, 3), padding='same')(encoded)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(32, (3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(64, (3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(3, (3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    decoded = Activation('sigmoid')(x)
    autoencoder = Model(input_img, decoded)
    encoder = Model(input_img, encoded)
    autoencoder.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    autoencoder.fit(x_train_re, x_train_re, epochs=epochs, batch_size=250, validation_data=(x_test_re, x_test_re))
    encoded_imgs = encoder.predict(x_test_re)
    predicted = autoencoder.predict(x_test_re)
    return (encoded_imgs, predicted)

def plotting(x_test, encoded_imgs, predicted):
    plt.figure(figsize=(40, 4))
    for i in range(10):
        # display original images
        ax = plt.subplot(3, 20, i + 1)
        plt.imshow(x_test[i].reshape(32, 32, 3))
        # plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        # display reconstructed images
        ax = plt.subplot(3, 20, 2 * 20 + i + 1)
        plt.imshow(predicted[i].reshape(32, 32, 3))
        # plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)

def main():
    x_train, x_test = load_images()
    x_train, x_test = normalize(x_train, x_test)
    # the original cell passed x_train_re/x_test_re, which are not defined here;
    # the normalized arrays are used instead
    encoded_imgs, predicted = CONVautoencoder(x_train, x_test)

main()
code
16135671/cell_10
[ "text_plain_output_1.png" ]
import keras

def normalize(x_train, x_test):
    x_train = keras.utils.normalize(x_train)
    x_test = keras.utils.normalize(x_test)
    return (x_train, x_test)

# NOTE: x_train_re, x_test_re and x_train are assumed to come from earlier cells.
print(x_train_re.shape[1:])
print(x_test_re.shape)
input_shape = x_train.shape[1:]
receptive_field = (3, 3)
pooling_field = (2, 2)
code
16135671/cell_5
[ "image_output_1.png" ]
import matplotlib.pyplot as plt

def test_plot(x):
    pass

test_plot(x_train)
code
122264859/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import numpy as np
import pandas as pd
import tensorflow as tf

mnist = keras.datasets.mnist
(train_images_mnist, train_labels_mnist), (test_images_mnist, test_labels_mnist) = mnist.load_data()
train_images_mnist = np.reshape(train_images_mnist, (train_images_mnist.shape[0], 28, 28, 1))
test_images_mnist = np.reshape(test_images_mnist, (test_images_mnist.shape[0], 28, 28, 1))
az_path = '/kaggle/input/az-handwritten-alphabets-in-csv-format/A_Z Handwritten Data.csv'
AZ_data = pd.read_csv(az_path, header=None)
AZ_labels = AZ_data.values[:, 0]
AZ_images = AZ_data.values[:, 1:]
AZ_images = np.reshape(AZ_images, (AZ_images.shape[0], 28, 28, 1))
test_size = float(len(test_labels_mnist)) / len(train_labels_mnist)
train_images_AZ, test_images_AZ, train_labels_AZ, test_labels_AZ = train_test_split(AZ_images, AZ_labels, test_size=test_size)
train_labels_mnist = train_labels_mnist + max(AZ_labels) + 1
test_labels_mnist = test_labels_mnist + max(AZ_labels) + 1
train_images = np.concatenate((train_images_AZ, train_images_mnist), axis=0)
train_labels = np.concatenate((train_labels_AZ, train_labels_mnist))
test_images = np.concatenate((test_images_AZ, test_images_mnist), axis=0)
test_labels = np.concatenate((test_labels_AZ, test_labels_mnist))
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(512, activation='relu'),
    tf.keras.layers.Dense(len(np.unique(train_labels)), activation='softmax')
])
model.compile(optimizer=RMSprop(learning_rate=0.0001), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.summary()
train_datagen = ImageDataGenerator(rescale=1.0 / 255, rotation_range=15, width_shift_range=0.1, height_shift_range=0.1, shear_range=0.1, zoom_range=0.2, horizontal_flip=False, fill_mode='nearest')
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
train_generator = train_datagen.flow(train_images, train_labels, batch_size=50, shuffle=True)
validation_generator = test_datagen.flow(test_images, test_labels, batch_size=50, shuffle=True)
history = model.fit(train_generator, steps_per_epoch=750, epochs=100, validation_data=validation_generator, validation_steps=75, verbose=2)
model.save('/kaggle/working/model_v2')
model.evaluate(validation_generator)
code
122264859/cell_4
[ "image_output_1.png" ]
from sklearn.model_selection import train_test_split
from tensorflow import keras
import numpy as np
import pandas as pd

mnist = keras.datasets.mnist
(train_images_mnist, train_labels_mnist), (test_images_mnist, test_labels_mnist) = mnist.load_data()
train_images_mnist = np.reshape(train_images_mnist, (train_images_mnist.shape[0], 28, 28, 1))
test_images_mnist = np.reshape(test_images_mnist, (test_images_mnist.shape[0], 28, 28, 1))
az_path = '/kaggle/input/az-handwritten-alphabets-in-csv-format/A_Z Handwritten Data.csv'
AZ_data = pd.read_csv(az_path, header=None)
AZ_labels = AZ_data.values[:, 0]
AZ_images = AZ_data.values[:, 1:]
AZ_images = np.reshape(AZ_images, (AZ_images.shape[0], 28, 28, 1))
test_size = float(len(test_labels_mnist)) / len(train_labels_mnist)
print(f'test set size: {test_size}')
train_images_AZ, test_images_AZ, train_labels_AZ, test_labels_AZ = train_test_split(AZ_images, AZ_labels, test_size=test_size)
# shift the MNIST digit labels so they come after the 26 letter classes
train_labels_mnist = train_labels_mnist + max(AZ_labels) + 1
test_labels_mnist = test_labels_mnist + max(AZ_labels) + 1
train_images = np.concatenate((train_images_AZ, train_images_mnist), axis=0)
train_labels = np.concatenate((train_labels_AZ, train_labels_mnist))
test_images = np.concatenate((test_images_AZ, test_images_mnist), axis=0)
test_labels = np.concatenate((test_labels_AZ, test_labels_mnist))
print('Data ready')
code
122264859/cell_19
[ "image_output_1.png" ]
from imutils.contours import sort_contours
from matplotlib import cm
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras.models import load_model
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import cv2
import imutils
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf

mnist = keras.datasets.mnist
(train_images_mnist, train_labels_mnist), (test_images_mnist, test_labels_mnist) = mnist.load_data()
train_images_mnist = np.reshape(train_images_mnist, (train_images_mnist.shape[0], 28, 28, 1))
test_images_mnist = np.reshape(test_images_mnist, (test_images_mnist.shape[0], 28, 28, 1))
az_path = '/kaggle/input/az-handwritten-alphabets-in-csv-format/A_Z Handwritten Data.csv'
AZ_data = pd.read_csv(az_path, header=None)
AZ_labels = AZ_data.values[:, 0]
AZ_images = AZ_data.values[:, 1:]
AZ_images = np.reshape(AZ_images, (AZ_images.shape[0], 28, 28, 1))
test_size = float(len(test_labels_mnist)) / len(train_labels_mnist)
train_images_AZ, test_images_AZ, train_labels_AZ, test_labels_AZ = train_test_split(AZ_images, AZ_labels, test_size=test_size)
train_labels_mnist = train_labels_mnist + max(AZ_labels) + 1
test_labels_mnist = test_labels_mnist + max(AZ_labels) + 1
train_images = np.concatenate((train_images_AZ, train_images_mnist), axis=0)
train_labels = np.concatenate((train_labels_AZ, train_labels_mnist))
test_images = np.concatenate((test_images_AZ, test_images_mnist), axis=0)
test_labels = np.concatenate((test_labels_AZ, test_labels_mnist))
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(512, activation='relu'),
    tf.keras.layers.Dense(len(np.unique(train_labels)), activation='softmax')
])
model.compile(optimizer=RMSprop(learning_rate=0.0001), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.summary()
train_datagen = ImageDataGenerator(rescale=1.0 / 255, rotation_range=15, width_shift_range=0.1, height_shift_range=0.1, shear_range=0.1, zoom_range=0.2, horizontal_flip=False, fill_mode='nearest')
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
train_generator = train_datagen.flow(train_images, train_labels, batch_size=50, shuffle=True)
validation_generator = test_datagen.flow(test_images, test_labels, batch_size=50, shuffle=True)
history = model.fit(train_generator, steps_per_epoch=750, epochs=100, validation_data=validation_generator, validation_steps=75, verbose=2)
model.save('/kaggle/working/model_v2')
model.evaluate(validation_generator)
model_path = '/kaggle/working/model_v2'
model = load_model(model_path)
image_path = '/kaggle/input/tester2/pja 4.jpg'
image = cv2.imread(image_path)
# perform edge detection, find contours in the edge map, and sort the
# resulting contours from left-to-right
# NOTE: blurred and cropped are computed in cell_14 of this notebook
edged = cv2.Canny(blurred, 30, 250)  # low_threshold, high_threshold
cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
cnts = sort_contours(cnts, method='left-to-right')[0]
figure = plt.figure(figsize=(7, 7))
plt.axis('off')
plt.imshow(edged, cmap=cm.binary_r)
chars = []
for c in cnts:
    x, y, w, h = cv2.boundingRect(c)
    roi = cropped[y:y + h, x:x + w]
    thresh = cv2.threshold(roi, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
    tH, tW = thresh.shape
    if tW > tH:
        thresh = imutils.resize(thresh, width=28)
    else:
        thresh = imutils.resize(thresh, height=28)
    tH, tW = thresh.shape
    dX = int(max(0, 28 - tW) / 2.0)
    dY = int(max(0, 28 - tH) / 2.0)
    padded = cv2.copyMakeBorder(thresh, top=dY, bottom=dY, left=dX, right=dX, borderType=cv2.BORDER_CONSTANT, value=(0, 0, 0))
    padded = cv2.resize(padded, (28, 28))
    padded = padded.astype('float32') / 255.0
    padded = np.expand_dims(padded, axis=-1)
    chars.append((padded, (x, y, w, h)))
# plot isolated characters
n_cols = 10
n_rows = int(np.floor(len(chars) / n_cols) + 1)
fig = plt.figure(figsize=(1.5 * n_cols, 1.5 * n_rows))
for i, char in enumerate(chars):
    ax = plt.subplot(n_rows, n_cols, i + 1)
    ax.imshow(char[0][:, :, 0], cmap=cm.binary, aspect='auto')
    # plt.axis('off')
plt.tight_layout()
boxes = [b[1] for b in chars]
chars = np.array([c[0] for c in chars], dtype='float32')
preds = model.predict(chars)
labelNames = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
image = cv2.imread(image_path)
cropped = image[120:, :]
for pred, (x, y, w, h) in zip(preds, boxes):
    i = np.argmax(pred)
    prob = pred[i]
    label = labelNames[i]
    label_text = f'{label},{prob * 100:.1f}%'
    cv2.rectangle(cropped, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.putText(cropped, label_text, (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
plt.figure(figsize=(15, 10))
plt.imshow(cropped)
code
122264859/cell_18
[ "text_plain_output_1.png" ]
from imutils.contours import sort_contours
from matplotlib import cm
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras.models import load_model
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import cv2
import imutils
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf

mnist = keras.datasets.mnist
(train_images_mnist, train_labels_mnist), (test_images_mnist, test_labels_mnist) = mnist.load_data()
train_images_mnist = np.reshape(train_images_mnist, (train_images_mnist.shape[0], 28, 28, 1))
test_images_mnist = np.reshape(test_images_mnist, (test_images_mnist.shape[0], 28, 28, 1))
az_path = '/kaggle/input/az-handwritten-alphabets-in-csv-format/A_Z Handwritten Data.csv'
AZ_data = pd.read_csv(az_path, header=None)
AZ_labels = AZ_data.values[:, 0]
AZ_images = AZ_data.values[:, 1:]
AZ_images = np.reshape(AZ_images, (AZ_images.shape[0], 28, 28, 1))
test_size = float(len(test_labels_mnist)) / len(train_labels_mnist)
train_images_AZ, test_images_AZ, train_labels_AZ, test_labels_AZ = train_test_split(AZ_images, AZ_labels, test_size=test_size)
train_labels_mnist = train_labels_mnist + max(AZ_labels) + 1
test_labels_mnist = test_labels_mnist + max(AZ_labels) + 1
train_images = np.concatenate((train_images_AZ, train_images_mnist), axis=0)
train_labels = np.concatenate((train_labels_AZ, train_labels_mnist))
test_images = np.concatenate((test_images_AZ, test_images_mnist), axis=0)
test_labels = np.concatenate((test_labels_AZ, test_labels_mnist))
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(512, activation='relu'),
    tf.keras.layers.Dense(len(np.unique(train_labels)), activation='softmax')
])
model.compile(optimizer=RMSprop(learning_rate=0.0001), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.summary()
train_datagen = ImageDataGenerator(rescale=1.0 / 255, rotation_range=15, width_shift_range=0.1, height_shift_range=0.1, shear_range=0.1, zoom_range=0.2, horizontal_flip=False, fill_mode='nearest')
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
train_generator = train_datagen.flow(train_images, train_labels, batch_size=50, shuffle=True)
validation_generator = test_datagen.flow(test_images, test_labels, batch_size=50, shuffle=True)
history = model.fit(train_generator, steps_per_epoch=750, epochs=100, validation_data=validation_generator, validation_steps=75, verbose=2)
model.save('/kaggle/working/model_v2')
model.evaluate(validation_generator)
model_path = '/kaggle/working/model_v2'
model = load_model(model_path)
image_path = '/kaggle/input/tester2/pja 4.jpg'
image = cv2.imread(image_path)
# perform edge detection, find contours in the edge map, and sort the
# resulting contours from left-to-right
# NOTE: blurred and cropped are computed in cell_14 of this notebook
edged = cv2.Canny(blurred, 30, 250)  # low_threshold, high_threshold
cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
cnts = sort_contours(cnts, method='left-to-right')[0]
figure = plt.figure(figsize=(7, 7))
plt.axis('off')
plt.imshow(edged, cmap=cm.binary_r)
chars = []
for c in cnts:
    x, y, w, h = cv2.boundingRect(c)
    roi = cropped[y:y + h, x:x + w]
    thresh = cv2.threshold(roi, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
    tH, tW = thresh.shape
    if tW > tH:
        thresh = imutils.resize(thresh, width=28)
    else:
        thresh = imutils.resize(thresh, height=28)
    tH, tW = thresh.shape
    dX = int(max(0, 28 - tW) / 2.0)
    dY = int(max(0, 28 - tH) / 2.0)
    padded = cv2.copyMakeBorder(thresh, top=dY, bottom=dY, left=dX, right=dX, borderType=cv2.BORDER_CONSTANT, value=(0, 0, 0))
    padded = cv2.resize(padded, (28, 28))
    padded = padded.astype('float32') / 255.0
    padded = np.expand_dims(padded, axis=-1)
    chars.append((padded, (x, y, w, h)))
# plot isolated characters
n_cols = 10
n_rows = int(np.floor(len(chars) / n_cols) + 1)
fig = plt.figure(figsize=(1.5 * n_cols, 1.5 * n_rows))
for i, char in enumerate(chars):
    ax = plt.subplot(n_rows, n_cols, i + 1)
    ax.imshow(char[0][:, :, 0], cmap=cm.binary, aspect='auto')
    # plt.axis('off')
plt.tight_layout()
boxes = [b[1] for b in chars]
chars = np.array([c[0] for c in chars], dtype='float32')
preds = model.predict(chars)
labelNames = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
code
122264859/cell_8
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import numpy as np
import pandas as pd
import tensorflow as tf

mnist = keras.datasets.mnist
(train_images_mnist, train_labels_mnist), (test_images_mnist, test_labels_mnist) = mnist.load_data()
train_images_mnist = np.reshape(train_images_mnist, (train_images_mnist.shape[0], 28, 28, 1))
test_images_mnist = np.reshape(test_images_mnist, (test_images_mnist.shape[0], 28, 28, 1))
az_path = '/kaggle/input/az-handwritten-alphabets-in-csv-format/A_Z Handwritten Data.csv'
AZ_data = pd.read_csv(az_path, header=None)
AZ_labels = AZ_data.values[:, 0]
AZ_images = AZ_data.values[:, 1:]
AZ_images = np.reshape(AZ_images, (AZ_images.shape[0], 28, 28, 1))
test_size = float(len(test_labels_mnist)) / len(train_labels_mnist)
train_images_AZ, test_images_AZ, train_labels_AZ, test_labels_AZ = train_test_split(AZ_images, AZ_labels, test_size=test_size)
train_labels_mnist = train_labels_mnist + max(AZ_labels) + 1
test_labels_mnist = test_labels_mnist + max(AZ_labels) + 1
train_images = np.concatenate((train_images_AZ, train_images_mnist), axis=0)
train_labels = np.concatenate((train_labels_AZ, train_labels_mnist))
test_images = np.concatenate((test_images_AZ, test_images_mnist), axis=0)
test_labels = np.concatenate((test_labels_AZ, test_labels_mnist))
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(512, activation='relu'),
    tf.keras.layers.Dense(len(np.unique(train_labels)), activation='softmax')
])
model.compile(optimizer=RMSprop(learning_rate=0.0001), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.summary()
train_datagen = ImageDataGenerator(rescale=1.0 / 255, rotation_range=15, width_shift_range=0.1, height_shift_range=0.1, shear_range=0.1, zoom_range=0.2, horizontal_flip=False, fill_mode='nearest')
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
train_generator = train_datagen.flow(train_images, train_labels, batch_size=50, shuffle=True)
validation_generator = test_datagen.flow(test_images, test_labels, batch_size=50, shuffle=True)
history = model.fit(train_generator, steps_per_epoch=750, epochs=100, validation_data=validation_generator, validation_steps=75, verbose=2)
model.save('/kaggle/working/model_v2')
code
122264859/cell_15
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from imutils.contours import sort_contours
from matplotlib import cm
import cv2
import imutils
import matplotlib.pyplot as plt

image_path = '/kaggle/input/tester2/pja 4.jpg'
image = cv2.imread(image_path)
# NOTE: blurred is computed in cell_14 of this notebook
edged = cv2.Canny(blurred, 30, 250)
cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
cnts = sort_contours(cnts, method='left-to-right')[0]
figure = plt.figure(figsize=(7, 7))
plt.axis('off')
plt.imshow(edged, cmap=cm.binary_r)
code
122264859/cell_17
[ "text_plain_output_1.png" ]
from imutils.contours import sort_contours
from matplotlib import cm
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras.optimizers import RMSprop
import cv2
import imutils
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf

mnist = keras.datasets.mnist
(train_images_mnist, train_labels_mnist), (test_images_mnist, test_labels_mnist) = mnist.load_data()
train_images_mnist = np.reshape(train_images_mnist, (train_images_mnist.shape[0], 28, 28, 1))
test_images_mnist = np.reshape(test_images_mnist, (test_images_mnist.shape[0], 28, 28, 1))
az_path = '/kaggle/input/az-handwritten-alphabets-in-csv-format/A_Z Handwritten Data.csv'
AZ_data = pd.read_csv(az_path, header=None)
AZ_labels = AZ_data.values[:, 0]
AZ_images = AZ_data.values[:, 1:]
AZ_images = np.reshape(AZ_images, (AZ_images.shape[0], 28, 28, 1))
test_size = float(len(test_labels_mnist)) / len(train_labels_mnist)
train_images_AZ, test_images_AZ, train_labels_AZ, test_labels_AZ = train_test_split(AZ_images, AZ_labels, test_size=test_size)
train_labels_mnist = train_labels_mnist + max(AZ_labels) + 1
test_labels_mnist = test_labels_mnist + max(AZ_labels) + 1
train_images = np.concatenate((train_images_AZ, train_images_mnist), axis=0)
train_labels = np.concatenate((train_labels_AZ, train_labels_mnist))
test_images = np.concatenate((test_images_AZ, test_images_mnist), axis=0)
test_labels = np.concatenate((test_labels_AZ, test_labels_mnist))
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(512, activation='relu'),
    tf.keras.layers.Dense(len(np.unique(train_labels)), activation='softmax')
])
model.compile(optimizer=RMSprop(learning_rate=0.0001), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.summary()
image_path = '/kaggle/input/tester2/pja 4.jpg'
image = cv2.imread(image_path)
# perform edge detection, find contours in the edge map, and sort the
# resulting contours from left-to-right
# NOTE: blurred and cropped are computed in cell_14 of this notebook
edged = cv2.Canny(blurred, 30, 250)  # low_threshold, high_threshold
cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
cnts = sort_contours(cnts, method='left-to-right')[0]
figure = plt.figure(figsize=(7, 7))
plt.axis('off')
plt.imshow(edged, cmap=cm.binary_r)
chars = []
for c in cnts:
    x, y, w, h = cv2.boundingRect(c)
    roi = cropped[y:y + h, x:x + w]
    thresh = cv2.threshold(roi, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
    tH, tW = thresh.shape
    if tW > tH:
        thresh = imutils.resize(thresh, width=28)
    else:
        thresh = imutils.resize(thresh, height=28)
    tH, tW = thresh.shape
    dX = int(max(0, 28 - tW) / 2.0)
    dY = int(max(0, 28 - tH) / 2.0)
    padded = cv2.copyMakeBorder(thresh, top=dY, bottom=dY, left=dX, right=dX, borderType=cv2.BORDER_CONSTANT, value=(0, 0, 0))
    padded = cv2.resize(padded, (28, 28))
    padded = padded.astype('float32') / 255.0
    padded = np.expand_dims(padded, axis=-1)
    chars.append((padded, (x, y, w, h)))
n_cols = 10
n_rows = int(np.floor(len(chars) / n_cols) + 1)
fig = plt.figure(figsize=(1.5 * n_cols, 1.5 * n_rows))
for i, char in enumerate(chars):
    ax = plt.subplot(n_rows, n_cols, i + 1)
    ax.imshow(char[0][:, :, 0], cmap=cm.binary, aspect='auto')
plt.tight_layout()
code
122264859/cell_14
[ "text_plain_output_1.png" ]
import cv2
import matplotlib.pyplot as plt
from matplotlib import cm

# NOTE: image is assumed to be loaded in an earlier cell, e.g. image = cv2.imread(image_path)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cropped = gray[120:, :]
blurred = cv2.GaussianBlur(cropped, (5, 5), 0)
fig = plt.figure(figsize=(16, 4))
ax = plt.subplot(1, 4, 1)
ax.imshow(image)
ax.set_title('original image')
ax = plt.subplot(1, 4, 2)
ax.imshow(gray, cmap=cm.binary_r)
ax.set_axis_off()
ax.set_title('grayscale image')
ax = plt.subplot(1, 4, 3)
ax.imshow(cropped, cmap=cm.binary_r)
ax.set_axis_off()
ax.set_title('cropped image')
ax = plt.subplot(1, 4, 4)
ax.imshow(blurred, cmap=cm.binary_r)
ax.set_axis_off()
ax.set_title('blurred image')
code
122264859/cell_10
[ "text_plain_output_1.png" ]
pip install imutils
code
122264859/cell_12
[ "text_plain_output_1.png" ]
from tensorflow.keras.models import load_model

model_path = '/kaggle/working/model_v2'
print('Loading NN model...')
model = load_model(model_path)
print('Done')
code
122264859/cell_5
[ "image_output_1.png" ]
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras.optimizers import RMSprop
import numpy as np
import pandas as pd
import tensorflow as tf

mnist = keras.datasets.mnist
(train_images_mnist, train_labels_mnist), (test_images_mnist, test_labels_mnist) = mnist.load_data()
train_images_mnist = np.reshape(train_images_mnist, (train_images_mnist.shape[0], 28, 28, 1))
test_images_mnist = np.reshape(test_images_mnist, (test_images_mnist.shape[0], 28, 28, 1))
az_path = '/kaggle/input/az-handwritten-alphabets-in-csv-format/A_Z Handwritten Data.csv'
AZ_data = pd.read_csv(az_path, header=None)
AZ_labels = AZ_data.values[:, 0]
AZ_images = AZ_data.values[:, 1:]
AZ_images = np.reshape(AZ_images, (AZ_images.shape[0], 28, 28, 1))
test_size = float(len(test_labels_mnist)) / len(train_labels_mnist)
train_images_AZ, test_images_AZ, train_labels_AZ, test_labels_AZ = train_test_split(AZ_images, AZ_labels, test_size=test_size)
train_labels_mnist = train_labels_mnist + max(AZ_labels) + 1
test_labels_mnist = test_labels_mnist + max(AZ_labels) + 1
train_images = np.concatenate((train_images_AZ, train_images_mnist), axis=0)
train_labels = np.concatenate((train_labels_AZ, train_labels_mnist))
test_images = np.concatenate((test_images_AZ, test_images_mnist), axis=0)
test_labels = np.concatenate((test_labels_AZ, test_labels_mnist))
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(512, activation='relu'),
    tf.keras.layers.Dense(len(np.unique(train_labels)), activation='softmax')
])
model.compile(optimizer=RMSprop(learning_rate=0.0001), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.summary()
code
16115621/cell_9
[ "image_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.reset_index(drop=True, inplace=True)
train['SalePrice'] = np.log1p(train['SalePrice'])
train['SalePrice'].hist(bins=50)
y = train['SalePrice'].reset_index(drop=True)
code
16115621/cell_6
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
test.describe()
code
16115621/cell_11
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.reset_index(drop=True, inplace=True)
train['SalePrice'] = np.log1p(train['SalePrice'])
y = train['SalePrice'].reset_index(drop=True)
train = train.drop(['Id', 'SalePrice'], axis=1)
test = test.drop(['Id'], axis=1)
x = pd.concat([train, test]).reset_index(drop=True)
x.info()
code
16115621/cell_7
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train['SalePrice'].hist(bins=50)
code
16115621/cell_15
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt  # visualization
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns  # visualization

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.reset_index(drop=True, inplace=True)
train['SalePrice'] = np.log1p(train['SalePrice'])
y = train['SalePrice'].reset_index(drop=True)
train = train.drop(['Id', 'SalePrice'], axis=1)
test = test.drop(['Id'], axis=1)
x = pd.concat([train, test]).reset_index(drop=True)
x['MSSubClass'] = x['MSSubClass'].apply(str)
x['YrSold'] = x['YrSold'].astype(str)
x['MoSold'] = x['MoSold'].astype(str)
x['Functional'] = x['Functional'].fillna('Typ')
x['Electrical'] = x['Electrical'].fillna('SBrkr')
x['KitchenQual'] = x['KitchenQual'].fillna('TA')
x['Exterior1st'] = x['Exterior1st'].fillna(x['Exterior1st'].mode()[0])
x['Exterior2nd'] = x['Exterior2nd'].fillna(x['Exterior2nd'].mode()[0])
x['SaleType'] = x['SaleType'].fillna(x['SaleType'].mode()[0])
for col in ('GarageYrBlt', 'GarageArea', 'GarageCars'):
    x[col] = x[col].fillna(0)
for col in ['GarageType', 'GarageFinish', 'GarageQual', 'GarageCond']:
    x[col] = x[col].fillna('None')
for col in ('BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath'):
    x[col] = x[col].fillna(0)
for col in ('BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'):
    x[col] = x[col].fillna('None')
objects = []
for i in x.columns:
    if x[i].dtype == object:
        objects.append(i)
x.update(x[objects].fillna('None'))
numeric_dtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
numerics = []
for i in x.columns:
    if x[i].dtype in numeric_dtypes:
        numerics.append(i)
x.update(x[numerics].fillna(0))
x['total_sf'] = x['TotalBsmtSF'] + x['BsmtFinSF1'] + x['BsmtFinSF2'] + x['1stFlrSF'] + x['2ndFlrSF']
x['total_bathrooms'] = x['FullBath'] + 0.5 * x['HalfBath'] + x['BsmtFullBath'] + 0.5 * x['BsmtHalfBath']
x['total_porch_sf'] = x['OpenPorchSF'] + x['3SsnPorch'] + x['EnclosedPorch'] + x['ScreenPorch'] + x['WoodDeckSF']
x['hasPool'] = x['PoolArea'].apply(lambda x: 1 if x > 0 else 0)
x['has2ndFloor'] = x['2ndFlrSF'].apply(lambda x: 1 if x > 0 else 0)
x['hasGarage'] = x['GarageArea'].apply(lambda x: 1 if x > 0 else 0)
x['hasBasement'] = x['TotalBsmtSF'].apply(lambda x: 1 if x > 0 else 0)
x['hasFireplace'] = x['Fireplaces'].apply(lambda x: 1 if x > 0 else 0)
column_print = ['LotFrontage', 'LotArea', 'OverallQual', 'OverallQual']
f, axes = plt.subplots(2, 2, figsize=(7, 7), sharex=True)
numeric_plot = []  # collects the plotted columns (was undefined in the original cell)
for i in x.columns:
    if i in column_print:
        sns.distplot(x[i])
        numeric_plot.append(i)
code
16115621/cell_14
[ "image_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.reset_index(drop=True, inplace=True)
train['SalePrice'] = np.log1p(train['SalePrice'])
y = train['SalePrice'].reset_index(drop=True)
train = train.drop(['Id', 'SalePrice'], axis=1)
test = test.drop(['Id'], axis=1)
x = pd.concat([train, test]).reset_index(drop=True)
x['MSSubClass'] = x['MSSubClass'].apply(str)
x['YrSold'] = x['YrSold'].astype(str)
x['MoSold'] = x['MoSold'].astype(str)
x['Functional'] = x['Functional'].fillna('Typ')
x['Electrical'] = x['Electrical'].fillna('SBrkr')
x['KitchenQual'] = x['KitchenQual'].fillna('TA')
x['Exterior1st'] = x['Exterior1st'].fillna(x['Exterior1st'].mode()[0])
x['Exterior2nd'] = x['Exterior2nd'].fillna(x['Exterior2nd'].mode()[0])
x['SaleType'] = x['SaleType'].fillna(x['SaleType'].mode()[0])
for col in ('GarageYrBlt', 'GarageArea', 'GarageCars'):
    x[col] = x[col].fillna(0)
for col in ['GarageType', 'GarageFinish', 'GarageQual', 'GarageCond']:
    x[col] = x[col].fillna('None')
for col in ('BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath'):
    x[col] = x[col].fillna(0)
for col in ('BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'):
    x[col] = x[col].fillna('None')
objects = []
for i in x.columns:
    if x[i].dtype == object:
        objects.append(i)
x.update(x[objects].fillna('None'))
numeric_dtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
numerics = []
for i in x.columns:
    if x[i].dtype in numeric_dtypes:
        numerics.append(i)
x.update(x[numerics].fillna(0))
x['total_sf'] = x['TotalBsmtSF'] + x['BsmtFinSF1'] + x['BsmtFinSF2'] + x['1stFlrSF'] + x['2ndFlrSF']
x['total_bathrooms'] = x['FullBath'] + 0.5 * x['HalfBath'] + x['BsmtFullBath'] + 0.5 * x['BsmtHalfBath']
x['total_porch_sf'] = x['OpenPorchSF'] + x['3SsnPorch'] + x['EnclosedPorch'] + x['ScreenPorch'] + x['WoodDeckSF']
x['hasPool'] = x['PoolArea'].apply(lambda x: 1 if x > 0 else 0)
x['has2ndFloor'] = x['2ndFlrSF'].apply(lambda x: 1 if x > 0 else 0)
x['hasGarage'] = x['GarageArea'].apply(lambda x: 1 if x > 0 else 0)
x['hasBasement'] = x['TotalBsmtSF'].apply(lambda x: 1 if x > 0 else 0)
x['hasFireplace'] = x['Fireplaces'].apply(lambda x: 1 if x > 0 else 0)
x.describe()
code
16115621/cell_10
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.reset_index(drop=True, inplace=True)
train['SalePrice'] = np.log1p(train['SalePrice'])
y = train['SalePrice'].reset_index(drop=True)
train = train.drop(['Id', 'SalePrice'], axis=1)
test = test.drop(['Id'], axis=1)
x = pd.concat([train, test]).reset_index(drop=True)
x.describe()
code
16115621/cell_12
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.reset_index(drop=True, inplace=True)
train['SalePrice'] = np.log1p(train['SalePrice'])
y = train['SalePrice'].reset_index(drop=True)
train = train.drop(['Id', 'SalePrice'], axis=1)
test = test.drop(['Id'], axis=1)
x = pd.concat([train, test]).reset_index(drop=True)
x['MSSubClass'] = x['MSSubClass'].apply(str)
x['YrSold'] = x['YrSold'].astype(str)
x['MoSold'] = x['MoSold'].astype(str)
x['Functional'] = x['Functional'].fillna('Typ')
x['Electrical'] = x['Electrical'].fillna('SBrkr')
x['KitchenQual'] = x['KitchenQual'].fillna('TA')
x['Exterior1st'] = x['Exterior1st'].fillna(x['Exterior1st'].mode()[0])
x['Exterior2nd'] = x['Exterior2nd'].fillna(x['Exterior2nd'].mode()[0])
x['SaleType'] = x['SaleType'].fillna(x['SaleType'].mode()[0])
for col in ('GarageYrBlt', 'GarageArea', 'GarageCars'):
    x[col] = x[col].fillna(0)
for col in ['GarageType', 'GarageFinish', 'GarageQual', 'GarageCond']:
    x[col] = x[col].fillna('None')
for col in ('BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath'):
    x[col] = x[col].fillna(0)
for col in ('BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'):
    x[col] = x[col].fillna('None')
objects = []
for i in x.columns:
    if x[i].dtype == object:
        objects.append(i)
x.update(x[objects].fillna('None'))
numeric_dtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
numerics = []
for i in x.columns:
    if x[i].dtype in numeric_dtypes:
        numerics.append(i)
x.update(x[numerics].fillna(0))
x.info()
code
16115621/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.describe()
code
72097728/cell_9
[ "text_plain_output_1.png" ]
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestRegressor
from sklearn.impute import SimpleImputer
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
import pandas as pd

df = pd.read_csv('../input/30-days-of-ml/train.csv')
test_df = pd.read_csv('../input/30-days-of-ml/test.csv')
sample_df = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
y = df.target
X = df.drop(['target'], axis=1)
X_train_full, X_valid_full, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)
categorical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].nunique() < 5 and X_train_full[cname].dtype == 'object']
numerical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].dtype in ['int64', 'float64']]
my_cols = categorical_cols + numerical_cols
X_train = X_train_full[my_cols].copy()
X_valid = X_valid_full[my_cols].copy()
numerical_transformer = SimpleImputer(strategy='constant')
categorical_transformer = Pipeline(steps=[('imputer', SimpleImputer(strategy='most_frequent')), ('onehot', OneHotEncoder(handle_unknown='ignore'))])
preprocessor = ColumnTransformer(transformers=[('num', numerical_transformer, numerical_cols), ('cat', categorical_transformer, categorical_cols)])
model = RandomForestRegressor(n_estimators=100, random_state=0)
my_pipeline = Pipeline(steps=[('preprocessor', preprocessor), ('model', model)])
my_pipeline.fit(X_train, y_train)
preds = my_pipeline.predict(X_valid)
score = mean_squared_error(y_valid, preds)
print('MSE:', score)
code
72097728/cell_3
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
import pandas as pd
import pandas as pd
from sklearn.model_selection import train_test_split
df = pd.read_csv('../input/30-days-of-ml/train.csv')
test_df = pd.read_csv('../input/30-days-of-ml/test.csv')
sample_df = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
y = df.target
X = df.drop(['target'], axis=1)
X_train_full, X_valid_full, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)
categorical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].nunique() < 5 and X_train_full[cname].dtype == 'object']
numerical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].dtype in ['int64', 'float64']]
my_cols = categorical_cols + numerical_cols
X_train = X_train_full[my_cols].copy()
X_valid = X_valid_full[my_cols].copy()
[cname for cname in X_train_full.columns if X_train_full[cname].nunique() < 5 and X_train_full[cname].dtype == 'object']
code
2003139/cell_9
[ "text_plain_output_1.png" ]
from PIL import Image
from keras import optimizers, losses, activations, models
from keras.layers import Convolution2D, Dense, Input, Flatten, Dropout, MaxPooling2D, BatchNormalization, GlobalMaxPool2D, Concatenate
from random import shuffle
import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
list_paths = []
for subdir, dirs, files in os.walk('../input'):
    for file in files:
        filepath = subdir + os.sep + file
        list_paths.append(filepath)
list_train = [filepath for filepath in list_paths if 'train/' in filepath]
shuffle(list_train)
list_test = [filepath for filepath in list_paths if 'test/' in filepath]
list_train = list_train
list_test = list_test
index = [os.path.basename(filepath) for filepath in list_test]
list_classes = list(set([os.path.dirname(filepath).split(os.sep)[-1] for filepath in list_paths if 'train' in filepath]))

def get_class_from_path(filepath):
    return os.path.dirname(filepath).split(os.sep)[-1]

def read_and_resize(filepath):
    im_array = np.array(Image.open(filepath), dtype='uint8')
    pil_im = Image.fromarray(im_array)
    new_array = np.array(pil_im.resize((256, 256)))
    return new_array / 255

def label_transform(labels):
    labels = pd.get_dummies(pd.Series(labels))
    label_index = labels.columns.values
    return (labels, label_index)

from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau, TensorBoard
from keras import optimizers, losses, activations, models
from keras.layers import Convolution2D, Dense, Input, Flatten, Dropout, MaxPooling2D, BatchNormalization, GlobalMaxPool2D, Concatenate
input_shape = (256, 256, 3)
nclass = len(label_index)

def get_model():
    nclass = len(label_index)
    inp = Input(shape=input_shape)
    norm_inp = BatchNormalization()(inp)
    img_1 = Convolution2D(16, kernel_size=3, activation=activations.relu, padding='same')(norm_inp)
    img_1 = Convolution2D(16, kernel_size=3, activation=activations.relu, padding='same')(img_1)
    img_1 = MaxPooling2D(pool_size=(3, 3))(img_1)
    img_1 = Dropout(rate=0.2)(img_1)
    img_1 = Convolution2D(32, kernel_size=3, activation=activations.relu, padding='same')(img_1)
    img_1 = Convolution2D(32, kernel_size=3, activation=activations.relu, padding='same')(img_1)
    img_1 = MaxPooling2D(pool_size=(3, 3))(img_1)
    img_1 = Dropout(rate=0.2)(img_1)
    img_1 = Convolution2D(64, kernel_size=2, activation=activations.relu, padding='same')(img_1)
    img_1 = Convolution2D(20, kernel_size=2, activation=activations.relu, padding='same')(img_1)
    img_1 = GlobalMaxPool2D()(img_1)
    img_1 = Dropout(rate=0.2)(img_1)
    dense_1 = Dense(20, activation=activations.relu)(img_1)
    dense_1 = Dense(nclass, activation=activations.softmax)(dense_1)
    model = models.Model(inputs=inp, outputs=dense_1)
    opt = optimizers.Adam()
    model.compile(optimizer=opt, loss=losses.categorical_crossentropy, metrics=['acc'])
    model.summary()
    return model
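# Note: nclass = len(label_index) at module level assumes label_index was
# already created by running label_transform (cell_10 below builds it via
# y, label_index = label_transform(labels)); within this cell alone the
# name is undefined.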
code
2003139/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd
import os
from PIL import Image
from skimage.transform import resize
from random import shuffle
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
code
2003139/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
from PIL import Image
from keras import optimizers, losses, activations, models
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau, TensorBoard
from keras.layers import Convolution2D, Dense, Input, Flatten, Dropout, MaxPooling2D, BatchNormalization, GlobalMaxPool2D, Concatenate
from random import shuffle
import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
list_paths = []
for subdir, dirs, files in os.walk('../input'):
    for file in files:
        filepath = subdir + os.sep + file
        list_paths.append(filepath)
list_train = [filepath for filepath in list_paths if 'train/' in filepath]
shuffle(list_train)
list_test = [filepath for filepath in list_paths if 'test/' in filepath]
list_train = list_train
list_test = list_test
index = [os.path.basename(filepath) for filepath in list_test]
list_classes = list(set([os.path.dirname(filepath).split(os.sep)[-1] for filepath in list_paths if 'train' in filepath]))

def get_class_from_path(filepath):
    return os.path.dirname(filepath).split(os.sep)[-1]

def read_and_resize(filepath):
    im_array = np.array(Image.open(filepath), dtype='uint8')
    pil_im = Image.fromarray(im_array)
    new_array = np.array(pil_im.resize((256, 256)))
    return new_array / 255

def label_transform(labels):
    labels = pd.get_dummies(pd.Series(labels))
    label_index = labels.columns.values
    return (labels, label_index)

X_train = np.array([read_and_resize(filepath) for filepath in list_train])
X_test = np.array([read_and_resize(filepath) for filepath in list_test])
labels = [get_class_from_path(filepath) for filepath in list_train]
y, label_index = label_transform(labels)
y = np.array(y)
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau, TensorBoard
from keras import optimizers, losses, activations, models
from keras.layers import Convolution2D, Dense, Input, Flatten, Dropout, MaxPooling2D, BatchNormalization, GlobalMaxPool2D, Concatenate
input_shape = (256, 256, 3)
nclass = len(label_index)

def get_model():
    nclass = len(label_index)
    inp = Input(shape=input_shape)
    norm_inp = BatchNormalization()(inp)
    img_1 = Convolution2D(16, kernel_size=3, activation=activations.relu, padding='same')(norm_inp)
    img_1 = Convolution2D(16, kernel_size=3, activation=activations.relu, padding='same')(img_1)
    img_1 = MaxPooling2D(pool_size=(3, 3))(img_1)
    img_1 = Dropout(rate=0.2)(img_1)
    img_1 = Convolution2D(32, kernel_size=3, activation=activations.relu, padding='same')(img_1)
    img_1 = Convolution2D(32, kernel_size=3, activation=activations.relu, padding='same')(img_1)
    img_1 = MaxPooling2D(pool_size=(3, 3))(img_1)
    img_1 = Dropout(rate=0.2)(img_1)
    img_1 = Convolution2D(64, kernel_size=2, activation=activations.relu, padding='same')(img_1)
    img_1 = Convolution2D(20, kernel_size=2, activation=activations.relu, padding='same')(img_1)
    img_1 = GlobalMaxPool2D()(img_1)
    img_1 = Dropout(rate=0.2)(img_1)
    dense_1 = Dense(20, activation=activations.relu)(img_1)
    dense_1 = Dense(nclass, activation=activations.softmax)(dense_1)
    model = models.Model(inputs=inp, outputs=dense_1)
    opt = optimizers.Adam()
    model.compile(optimizer=opt, loss=losses.categorical_crossentropy, metrics=['acc'])
    model.summary()
    return model

model = get_model()
file_path = 'weights.best.hdf5'
checkpoint = ModelCheckpoint(file_path, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
early = EarlyStopping(monitor='val_acc', mode='max', patience=1)
callbacks_list = [checkpoint, early]
history = model.fit(X_train, y, validation_split=0.1, epochs=3, shuffle=True, verbose=2, callbacks=callbacks_list)
model.load_weights(file_path)
code
105185803/cell_1
[ "text_plain_output_1.png" ]
import pandas as pd
import plotly.express as px
import os
!pip install kaleido
code
105185803/cell_5
[ "text_html_output_2.png" ]
import pandas as pd
import plotly.express as px
stream = open('../input/bbk-kunpeng/perf_test_result.txt', 'r')
stream = stream.read()
ds = pd.DataFrame(columns=['N', 'NRHS', 'data_type', 'gflops', 'uplo'])
datatype = 'single'
N = 0
NRHS = 0
uplo = 'U'
gflops = 0.0
cnt = 0
for line in stream.split('\n'):
    if line == 'test SINGLE':
        datatype = 'single'
    elif line == 'test DOUBLE':
        datatype = 'double'
    elif line == 'test COMPLEX':
        datatype = 'complex'
    elif line == 'test COMPLEX16':
        datatype = 'complex16'
    elif line.startswith('N='):
        splited = line.split(',')
        N = int(splited[0].split('=')[1])
        NRHS = int(splited[1].split('=')[1])
        uplo = splited[2].split('=')[1]
    elif line.startswith('gflops'):
        gflops = float(line.split('=')[1])
        ds.loc[cnt] = [N, NRHS, datatype, gflops, uplo]
        cnt += 1
ds.to_csv('performance test.csv')
from _plotly_utils.basevalidators import textwrap
fig = px.line(ds, x='N', y='gflops', color='data_type', facet_col_wrap=2, title='Performance of BBK on 32 core')
fig.show()
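# facet_col_wrap is passed without a facet_col argument, so per
# plotly.express semantics it should have no effect and the figure
# renders as a single panel.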
code
121149832/cell_4
[ "application_vnd.jupyter.stderr_output_766.png", "application_vnd.jupyter.stderr_output_116.png", "application_vnd.jupyter.stderr_output_74.png", "application_vnd.jupyter.stderr_output_268.png", "application_vnd.jupyter.stderr_output_362.png", "text_plain_output_743.png", "text_plain_output_673.png", "text_plain_output_445.png", "text_plain_output_201.png", "text_plain_output_261.png", "text_plain_output_775.png", "text_plain_output_819.png", "text_plain_output_565.png", "application_vnd.jupyter.stderr_output_566.png", "application_vnd.jupyter.stderr_output_578.png", "application_vnd.jupyter.stderr_output_516.png", "application_vnd.jupyter.stderr_output_672.png", "text_plain_output_521.png", "text_plain_output_769.png", "text_plain_output_205.png", "application_vnd.jupyter.stderr_output_732.png", "application_vnd.jupyter.stderr_output_222.png", "application_vnd.jupyter.stderr_output_626.png", "application_vnd.jupyter.stderr_output_96.png", "text_plain_output_693.png", "application_vnd.jupyter.stderr_output_642.png", "application_vnd.jupyter.stderr_output_640.png", "text_plain_output_511.png", "application_vnd.jupyter.stderr_output_836.png", "text_plain_output_271.png", "application_vnd.jupyter.stderr_output_808.png", "text_plain_output_475.png", "application_vnd.jupyter.stderr_output_296.png", "text_plain_output_455.png", "text_plain_output_223.png", "application_vnd.jupyter.stderr_output_110.png", "text_plain_output_715.png", "text_plain_output_579.png", "text_plain_output_793.png", "text_plain_output_629.png", "text_plain_output_287.png", "application_vnd.jupyter.stderr_output_112.png", "text_plain_output_181.png", "text_plain_output_137.png", "application_vnd.jupyter.stderr_output_400.png", "application_vnd.jupyter.stderr_output_212.png", "application_vnd.jupyter.stderr_output_768.png", "application_vnd.jupyter.stderr_output_700.png", "application_vnd.jupyter.stderr_output_458.png", "application_vnd.jupyter.stderr_output_634.png", "application_vnd.jupyter.stderr_output_740.png", "text_plain_output_139.png", "application_vnd.jupyter.stderr_output_420.png", "text_plain_output_813.png", "text_plain_output_35.png", "text_plain_output_697.png", "text_plain_output_501.png", "text_plain_output_593.png", "application_vnd.jupyter.stderr_output_24.png", "application_vnd.jupyter.stderr_output_354.png", "text_plain_output_685.png", "application_vnd.jupyter.stderr_output_16.png", "application_vnd.jupyter.stderr_output_274.png", "application_vnd.jupyter.stderr_output_610.png", "application_vnd.jupyter.stderr_output_632.png", "application_vnd.jupyter.stderr_output_368.png", "text_plain_output_449.png", "text_plain_output_117.png", "application_vnd.jupyter.stderr_output_760.png", "application_vnd.jupyter.stderr_output_474.png", "application_vnd.jupyter.stderr_output_258.png", "text_plain_output_367.png", "application_vnd.jupyter.stderr_output_758.png", "application_vnd.jupyter.stderr_output_668.png", "application_vnd.jupyter.stderr_output_622.png", "text_plain_output_395.png", "application_vnd.jupyter.stderr_output_286.png", "application_vnd.jupyter.stderr_output_426.png", "text_plain_output_617.png", "application_vnd.jupyter.stderr_output_152.png", "application_vnd.jupyter.stderr_output_156.png", "text_plain_output_307.png", "application_vnd.jupyter.stderr_output_522.png", "application_vnd.jupyter.stderr_output_804.png", "application_vnd.jupyter.stderr_output_710.png", "application_vnd.jupyter.stderr_output_684.png", "application_vnd.jupyter.stderr_output_70.png", 
"application_vnd.jupyter.stderr_output_310.png", "text_plain_output_799.png", "application_vnd.jupyter.stderr_output_554.png", "text_plain_output_833.png", "application_vnd.jupyter.stderr_output_204.png", "text_plain_output_399.png", "application_vnd.jupyter.stderr_output_284.png", "text_plain_output_671.png", "application_vnd.jupyter.stderr_output_124.png", "application_vnd.jupyter.stderr_output_786.png", "text_plain_output_195.png", "application_vnd.jupyter.stderr_output_498.png", "application_vnd.jupyter.stderr_output_838.png", "text_plain_output_471.png", "text_plain_output_219.png", "application_vnd.jupyter.stderr_output_52.png", "text_plain_output_485.png", "text_plain_output_237.png", "text_plain_output_43.png", "application_vnd.jupyter.stderr_output_172.png", "text_plain_output_187.png", "text_plain_output_309.png", "application_vnd.jupyter.stderr_output_512.png", "text_plain_output_143.png", "application_vnd.jupyter.stderr_output_348.png", "text_plain_output_37.png", "application_vnd.jupyter.stderr_output_32.png", "application_vnd.jupyter.stderr_output_246.png", "application_vnd.jupyter.stderr_output_704.png", "application_vnd.jupyter.stderr_output_502.png", "application_vnd.jupyter.stderr_output_722.png", "application_vnd.jupyter.stderr_output_176.png", "application_vnd.jupyter.stderr_output_356.png", "text_plain_output_477.png", "application_vnd.jupyter.stderr_output_802.png", "text_plain_output_627.png", "application_vnd.jupyter.stderr_output_506.png", "text_plain_output_613.png", "text_plain_output_147.png", "text_plain_output_443.png", "text_plain_output_327.png", "application_vnd.jupyter.stderr_output_346.png", "text_plain_output_79.png", "text_plain_output_331.png", "application_vnd.jupyter.stderr_output_382.png", "text_plain_output_809.png", "application_vnd.jupyter.stderr_output_170.png", "application_vnd.jupyter.stderr_output_132.png", "text_plain_output_5.png", "text_plain_output_75.png", "application_vnd.jupyter.stderr_output_692.png", "application_vnd.jupyter.stderr_output_540.png", "application_vnd.jupyter.stderr_output_48.png", "application_vnd.jupyter.stderr_output_236.png", "application_vnd.jupyter.stderr_output_418.png", "application_vnd.jupyter.stderr_output_636.png", "text_plain_output_167.png", "application_vnd.jupyter.stderr_output_550.png", "text_plain_output_213.png", "text_plain_output_73.png", "text_plain_output_687.png", "application_vnd.jupyter.stderr_output_378.png", "application_vnd.jupyter.stderr_output_764.png", "application_vnd.jupyter.stderr_output_432.png", "text_plain_output_321.png", "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_472.png", "application_vnd.jupyter.stderr_output_774.png", "application_vnd.jupyter.stderr_output_776.png", "application_vnd.jupyter.stderr_output_814.png", "text_plain_output_115.png", "application_vnd.jupyter.stderr_output_504.png", "text_plain_output_407.png", "application_vnd.jupyter.stderr_output_552.png", "application_vnd.jupyter.stderr_output_694.png", "text_plain_output_355.png", "application_vnd.jupyter.stderr_output_800.png", "text_plain_output_15.png", "application_vnd.jupyter.stderr_output_618.png", "text_plain_output_133.png", "application_vnd.jupyter.stderr_output_392.png", "application_vnd.jupyter.stderr_output_806.png", "application_vnd.jupyter.stderr_output_690.png", "text_plain_output_771.png", "text_plain_output_651.png", "application_vnd.jupyter.stderr_output_666.png", "application_vnd.jupyter.stderr_output_414.png", 
"application_vnd.jupyter.stderr_output_436.png", "application_vnd.jupyter.stderr_output_608.png", "text_plain_output_437.png", "application_vnd.jupyter.stderr_output_146.png", "text_plain_output_699.png", "text_plain_output_387.png", "text_plain_output_555.png", "application_vnd.jupyter.stderr_output_324.png", "application_vnd.jupyter.stderr_output_528.png", "application_vnd.jupyter.stderr_output_360.png", "text_plain_output_759.png", "application_vnd.jupyter.stderr_output_484.png", "application_vnd.jupyter.stderr_output_674.png", "text_plain_output_375.png", "text_plain_output_831.png", "text_plain_output_659.png", "text_plain_output_515.png", "text_plain_output_157.png", "application_vnd.jupyter.stderr_output_190.png", "application_vnd.jupyter.stderr_output_380.png", "text_plain_output_773.png", "application_vnd.jupyter.stderr_output_270.png", "text_plain_output_317.png", "text_plain_output_251.png", "application_vnd.jupyter.stderr_output_344.png", "application_vnd.jupyter.stderr_output_18.png", "text_plain_output_423.png", "application_vnd.jupyter.stderr_output_86.png", "text_plain_output_9.png", "application_vnd.jupyter.stderr_output_334.png", "application_vnd.jupyter.stderr_output_526.png", "text_plain_output_633.png", "application_vnd.jupyter.stderr_output_754.png", "application_vnd.jupyter.stderr_output_782.png", "application_vnd.jupyter.stderr_output_38.png", "application_vnd.jupyter.stderr_output_482.png", "application_vnd.jupyter.stderr_output_568.png", "text_plain_output_325.png", "application_vnd.jupyter.stderr_output_240.png", "text_plain_output_785.png", "text_plain_output_203.png", "text_plain_output_505.png", "application_vnd.jupyter.stderr_output_272.png", "application_vnd.jupyter.stderr_output_88.png", "text_plain_output_603.png", "text_plain_output_655.png", "text_plain_output_119.png", "text_plain_output_373.png", "application_vnd.jupyter.stderr_output_148.png", "text_plain_output_741.png", "application_vnd.jupyter.stderr_output_520.png", "text_plain_output_551.png", "text_plain_output_583.png", "application_vnd.jupyter.stderr_output_58.png", "application_vnd.jupyter.stderr_output_638.png", "application_vnd.jupyter.stderr_output_66.png", "text_plain_output_131.png", "text_plain_output_817.png", "text_plain_output_343.png", "application_vnd.jupyter.stderr_output_724.png", "application_vnd.jupyter.stderr_output_718.png", "text_plain_output_123.png", "application_vnd.jupyter.stderr_output_68.png", "text_plain_output_31.png", "application_vnd.jupyter.stderr_output_106.png", "text_plain_output_379.png", "application_vnd.jupyter.stderr_output_224.png", "text_plain_output_281.png", "text_plain_output_639.png", "application_vnd.jupyter.stderr_output_4.png", "text_plain_output_557.png", "application_vnd.jupyter.stderr_output_26.png", "application_vnd.jupyter.stderr_output_178.png", "text_plain_output_273.png", "application_vnd.jupyter.stderr_output_322.png", "application_vnd.jupyter.stderr_output_832.png", "text_plain_output_263.png", "text_plain_output_229.png", "application_vnd.jupyter.stderr_output_384.png", "text_plain_output_111.png", "application_vnd.jupyter.stderr_output_406.png", "application_vnd.jupyter.stderr_output_620.png", "application_vnd.jupyter.stderr_output_238.png", "application_vnd.jupyter.stderr_output_564.png", "text_plain_output_753.png", "text_plain_output_669.png", "text_plain_output_461.png", "application_vnd.jupyter.stderr_output_822.png", "application_vnd.jupyter.stderr_output_650.png", "application_vnd.jupyter.stderr_output_450.png", 
"application_vnd.jupyter.stderr_output_524.png", "text_plain_output_589.png", "text_plain_output_101.png", "application_vnd.jupyter.stderr_output_796.png", "application_vnd.jupyter.stderr_output_490.png", "text_plain_output_169.png", "text_plain_output_531.png", "text_plain_output_161.png", "text_plain_output_489.png", "application_vnd.jupyter.stderr_output_136.png", "text_plain_output_305.png", "text_plain_output_275.png", "text_plain_output_779.png", "application_vnd.jupyter.stderr_output_6.png", "text_plain_output_795.png", "text_plain_output_725.png", "text_plain_output_301.png", "application_vnd.jupyter.stderr_output_422.png", "application_vnd.jupyter.stderr_output_162.png", "application_vnd.jupyter.stderr_output_376.png", "application_vnd.jupyter.stderr_output_676.png", "application_vnd.jupyter.stderr_output_812.png", "application_vnd.jupyter.stderr_output_794.png", "application_vnd.jupyter.stderr_output_232.png", "text_plain_output_691.png", "application_vnd.jupyter.stderr_output_260.png", "text_plain_output_467.png", "text_plain_output_221.png", "application_vnd.jupyter.stderr_output_576.png", "application_vnd.jupyter.stderr_output_134.png", "text_plain_output_155.png", "application_vnd.jupyter.stderr_output_194.png", "text_plain_output_65.png", "text_plain_output_803.png", "text_plain_output_419.png", "application_vnd.jupyter.stderr_output_302.png", "text_plain_output_215.png", "application_vnd.jupyter.stderr_output_664.png", "application_vnd.jupyter.stderr_output_546.png", "application_vnd.jupyter.stderr_output_792.png", "text_plain_output_189.png", "text_plain_output_415.png", "text_plain_output_637.png", "application_vnd.jupyter.stderr_output_476.png", "text_plain_output_13.png", "application_vnd.jupyter.stderr_output_478.png", "application_vnd.jupyter.stderr_output_656.png", "text_plain_output_107.png", "application_vnd.jupyter.stderr_output_336.png", "application_vnd.jupyter.stderr_output_402.png", "application_vnd.jupyter.stderr_output_542.png", "application_vnd.jupyter.stderr_output_738.png", "text_plain_output_567.png", "application_vnd.jupyter.stderr_output_518.png", "text_plain_output_695.png", "application_vnd.jupyter.stderr_output_316.png", "application_vnd.jupyter.stderr_output_468.png", "application_vnd.jupyter.stderr_output_662.png", "application_vnd.jupyter.stderr_output_750.png", "text_plain_output_417.png", "text_plain_output_707.png", "text_plain_output_545.png", "application_vnd.jupyter.stderr_output_714.png", "text_plain_output_393.png", "application_vnd.jupyter.stderr_output_762.png", "application_vnd.jupyter.stderr_output_570.png", "application_vnd.jupyter.stderr_output_404.png", "application_vnd.jupyter.stderr_output_820.png", "text_plain_output_243.png", "application_vnd.jupyter.stderr_output_330.png", "text_plain_output_611.png", "application_vnd.jupyter.stderr_output_366.png", "application_vnd.jupyter.stderr_output_278.png", "text_plain_output_45.png", "text_plain_output_599.png", "application_vnd.jupyter.stderr_output_716.png", "text_plain_output_665.png", "application_vnd.jupyter.stderr_output_174.png", "text_plain_output_257.png", "text_plain_output_405.png", "text_plain_output_353.png", "application_vnd.jupyter.stderr_output_454.png", "text_plain_output_277.png", "text_plain_output_457.png", "application_vnd.jupyter.stderr_output_510.png", "text_plain_output_739.png", "application_vnd.jupyter.stderr_output_12.png", "text_plain_output_361.png", "text_plain_output_171.png", "text_plain_output_837.png", "application_vnd.jupyter.stderr_output_720.png", 
"application_vnd.jupyter.stderr_output_574.png", "text_plain_output_561.png", "text_plain_output_431.png", "application_vnd.jupyter.stderr_output_644.png", "application_vnd.jupyter.stderr_output_342.png", "text_plain_output_159.png", "text_plain_output_713.png", "text_plain_output_29.png", "text_plain_output_359.png", "text_plain_output_529.png", "text_plain_output_347.png", "application_vnd.jupyter.stderr_output_82.png", "text_plain_output_763.png", "application_vnd.jupyter.stderr_output_288.png", "text_plain_output_783.png", "text_plain_output_129.png", "application_vnd.jupyter.stderr_output_358.png", "application_vnd.jupyter.stderr_output_398.png", "application_vnd.jupyter.stderr_output_772.png", "application_vnd.jupyter.stderr_output_388.png", "text_plain_output_349.png", "application_vnd.jupyter.stderr_output_332.png", "application_vnd.jupyter.stderr_output_72.png", "text_plain_output_483.png", "text_plain_output_363.png", "text_plain_output_289.png", "application_vnd.jupyter.stderr_output_290.png", "application_vnd.jupyter.stderr_output_586.png", "text_plain_output_255.png", "application_vnd.jupyter.stderr_output_8.png", "text_plain_output_329.png", "text_plain_output_49.png", "application_vnd.jupyter.stderr_output_308.png", "text_plain_output_791.png", "text_plain_output_63.png", "application_vnd.jupyter.stderr_output_394.png", "application_vnd.jupyter.stderr_output_580.png", "text_plain_output_27.png", "text_plain_output_177.png", "text_plain_output_607.png", "application_vnd.jupyter.stderr_output_306.png", "application_vnd.jupyter.stderr_output_604.png", "application_vnd.jupyter.stderr_output_424.png", "application_vnd.jupyter.stderr_output_534.png", "text_plain_output_681.png", "application_vnd.jupyter.stderr_output_834.png", "text_plain_output_333.png", "text_plain_output_581.png", "application_vnd.jupyter.stderr_output_592.png", "application_vnd.jupyter.stderr_output_824.png", "application_vnd.jupyter.stderr_output_80.png", "text_plain_output_269.png", "application_vnd.jupyter.stderr_output_300.png", "application_vnd.jupyter.stderr_output_818.png", "text_plain_output_503.png", "text_plain_output_735.png", "text_plain_output_153.png", "application_vnd.jupyter.stderr_output_784.png", "application_vnd.jupyter.stderr_output_816.png", "text_plain_output_57.png", "application_vnd.jupyter.stderr_output_600.png", "application_vnd.jupyter.stderr_output_728.png", "text_plain_output_469.png", "application_vnd.jupyter.stderr_output_10.png", "application_vnd.jupyter.stderr_output_396.png", "text_plain_output_357.png", "text_plain_output_21.png", "application_vnd.jupyter.stderr_output_464.png", "application_vnd.jupyter.stderr_output_220.png", "application_vnd.jupyter.stderr_output_810.png", "text_plain_output_47.png", "text_plain_output_623.png", "application_vnd.jupyter.stderr_output_98.png", "text_plain_output_121.png", "text_plain_output_25.png", "text_plain_output_523.png", "text_plain_output_401.png", "text_plain_output_77.png", "text_plain_output_421.png", "application_vnd.jupyter.stderr_output_34.png", "text_plain_output_535.png", "text_plain_output_527.png", "text_plain_output_183.png", "application_vnd.jupyter.stderr_output_536.png", "text_plain_output_149.png", "text_plain_output_383.png", "text_plain_output_207.png", "application_vnd.jupyter.stderr_output_756.png", "application_vnd.jupyter.stderr_output_444.png", "application_vnd.jupyter.stderr_output_90.png", "text_plain_output_391.png", "application_vnd.jupyter.stderr_output_538.png", 
"application_vnd.jupyter.stderr_output_352.png", "text_plain_output_413.png", "text_plain_output_709.png", "application_vnd.jupyter.stderr_output_584.png", "application_vnd.jupyter.stderr_output_144.png", "application_vnd.jupyter.stderr_output_140.png", "text_plain_output_663.png", "text_plain_output_87.png", "text_plain_output_3.png", "text_plain_output_217.png", "text_plain_output_657.png", "text_plain_output_427.png", "application_vnd.jupyter.stderr_output_214.png", "application_vnd.jupyter.stderr_output_44.png", "text_plain_output_141.png", "application_vnd.jupyter.stderr_output_742.png", "application_vnd.jupyter.stderr_output_590.png", "text_plain_output_225.png", "text_plain_output_701.png", "text_plain_output_191.png", "text_plain_output_609.png", "application_vnd.jupyter.stderr_output_320.png", "text_plain_output_737.png", "application_vnd.jupyter.stderr_output_544.png", "text_plain_output_821.png", "text_plain_output_259.png", "application_vnd.jupyter.stderr_output_440.png", "text_plain_output_447.png", "text_plain_output_801.png", "application_vnd.jupyter.stderr_output_160.png", "text_plain_output_283.png", "text_plain_output_495.png", "text_plain_output_247.png", "application_vnd.jupyter.stderr_output_42.png", "text_plain_output_835.png", "text_plain_output_113.png", "text_plain_output_371.png", "application_vnd.jupyter.stderr_output_602.png", "application_vnd.jupyter.stderr_output_298.png", "application_vnd.jupyter.stderr_output_598.png", "application_vnd.jupyter.stderr_output_192.png", "application_vnd.jupyter.stderr_output_770.png", "text_plain_output_827.png", "text_plain_output_479.png", "application_vnd.jupyter.stderr_output_678.png", "application_vnd.jupyter.stderr_output_702.png", "text_plain_output_81.png", "text_plain_output_69.png", "application_vnd.jupyter.stderr_output_670.png", "application_vnd.jupyter.stderr_output_84.png", "text_plain_output_667.png", "application_vnd.jupyter.stderr_output_180.png", "text_plain_output_175.png", "text_plain_output_165.png", "text_plain_output_767.png", "text_plain_output_145.png", "application_vnd.jupyter.stderr_output_230.png", "text_plain_output_125.png", "application_vnd.jupyter.stderr_output_428.png", "application_vnd.jupyter.stderr_output_314.png", "application_vnd.jupyter.stderr_output_120.png", "text_plain_output_487.png", "application_vnd.jupyter.stderr_output_778.png", "application_vnd.jupyter.stderr_output_752.png", "text_plain_output_595.png", "text_plain_output_643.png", "text_plain_output_575.png", "application_vnd.jupyter.stderr_output_558.png", "text_plain_output_197.png", "application_vnd.jupyter.stderr_output_60.png", "application_vnd.jupyter.stderr_output_648.png", "application_vnd.jupyter.stderr_output_216.png", "text_plain_output_777.png", "text_plain_output_315.png", "text_plain_output_429.png", "application_vnd.jupyter.stderr_output_372.png", "application_vnd.jupyter.stderr_output_202.png", "text_plain_output_517.png", "text_plain_output_823.png", "text_plain_output_433.png", "text_plain_output_7.png", "application_vnd.jupyter.stderr_output_184.png", "application_vnd.jupyter.stderr_output_594.png", "text_plain_output_513.png", "text_plain_output_745.png", "application_vnd.jupyter.stderr_output_390.png", "application_vnd.jupyter.stderr_output_596.png", "text_plain_output_645.png", "text_plain_output_411.png", "text_plain_output_91.png", "text_plain_output_787.png", "application_vnd.jupyter.stderr_output_688.png", "text_plain_output_245.png", "application_vnd.jupyter.stderr_output_660.png", 
"text_plain_output_497.png", "text_plain_output_789.png", "application_vnd.jupyter.stderr_output_514.png", "application_vnd.jupyter.stderr_output_826.png", "application_vnd.jupyter.stderr_output_30.png", "text_plain_output_265.png", "application_vnd.jupyter.stderr_output_416.png", "application_vnd.jupyter.stderr_output_108.png", "application_vnd.jupyter.stderr_output_62.png", "text_plain_output_435.png", "text_plain_output_689.png", "application_vnd.jupyter.stderr_output_328.png", "text_plain_output_59.png", "text_plain_output_409.png", "text_plain_output_103.png", "text_plain_output_71.png", "application_vnd.jupyter.stderr_output_470.png", "text_plain_output_751.png", "text_plain_output_539.png", "application_vnd.jupyter.stderr_output_250.png", "application_vnd.jupyter.stderr_output_686.png", "text_plain_output_211.png", "application_vnd.jupyter.stderr_output_242.png", "application_vnd.jupyter.stderr_output_654.png", "application_vnd.jupyter.stderr_output_294.png", "text_plain_output_601.png", "application_vnd.jupyter.stderr_output_588.png", "text_plain_output_541.png", "application_vnd.jupyter.stderr_output_612.png", "application_vnd.jupyter.stderr_output_130.png", "application_vnd.jupyter.stderr_output_28.png", "application_vnd.jupyter.stderr_output_364.png", "application_vnd.jupyter.stderr_output_448.png", "application_vnd.jupyter.stderr_output_658.png", "application_vnd.jupyter.stderr_output_680.png", "text_plain_output_749.png", "text_plain_output_653.png", "text_plain_output_543.png", "text_plain_output_781.png", "text_plain_output_451.png", "application_vnd.jupyter.stderr_output_256.png", "text_plain_output_109.png", "application_vnd.jupyter.stderr_output_46.png", "application_vnd.jupyter.stderr_output_780.png", "text_plain_output_459.png", "text_plain_output_615.png", "text_plain_output_41.png", "application_vnd.jupyter.stderr_output_206.png", "application_vnd.jupyter.stderr_output_456.png", "text_plain_output_253.png", "application_vnd.jupyter.stderr_output_234.png", "application_vnd.jupyter.stderr_output_734.png", "application_vnd.jupyter.stderr_output_312.png", "text_plain_output_723.png", "application_vnd.jupyter.stderr_output_682.png", "application_vnd.jupyter.stderr_output_630.png", "text_plain_output_291.png", "application_vnd.jupyter.stderr_output_616.png", "application_vnd.jupyter.stderr_output_606.png", "application_vnd.jupyter.stderr_output_708.png", "text_plain_output_241.png", "text_plain_output_231.png", "text_plain_output_533.png", "text_plain_output_345.png", "text_plain_output_649.png", "application_vnd.jupyter.stderr_output_252.png", "application_vnd.jupyter.stderr_output_64.png", "application_vnd.jupyter.stderr_output_76.png", "text_plain_output_209.png", "text_plain_output_185.png", "application_vnd.jupyter.stderr_output_262.png", "text_plain_output_85.png", "text_plain_output_765.png", "text_plain_output_605.png", "text_plain_output_549.png", "text_plain_output_67.png", "text_plain_output_797.png", "text_plain_output_573.png", "text_plain_output_297.png", "text_plain_output_53.png", "text_plain_output_313.png", "application_vnd.jupyter.stderr_output_480.png", "application_vnd.jupyter.stderr_output_798.png", "application_vnd.jupyter.stderr_output_572.png", "application_vnd.jupyter.stderr_output_386.png", "application_vnd.jupyter.stderr_output_20.png", "text_plain_output_635.png", "text_plain_output_703.png", "text_plain_output_711.png", "text_plain_output_193.png", "text_plain_output_441.png", "text_plain_output_403.png", 
"application_vnd.jupyter.stderr_output_338.png", "application_vnd.jupyter.stderr_output_126.png", "application_vnd.jupyter.stderr_output_560.png", "text_plain_output_23.png", "text_plain_output_761.png", "application_vnd.jupyter.stderr_output_218.png", "application_vnd.jupyter.stderr_output_446.png", "application_vnd.jupyter.stderr_output_494.png", "text_plain_output_173.png", "application_vnd.jupyter.stderr_output_36.png", "text_plain_output_683.png", "application_vnd.jupyter.stderr_output_100.png", "text_plain_output_235.png", "application_vnd.jupyter.stderr_output_430.png", "application_vnd.jupyter.stderr_output_266.png", "text_plain_output_151.png", "text_plain_output_89.png", "application_vnd.jupyter.stderr_output_22.png", "text_plain_output_299.png", "text_plain_output_51.png", "text_plain_output_677.png", "application_vnd.jupyter.stderr_output_748.png", "application_vnd.jupyter.stderr_output_166.png", "text_plain_output_825.png", "application_vnd.jupyter.stderr_output_508.png", "text_plain_output_525.png", "application_vnd.jupyter.stderr_output_318.png", "text_plain_output_731.png", "text_plain_output_705.png", "application_vnd.jupyter.stderr_output_292.png", "application_vnd.jupyter.stderr_output_726.png", "text_plain_output_99.png", "text_plain_output_381.png", "text_plain_output_571.png", "text_plain_output_163.png", "text_plain_output_179.png", "application_vnd.jupyter.stderr_output_744.png", "text_plain_output_537.png", "application_vnd.jupyter.stderr_output_408.png", "application_vnd.jupyter.stderr_output_374.png", "application_vnd.jupyter.stderr_output_500.png", "text_plain_output_569.png", "text_plain_output_239.png", "application_vnd.jupyter.stderr_output_186.png", "application_vnd.jupyter.stderr_output_168.png", "text_plain_output_127.png", "text_plain_output_559.png", "text_plain_output_311.png", "text_plain_output_719.png", "text_plain_output_295.png", "text_plain_output_279.png", "text_plain_output_507.png", "application_vnd.jupyter.stderr_output_56.png", "application_vnd.jupyter.stderr_output_452.png", "text_plain_output_509.png", "application_vnd.jupyter.stderr_output_104.png", "text_plain_output_337.png", "application_vnd.jupyter.stderr_output_196.png", "text_plain_output_499.png", "application_vnd.jupyter.stderr_output_50.png", "text_plain_output_807.png", "text_plain_output_563.png", "application_vnd.jupyter.stderr_output_736.png", "application_vnd.jupyter.stderr_output_114.png", "text_plain_output_97.png", "text_plain_output_729.png", "application_vnd.jupyter.stderr_output_828.png", "application_vnd.jupyter.stderr_output_492.png", "text_plain_output_717.png", "text_plain_output_227.png", "application_vnd.jupyter.stderr_output_226.png", "text_plain_output_453.png", "text_plain_output_1.png", "text_plain_output_33.png", "application_vnd.jupyter.stderr_output_128.png", "application_vnd.jupyter.stderr_output_150.png", "text_plain_output_631.png", "text_plain_output_39.png", "application_vnd.jupyter.stderr_output_556.png", "text_plain_output_335.png", "application_vnd.jupyter.stderr_output_142.png", "application_vnd.jupyter.stderr_output_326.png", "text_plain_output_233.png", "text_plain_output_473.png", "application_vnd.jupyter.stderr_output_304.png", "application_vnd.jupyter.stderr_output_830.png", "text_plain_output_385.png", "text_plain_output_55.png", "text_plain_output_293.png", "text_plain_output_199.png", "application_vnd.jupyter.stderr_output_530.png", "text_plain_output_463.png", "text_plain_output_319.png", "text_plain_output_805.png", 
"application_vnd.jupyter.stderr_output_138.png", "text_plain_output_829.png", "application_vnd.jupyter.stderr_output_412.png", "application_vnd.jupyter.stderr_output_548.png", "text_plain_output_93.png", "application_vnd.jupyter.stderr_output_200.png", "text_plain_output_19.png", "text_plain_output_439.png", "text_plain_output_341.png", "application_vnd.jupyter.stderr_output_280.png", "text_plain_output_105.png", "text_plain_output_465.png", "text_plain_output_491.png", "text_plain_output_679.png", "text_plain_output_641.png", "text_plain_output_249.png", "application_vnd.jupyter.stderr_output_122.png", "application_vnd.jupyter.stderr_output_488.png", "application_vnd.jupyter.stderr_output_624.png", "application_vnd.jupyter.stderr_output_94.png", "text_plain_output_619.png", "application_vnd.jupyter.stderr_output_282.png", "application_vnd.jupyter.stderr_output_730.png", "text_plain_output_17.png", "text_plain_output_323.png", "application_vnd.jupyter.stderr_output_462.png", "text_plain_output_755.png", "application_vnd.jupyter.stderr_output_652.png", "application_vnd.jupyter.stderr_output_182.png", "application_vnd.jupyter.stderr_output_158.png", "text_plain_output_597.png", "application_vnd.jupyter.stderr_output_78.png", "text_plain_output_11.png", "application_vnd.jupyter.stderr_output_698.png", "application_vnd.jupyter.stderr_output_370.png", "text_plain_output_481.png", "application_vnd.jupyter.stderr_output_276.png", "application_vnd.jupyter.stderr_output_188.png", "application_vnd.jupyter.stderr_output_696.png", "application_vnd.jupyter.stderr_output_14.png", "text_plain_output_267.png", "application_vnd.jupyter.stderr_output_562.png", "text_plain_output_553.png", "text_plain_output_425.png", "text_plain_output_591.png", "text_plain_output_811.png", "application_vnd.jupyter.stderr_output_790.png", "application_vnd.jupyter.stderr_output_706.png", "text_plain_output_625.png", "application_vnd.jupyter.stderr_output_350.png", "text_plain_output_577.png", "application_vnd.jupyter.stderr_output_54.png", "application_vnd.jupyter.stderr_output_118.png", "application_vnd.jupyter.stderr_output_154.png", "text_plain_output_727.png", "application_vnd.jupyter.stderr_output_438.png", "text_plain_output_747.png", "application_vnd.jupyter.stderr_output_442.png", "application_vnd.jupyter.stderr_output_198.png", "text_plain_output_519.png", "text_plain_output_733.png", "text_plain_output_721.png", "application_vnd.jupyter.stderr_output_712.png", "text_plain_output_757.png", "text_plain_output_303.png", "application_vnd.jupyter.stderr_output_746.png", "text_plain_output_621.png", "text_plain_output_377.png", "application_vnd.jupyter.stderr_output_460.png", "text_plain_output_95.png", "text_plain_output_339.png", "application_vnd.jupyter.stderr_output_228.png", "application_vnd.jupyter.stderr_output_614.png", "application_vnd.jupyter.stderr_output_254.png", "text_plain_output_547.png", "text_plain_output_369.png", "application_vnd.jupyter.stderr_output_582.png", "application_vnd.jupyter.stderr_output_628.png", "text_plain_output_587.png", "application_vnd.jupyter.stderr_output_466.png", "application_vnd.jupyter.stderr_output_340.png", "text_plain_output_365.png", "text_plain_output_815.png", "application_vnd.jupyter.stderr_output_208.png", "text_plain_output_61.png", "text_plain_output_585.png", "text_plain_output_83.png", "application_vnd.jupyter.stderr_output_248.png", "text_plain_output_647.png", "application_vnd.jupyter.stderr_output_210.png", "application_vnd.jupyter.stderr_output_92.png", 
"application_vnd.jupyter.stderr_output_164.png", "application_vnd.jupyter.stderr_output_102.png", "text_plain_output_397.png", "text_plain_output_661.png", "text_plain_output_389.png", "application_vnd.jupyter.stderr_output_788.png", "application_vnd.jupyter.stderr_output_410.png", "text_plain_output_351.png", "application_vnd.jupyter.stderr_output_40.png", "application_vnd.jupyter.stderr_output_532.png", "application_vnd.jupyter.stderr_output_244.png", "text_plain_output_135.png", "text_plain_output_285.png", "application_vnd.jupyter.stderr_output_264.png", "application_vnd.jupyter.stderr_output_486.png", "text_plain_output_675.png", "application_vnd.jupyter.stderr_output_646.png", "text_plain_output_493.png", "application_vnd.jupyter.stderr_output_434.png" ]
# Install nb_black for autoformatting
!pip install nb_black --quiet
code
121149832/cell_19
[ "text_html_output_1.png" ]
from kaggle_secrets import UserSecretsClient
from logging import getLogger, INFO, FileHandler, Formatter, StreamHandler
from sklearn.metrics import accuracy_score
import json
import numpy as np
import os
import os
import pandas as pd
import random
import torch
import wandb
import warnings
import numpy as np
import pandas as pd
import math
import random
import time
from collections import OrderedDict
import tensorflow as tf
from tqdm import tqdm
import json
import os
import gc
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split, GroupKFold, StratifiedGroupKFold
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adam, SGD, AdamW
from torch.optim.optimizer import Optimizer
import torchvision.models as models
from torch.nn.parameter import Parameter
from torch.utils.data import DataLoader, Dataset
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts, CosineAnnealingLR, ReduceLROnPlateau
from torchinfo import summary
import onnx
import onnx_tf
from onnx_tf.backend import prepare
import warnings
warnings.filterwarnings('ignore')
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
VERSION = 12

def get_score(y_true, y_pred):
    score = accuracy_score(y_true, y_pred)
    return score

def seed_torch(seed=42):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True

def load_relevant_data_subset_with_imputation(pq_path):
    data_columns = ['x', 'y', 'z']
    data = pd.read_parquet(pq_path, columns=data_columns)
    data.replace(np.nan, 0, inplace=True)
    n_frames = int(len(data) / CFG.rows_per_frame)
    data = data.values.reshape(n_frames, CFG.rows_per_frame, len(data_columns))
    return data.astype(np.float32)

def load_relevant_data_subset(pq_path):
    data_columns = ['x', 'y']
    data = pd.read_parquet(pq_path, columns=data_columns)
    n_frames = int(len(data) / CFG.rows_per_frame)
    data = data.values.reshape(n_frames, CFG.rows_per_frame, len(data_columns))
    return data.astype(np.float32)

def read_dict(file_path):
    path = os.path.expanduser(file_path)
    with open(path, 'r') as f:
        dic = json.load(f)
    return dic

class CFG:
    num_workers = 2
    apex = False
    scheduler = 'CosineAnnealingLR'
    epochs = 500
    print_freq = 200
    cosanneal_params = {'T_max': 5, 'eta_min': 3 * 1e-05, 'last_epoch': -1}
    reduce_params = {'mode': 'min', 'factor': 0.1, 'patience': 6, 'eps': 1e-06, 'verbose': True}
    cosanneal_res_params = {'T_0': 3, 'eta_min': 1e-06, 'T_mult': 1, 'last_epoch': -1}
    onecycle_params = {'pct_start': 0.1, 'div_factor': 10.0, 'max_lr': 0.001, 'steps_per_epoch': 3, 'epochs': 3}
    momentum = 0.9
    model_name = 'NN_ArcFace'
    lr = 3 * 0.0001
    weight_decay = 0.0001
    gradient_accumulation_steps = 1
    max_grad_norm = 1000
    data_path = '../input/asl-signs/'
    debug = False
    arcface = True
    use_aggregation_dataset = True
    target_size = 250
    rows_per_frame = 543
    batch_size = 512
    train = True
    early_stop = True
    target_col = 'label'
    scale = 30.0
    margin = 0.5
    easy_margin = False
    ls_eps = 0.0
    fc_dim = 512
    early_stopping_steps = 5
    grad_cam = False
    seed = 42

import os
OUTPUT_DIR = f'./{CFG.model_name}_version{VERSION}/'
if not os.path.exists(OUTPUT_DIR):
    os.makedirs(OUTPUT_DIR)

def init_logger(log_file=OUTPUT_DIR + 'train.log'):
    from logging import getLogger, INFO, FileHandler, Formatter, StreamHandler
    logger = getLogger(__name__)
    logger.setLevel(INFO)
    handler1 = StreamHandler()
    handler1.setFormatter(Formatter('%(message)s'))
    handler2 = FileHandler(filename=log_file)
    handler2.setFormatter(Formatter('%(message)s'))
    logger.addHandler(handler1)
    logger.addHandler(handler2)
    return logger

LOGGER = init_logger()
train = pd.read_csv(f'{CFG.data_path}train.csv')
label_index = read_dict(f'{CFG.data_path}sign_to_prediction_index_map.json')
index_label = dict([(label_index[key], key) for key in label_index])
train['label'] = train['sign'].map(lambda sign: label_index[sign])
if CFG.debug:
    CFG.epochs = 1
    train = train.sample(n=4000, random_state=CFG.seed).reset_index(drop=True)
from kaggle_secrets import UserSecretsClient
user_secrets = UserSecretsClient()
wandb_api = user_secrets.get_secret('wandb_key')
import wandb
wandb.login(key=wandb_api)

def class2dict(f):
    return dict(((name, getattr(f, name)) for name in dir(f) if not name.startswith('__')))

run = wandb.init(project='GISLR Competition', name=f'{CFG.model_name}_Version{VERSION}', config=class2dict(CFG), group=CFG.model_name, job_type='train')
code
121149832/cell_3
[ "text_html_output_4.png", "text_html_output_2.png", "text_html_output_5.png", "text_html_output_1.png", "application_vnd.jupyter.stderr_output_1.png", "text_html_output_3.png" ]
!pip install onnx_tf
!pip install tflite-runtime
!pip install -q --upgrade wandb
code
121149832/cell_17
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score
import json
import numpy as np
import os
import os
import pandas as pd
import random
import torch
import torch.nn as nn
import warnings
import numpy as np
import pandas as pd
import math
import random
import time
from collections import OrderedDict
import tensorflow as tf
from tqdm import tqdm
import json
import os
import gc
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split, GroupKFold, StratifiedGroupKFold
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adam, SGD, AdamW
from torch.optim.optimizer import Optimizer
import torchvision.models as models
from torch.nn.parameter import Parameter
from torch.utils.data import DataLoader, Dataset
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts, CosineAnnealingLR, ReduceLROnPlateau
from torchinfo import summary
import onnx
import onnx_tf
from onnx_tf.backend import prepare
import warnings
warnings.filterwarnings('ignore')
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
VERSION = 12

def get_score(y_true, y_pred):
    score = accuracy_score(y_true, y_pred)
    return score

def seed_torch(seed=42):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True

def load_relevant_data_subset_with_imputation(pq_path):
    data_columns = ['x', 'y', 'z']
    data = pd.read_parquet(pq_path, columns=data_columns)
    data.replace(np.nan, 0, inplace=True)
    n_frames = int(len(data) / CFG.rows_per_frame)
    data = data.values.reshape(n_frames, CFG.rows_per_frame, len(data_columns))
    return data.astype(np.float32)

def load_relevant_data_subset(pq_path):
    data_columns = ['x', 'y']
    data = pd.read_parquet(pq_path, columns=data_columns)
    n_frames = int(len(data) / CFG.rows_per_frame)
    data = data.values.reshape(n_frames, CFG.rows_per_frame, len(data_columns))
    return data.astype(np.float32)

def read_dict(file_path):
    path = os.path.expanduser(file_path)
    with open(path, 'r') as f:
        dic = json.load(f)
    return dic

lipsUpperOuter = [61, 185, 40, 39, 37, 0, 267, 269, 270, 409, 291]
lipsLowerOuter = [146, 91, 181, 84, 17, 314, 405, 321, 375, 291]
lipsUpperInner = [78, 191, 80, 81, 82, 13, 312, 311, 310, 415, 308]
lipsLowerInner = [78, 95, 88, 178, 87, 14, 317, 402, 318, 324, 308]
lips = lipsUpperOuter + lipsLowerOuter + lipsUpperInner + lipsLowerInner

class FeatureGen(nn.Module):

    def __init__(self):
        super(FeatureGen, self).__init__()
        pass

    def forward(self, x):
        x = x[:, :, :2]
        lips_x = x[:, lips, :].contiguous().view(-1, 43 * 2)
        lefth_x = x[:, 468:489, :].contiguous().view(-1, 21 * 2)
        pose_x = x[:, 489:522, :].contiguous().view(-1, 33 * 2)
        righth_x = x[:, 522:, :].contiguous().view(-1, 21 * 2)
        lefth_x = lefth_x[~torch.any(torch.isnan(lefth_x), dim=1), :]
        righth_x = righth_x[~torch.any(torch.isnan(righth_x), dim=1), :]
        x1m = torch.mean(lips_x, 0)
        x2m = torch.mean(lefth_x, 0)
        x3m = torch.mean(pose_x, 0)
        x4m = torch.mean(righth_x, 0)
        x1s = torch.std(lips_x, 0)
        x2s = torch.std(lefth_x, 0)
        x3s = torch.std(pose_x, 0)
        x4s = torch.std(righth_x, 0)
        xfeat = torch.cat([x1m, x2m, x3m, x4m, x1s, x2s, x3s, x4s], axis=0)
        xfeat = torch.where(torch.isnan(xfeat), torch.tensor(0.0, dtype=torch.float32), xfeat)
        return xfeat

feature_converter = FeatureGen()
X = np.load('/kaggle/input/isolated-sign-language-aggregation-preparation/feature_data.npy')
y = np.load('/kaggle/input/isolated-sign-language-aggregation-preparation/feature_labels.npy')
print(X.shape, y.shape)
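# FeatureGen collapses a variable-length (frames, 543, 2) landmark sequence
# into a fixed-length vector: means and standard deviations over frames for
# 43 lip, 21 left-hand, 33 pose and 21 right-hand points in x/y,
# i.e. 2 * (86 + 42 + 66 + 42) = 472 values.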
code
121149832/cell_14
[ "text_plain_output_1.png" ]
from logging import getLogger, INFO, FileHandler, Formatter, StreamHandler
from sklearn.metrics import accuracy_score
import json
import numpy as np
import os
import os
import pandas as pd
import random
import torch
import warnings
import numpy as np
import pandas as pd
import math
import random
import time
from collections import OrderedDict
import tensorflow as tf
from tqdm import tqdm
import json
import os
import gc
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split, GroupKFold, StratifiedGroupKFold
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adam, SGD, AdamW
from torch.optim.optimizer import Optimizer
import torchvision.models as models
from torch.nn.parameter import Parameter
from torch.utils.data import DataLoader, Dataset
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts, CosineAnnealingLR, ReduceLROnPlateau
from torchinfo import summary
import onnx
import onnx_tf
from onnx_tf.backend import prepare
import warnings
warnings.filterwarnings('ignore')
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
VERSION = 12

def get_score(y_true, y_pred):
    score = accuracy_score(y_true, y_pred)
    return score

def seed_torch(seed=42):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True

def load_relevant_data_subset_with_imputation(pq_path):
    data_columns = ['x', 'y', 'z']
    data = pd.read_parquet(pq_path, columns=data_columns)
    data.replace(np.nan, 0, inplace=True)
    n_frames = int(len(data) / CFG.rows_per_frame)
    data = data.values.reshape(n_frames, CFG.rows_per_frame, len(data_columns))
    return data.astype(np.float32)

def load_relevant_data_subset(pq_path):
    data_columns = ['x', 'y']
    data = pd.read_parquet(pq_path, columns=data_columns)
    n_frames = int(len(data) / CFG.rows_per_frame)
    data = data.values.reshape(n_frames, CFG.rows_per_frame, len(data_columns))
    return data.astype(np.float32)

def read_dict(file_path):
    path = os.path.expanduser(file_path)
    with open(path, 'r') as f:
        dic = json.load(f)
    return dic

class CFG:
    num_workers = 2
    apex = False
    scheduler = 'CosineAnnealingLR'
    epochs = 500
    print_freq = 200
    cosanneal_params = {'T_max': 5, 'eta_min': 3 * 1e-05, 'last_epoch': -1}
    reduce_params = {'mode': 'min', 'factor': 0.1, 'patience': 6, 'eps': 1e-06, 'verbose': True}
    cosanneal_res_params = {'T_0': 3, 'eta_min': 1e-06, 'T_mult': 1, 'last_epoch': -1}
    onecycle_params = {'pct_start': 0.1, 'div_factor': 10.0, 'max_lr': 0.001, 'steps_per_epoch': 3, 'epochs': 3}
    momentum = 0.9
    model_name = 'NN_ArcFace'
    lr = 3 * 0.0001
    weight_decay = 0.0001
    gradient_accumulation_steps = 1
    max_grad_norm = 1000
    data_path = '../input/asl-signs/'
    debug = False
    arcface = True
    use_aggregation_dataset = True
    target_size = 250
    rows_per_frame = 543
    batch_size = 512
    train = True
    early_stop = True
    target_col = 'label'
    scale = 30.0
    margin = 0.5
    easy_margin = False
    ls_eps = 0.0
    fc_dim = 512
    early_stopping_steps = 5
    grad_cam = False
    seed = 42

import os
OUTPUT_DIR = f'./{CFG.model_name}_version{VERSION}/'
if not os.path.exists(OUTPUT_DIR):
    os.makedirs(OUTPUT_DIR)

def init_logger(log_file=OUTPUT_DIR + 'train.log'):
    from logging import getLogger, INFO, FileHandler, Formatter, StreamHandler
    logger = getLogger(__name__)
    logger.setLevel(INFO)
    handler1 = StreamHandler()
    handler1.setFormatter(Formatter('%(message)s'))
    handler2 = FileHandler(filename=log_file)
    handler2.setFormatter(Formatter('%(message)s'))
    logger.addHandler(handler1)
    logger.addHandler(handler2)
    return logger

LOGGER = init_logger()
train = pd.read_csv(f'{CFG.data_path}train.csv')
label_index = read_dict(f'{CFG.data_path}sign_to_prediction_index_map.json')
index_label = dict([(label_index[key], key) for key in label_index])
train['label'] = train['sign'].map(lambda sign: label_index[sign])
if CFG.debug:
    CFG.epochs = 1
    train = train.sample(n=4000, random_state=CFG.seed).reset_index(drop=True)
train.head()
code
48166170/cell_13
[ "text_html_output_1.png" ]
k_range = range(1, 21)
print('k range', k_range)
code
48166170/cell_39
[ "text_plain_output_1.png" ]
from sklearn import metrics
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
val_results = []
train_results = []
k_range = range(1, 21)
for k in k_range:
    clf_2 = KNeighborsClassifier(n_neighbors=k)
    clf_2 = clf_2.fit(X_train, y_train)
    pred_train = clf_2.predict(X_train)
    train_score = metrics.accuracy_score(y_train, pred_train)
    train_results.append(train_score)
    pred_val = clf_2.predict(X_val)
    val_score = metrics.accuracy_score(y_val, pred_val)
    val_results.append(val_score)
clf_3 = KNeighborsClassifier()
param_grid = [{'weights': ['uniform'], 'n_neighbors': list(range(1, 21))}, {'weights': ['distance'], 'n_neighbors': list(range(1, 21))}]
gs = GridSearchCV(clf_3, param_grid, scoring='accuracy', cv=10)
gs = gs.fit(X_train, y_train)
clf_best = gs.best_estimator_
clf_best.fit(X_train, y_train)
y_pred = clf_best.predict(X_test)
print(metrics.accuracy_score(y_test, y_pred))
code
48166170/cell_26
[ "image_output_1.png" ]
from sklearn import metrics
from sklearn.model_selection import cross_val_score, learning_curve, validation_curve
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df2 = pd.read_csv('../input/diabetescsv/diabetes.csv')
X = df2.iloc[:, 0:8]
y = df2.iloc[:, 8]
val_results = []
train_results = []
k_range = range(1, 21)
for k in k_range:
    clf_2 = KNeighborsClassifier(n_neighbors=k)
    clf_2 = clf_2.fit(X_train, y_train)
    pred_train = clf_2.predict(X_train)
    train_score = metrics.accuracy_score(y_train, pred_train)
    train_results.append(train_score)
    pred_val = clf_2.predict(X_val)
    val_score = metrics.accuracy_score(y_val, pred_val)
    val_results.append(val_score)
clf_2 = KNeighborsClassifier()

def plot_validation_curve(clf, X, y, param_name, param_range):
    train_scores, test_scores = validation_curve(clf, X, y, cv=10, scoring='accuracy', param_name=param_name, param_range=param_range, n_jobs=-1)
    x_range = param_range
    train_scores_mean = np.mean(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)

plot_validation_curve(clf_2, X_train, y_train, param_name='n_neighbors', param_range=range(1, 21))
code
48166170/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd
df2 = pd.read_csv('../input/diabetescsv/diabetes.csv')
X = df2.iloc[:, 0:8]
y = df2.iloc[:, 8]
print(X)
code
48166170/cell_17
[ "text_plain_output_1.png" ]
from sklearn import metrics
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
val_results = []
train_results = []
k_range = range(1, 21)
for k in k_range:
    clf_2 = KNeighborsClassifier(n_neighbors=k)
    clf_2 = clf_2.fit(X_train, y_train)
    pred_train = clf_2.predict(X_train)
    train_score = metrics.accuracy_score(y_train, pred_train)
    train_results.append(train_score)
    pred_val = clf_2.predict(X_val)
    val_score = metrics.accuracy_score(y_val, pred_val)
    val_results.append(val_score)
plt.plot(k_range, val_results, 'b-', label='Val score')
plt.plot(k_range, train_results, 'r-', label='Train score')
plt.ylabel('Score')
plt.xlabel('Model complexity: k')
plt.legend()
plt.grid(True)
plt.show()
code
48166170/cell_31
[ "text_plain_output_1.png" ]
from sklearn.neighbors import KNeighborsClassifier
clf_3 = KNeighborsClassifier()
param_grid = [{'weights': ['uniform'], 'n_neighbors': list(range(1, 21))}, {'weights': ['distance'], 'n_neighbors': list(range(1, 21))}]
print(param_grid)
code
48166170/cell_22
[ "text_plain_output_1.png" ]
from sklearn import metrics
from sklearn.neighbors import KNeighborsClassifier
val_results = []
train_results = []
k_range = range(1, 21)
for k in k_range:
    clf_2 = KNeighborsClassifier(n_neighbors=k)
    clf_2 = clf_2.fit(X_train, y_train)
    pred_train = clf_2.predict(X_train)
    train_score = metrics.accuracy_score(y_train, pred_train)
    train_results.append(train_score)
    pred_val = clf_2.predict(X_val)
    val_score = metrics.accuracy_score(y_val, pred_val)
    val_results.append(val_score)
clf_2 = KNeighborsClassifier()
print(clf_2)
code
48166170/cell_37
[ "image_output_1.png" ]
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
clf_3 = KNeighborsClassifier()
param_grid = [{'weights': ['uniform'], 'n_neighbors': list(range(1, 21))}, {'weights': ['distance'], 'n_neighbors': list(range(1, 21))}]
gs = GridSearchCV(clf_3, param_grid, scoring='accuracy', cv=10)
gs = gs.fit(X_train, y_train)
clf_best = gs.best_estimator_
print('best model:', clf_best.get_params())
clf_best.fit(X_train, y_train)
code
48166170/cell_5
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd
df2 = pd.read_csv('../input/diabetescsv/diabetes.csv')
df2.head(10)
code
16163803/cell_13
[ "image_output_1.png" ]
"""his = model.fit_generator(train_gen, epochs=10, steps_per_epoch=len(X_train)/BATCH_SIZE, validation_data=test_gen, validation_steps=len(X_test)/BATCH_SIZE)"""
code
16163803/cell_4
[ "text_plain_output_1.png" ]
import cv2
import matplotlib.pyplot as plt
import os
category = ['cat', 'dog']
EPOCHS = 50
IMGSIZE = 128
BATCH_SIZE = 32
STOPPING_PATIENCE = 15
VERBOSE = 1
MODEL_NAME = 'cnn_50epochs_imgsize128'
OPTIMIZER = 'adam'
TRAINING_DIR = '../input/dogs-vs-cats-redux-kernels-edition/train'
TEST_DIR = '../input/dogs-vs-cats-redux-kernels-edition/test'
for img in os.listdir(TRAINING_DIR)[7890:]:
    img_path = os.path.join(TRAINING_DIR, img)
    img_arr = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
    img_arr = cv2.resize(img_arr, (IMGSIZE, IMGSIZE))
    plt.imshow(img_arr, cmap='gray')
    plt.title(img.split('.')[0])
    break
code
16163803/cell_6
[ "image_output_1.png" ]
import cv2
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import os
category = ['cat', 'dog']
EPOCHS = 50
IMGSIZE = 128
BATCH_SIZE = 32
STOPPING_PATIENCE = 15
VERBOSE = 1
MODEL_NAME = 'cnn_50epochs_imgsize128'
OPTIMIZER = 'adam'
TRAINING_DIR = '../input/dogs-vs-cats-redux-kernels-edition/train'
TEST_DIR = '../input/dogs-vs-cats-redux-kernels-edition/test'
for img in os.listdir(TRAINING_DIR)[7890:]:
    img_path = os.path.join(TRAINING_DIR, img)
    img_arr = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
    img_arr = cv2.resize(img_arr, (IMGSIZE, IMGSIZE))
    break

def create_train_data(path):
    X = []
    y = []
    for img in os.listdir(path):
        if img == os.listdir(path)[7889]:
            continue
        img_path = os.path.join(path, img)
        img_arr = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
        img_arr = cv2.resize(img_arr, (IMGSIZE, IMGSIZE))
        img_arr = img_arr / 255.0
        cat = np.where(img.split('.')[0] == 'dog', 1, 0)
        X.append(img_arr)
        y.append(cat)
    X = np.array(X).reshape(-1, IMGSIZE, IMGSIZE, 1)
    y = np.array(y)
    return (X, y)

X, y = create_train_data(TRAINING_DIR)
print(f'features shape {X.shape}.\nlabel shape {y.shape}.')
code
16163803/cell_2
[ "text_plain_output_1.png" ]
import os
print(os.listdir('../input'))
code
16163803/cell_11
[ "text_plain_output_1.png" ]
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization, Dropout
from keras.layers import Dense, Flatten
from keras.models import Sequential
from keras.utils import to_categorical
import cv2
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import os
category = ['cat', 'dog']
EPOCHS = 50
IMGSIZE = 128
BATCH_SIZE = 32
STOPPING_PATIENCE = 15
VERBOSE = 1
MODEL_NAME = 'cnn_50epochs_imgsize128'
OPTIMIZER = 'adam'
TRAINING_DIR = '../input/dogs-vs-cats-redux-kernels-edition/train'
TEST_DIR = '../input/dogs-vs-cats-redux-kernels-edition/test'
for img in os.listdir(TRAINING_DIR)[7890:]:
    img_path = os.path.join(TRAINING_DIR, img)
    img_arr = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
    img_arr = cv2.resize(img_arr, (IMGSIZE, IMGSIZE))
    break

def create_train_data(path):
    X = []
    y = []
    for img in os.listdir(path):
        if img == os.listdir(path)[7889]:
            continue
        img_path = os.path.join(path, img)
        img_arr = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
        img_arr = cv2.resize(img_arr, (IMGSIZE, IMGSIZE))
        img_arr = img_arr / 255.0
        cat = np.where(img.split('.')[0] == 'dog', 1, 0)
        X.append(img_arr)
        y.append(cat)
    X = np.array(X).reshape(-1, IMGSIZE, IMGSIZE, 1)
    y = np.array(y)
    return (X, y)

X, y = create_train_data(TRAINING_DIR)
y = to_categorical(y, 2)
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=X.shape[1:]))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.6))
model.add(Dense(2, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(X, y, epochs=EPOCHS, batch_size=BATCH_SIZE, validation_split=1 / 3)
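# Note on the head of this model: with one-hot labels and two output units,
# Dense(2, activation='softmax') paired with categorical_crossentropy is the
# more conventional setup; the sigmoid + binary_crossentropy combination used
# here still trains because Keras applies the loss element-wise across the two
# outputs.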
code
16163803/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization, Dropout
from keras.layers import Dense, Flatten
import cv2
code
16163803/cell_7
[ "image_output_1.png" ]
from keras.utils import to_categorical
import cv2
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import os
category = ['cat', 'dog']
EPOCHS = 50
IMGSIZE = 128
BATCH_SIZE = 32
STOPPING_PATIENCE = 15
VERBOSE = 1
MODEL_NAME = 'cnn_50epochs_imgsize128'
OPTIMIZER = 'adam'
TRAINING_DIR = '../input/dogs-vs-cats-redux-kernels-edition/train'
TEST_DIR = '../input/dogs-vs-cats-redux-kernels-edition/test'
for img in os.listdir(TRAINING_DIR)[7890:]:
    img_path = os.path.join(TRAINING_DIR, img)
    img_arr = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
    img_arr = cv2.resize(img_arr, (IMGSIZE, IMGSIZE))
    break

def create_train_data(path):
    X = []
    y = []
    for img in os.listdir(path):
        if img == os.listdir(path)[7889]:
            continue
        img_path = os.path.join(path, img)
        img_arr = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
        img_arr = cv2.resize(img_arr, (IMGSIZE, IMGSIZE))
        img_arr = img_arr / 255.0
        cat = np.where(img.split('.')[0] == 'dog', 1, 0)
        X.append(img_arr)
        y.append(cat)
    X = np.array(X).reshape(-1, IMGSIZE, IMGSIZE, 1)
    y = np.array(y)
    return (X, y)

X, y = create_train_data(TRAINING_DIR)
y = to_categorical(y, 2)
print(f'features shape {X.shape}.\nlabel shape {y.shape}.')
code
16163803/cell_15
[ "text_plain_output_1.png" ]
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization, Dropout
from keras.layers import Dense, Flatten
from keras.models import Sequential
from keras.utils import to_categorical
import cv2
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import os
category = ['cat', 'dog']
EPOCHS = 50
IMGSIZE = 128
BATCH_SIZE = 32
STOPPING_PATIENCE = 15
VERBOSE = 1
MODEL_NAME = 'cnn_50epochs_imgsize128'
OPTIMIZER = 'adam'
TRAINING_DIR = '../input/dogs-vs-cats-redux-kernels-edition/train'
TEST_DIR = '../input/dogs-vs-cats-redux-kernels-edition/test'
for img in os.listdir(TRAINING_DIR)[7890:]:
    img_path = os.path.join(TRAINING_DIR, img)
    img_arr = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
    img_arr = cv2.resize(img_arr, (IMGSIZE, IMGSIZE))
    break

def create_train_data(path):
    X = []
    y = []
    for img in os.listdir(path):
        if img == os.listdir(path)[7889]:
            continue
        img_path = os.path.join(path, img)
        img_arr = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
        img_arr = cv2.resize(img_arr, (IMGSIZE, IMGSIZE))
        img_arr = img_arr / 255.0
        cat = np.where(img.split('.')[0] == 'dog', 1, 0)
        X.append(img_arr)
        y.append(cat)
    X = np.array(X).reshape(-1, IMGSIZE, IMGSIZE, 1)
    y = np.array(y)
    return (X, y)

X, y = create_train_data(TRAINING_DIR)
y = to_categorical(y, 2)
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=X.shape[1:]))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.6))
model.add(Dense(2, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(X, y, epochs=EPOCHS, batch_size=BATCH_SIZE, validation_split=1 / 3)
model.save_weights('CATSvsDOGS_model.h5')
model.save('CNN_CAT.model')
train_acc = model.evaluate(X_train, y_train, batch_size=32)
# The trailing comma wraps the evaluation result in a one-element tuple, hence
# the test_acc[0][0] / test_acc[0][1] indexing in the plotting code below.
test_acc = (model.evaluate(X_test, y_test, batch_size=32),)
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(18, 10))
ax1.plot(history.history['loss'], color='b', label='Training loss : {:0.4f}'.format(train_acc[0]))
ax1.plot(history.history['val_loss'], color='r', label='validation loss : {:0.4f}'.format(test_acc[0][0]))
ax1.set_xticks(np.arange(1, EPOCHS, 1))
ax1.set_yticks(np.arange(0, 1.0, 0.1))
ax1.legend()
ax2.plot(history.history['acc'], color='b', label='Training accuracy : {0:.4f}'.format(train_acc[1]))
ax2.plot(history.history['val_acc'], color='r', label='Validation accuracy : {0:.4f}'.format(test_acc[0][1]))
ax2.set_xticks(np.arange(1, EPOCHS, 1))
ax2.set_yticks(np.arange(0.4, 1.2, 0.1))
legend = plt.legend(loc='best', shadow=True)
plt.tight_layout()
plt.show()
code
16163803/cell_16
[ "text_plain_output_1.png" ]
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization, Dropout
from keras.layers import Dense, Flatten
from keras.models import Sequential
from keras.utils import to_categorical
import cv2
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import os
category = ['cat', 'dog']
EPOCHS = 50
IMGSIZE = 128
BATCH_SIZE = 32
STOPPING_PATIENCE = 15
VERBOSE = 1
MODEL_NAME = 'cnn_50epochs_imgsize128'
OPTIMIZER = 'adam'
TRAINING_DIR = '../input/dogs-vs-cats-redux-kernels-edition/train'
TEST_DIR = '../input/dogs-vs-cats-redux-kernels-edition/test'
for img in os.listdir(TRAINING_DIR)[7890:]:
    img_path = os.path.join(TRAINING_DIR, img)
    img_arr = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
    img_arr = cv2.resize(img_arr, (IMGSIZE, IMGSIZE))
    break

def create_train_data(path):
    X = []
    y = []
    for img in os.listdir(path):
        if img == os.listdir(path)[7889]:
            continue
        img_path = os.path.join(path, img)
        img_arr = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
        img_arr = cv2.resize(img_arr, (IMGSIZE, IMGSIZE))
        img_arr = img_arr / 255.0
        cat = np.where(img.split('.')[0] == 'dog', 1, 0)
        X.append(img_arr)
        y.append(cat)
    X = np.array(X).reshape(-1, IMGSIZE, IMGSIZE, 1)
    y = np.array(y)
    return (X, y)

X, y = create_train_data(TRAINING_DIR)
y = to_categorical(y, 2)
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=X.shape[1:]))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.6))
model.add(Dense(2, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(X, y, epochs=EPOCHS, batch_size=BATCH_SIZE, validation_split=1 / 3)
model.save_weights('CATSvsDOGS_model.h5')
model.save('CNN_CAT.model')
train_acc = model.evaluate(X_train, y_train, batch_size=32)
test_acc = (model.evaluate(X_test, y_test, batch_size=32),)
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(18, 10))
ax1.plot(history.history['loss'], color='b', label='Training loss : {:0.4f}'.format(train_acc[0]))
ax1.plot(history.history['val_loss'], color='r', label='validation loss : {:0.4f}'.format(test_acc[0][0]))
ax1.set_xticks(np.arange(1, EPOCHS, 1))
ax1.set_yticks(np.arange(0, 1.0, 0.1))
ax1.legend()
ax2.plot(history.history['acc'], color='b', label='Training accuracy : {0:.4f}'.format(train_acc[1]))
ax2.plot(history.history['val_acc'], color='r', label='Validation accuracy : {0:.4f}'.format(test_acc[0][1]))
ax2.set_xticks(np.arange(1, EPOCHS, 1))
ax2.set_yticks(np.arange(0.4, 1.2, 0.1))
legend = plt.legend(loc='best', shadow=True)
plt.tight_layout()
plt.show()
for img in os.listdir(TEST_DIR)[800:]:
    img_path = os.path.join(TEST_DIR, img)
    img_arr = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
    img_arr = cv2.resize(img_arr, (IMGSIZE, IMGSIZE))
    plt.imshow(img_arr, cmap='gray')
    plt.title(img.split('.')[0])
    break
code
16163803/cell_14
[ "text_plain_output_1.png" ]
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization, Dropout
from keras.layers import Dense, Flatten
from keras.models import Sequential
from keras.utils import to_categorical
import cv2
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import os
category = ['cat', 'dog']
EPOCHS = 50
IMGSIZE = 128
BATCH_SIZE = 32
STOPPING_PATIENCE = 15
VERBOSE = 1
MODEL_NAME = 'cnn_50epochs_imgsize128'
OPTIMIZER = 'adam'
TRAINING_DIR = '../input/dogs-vs-cats-redux-kernels-edition/train'
TEST_DIR = '../input/dogs-vs-cats-redux-kernels-edition/test'
for img in os.listdir(TRAINING_DIR)[7890:]:
    img_path = os.path.join(TRAINING_DIR, img)
    img_arr = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
    img_arr = cv2.resize(img_arr, (IMGSIZE, IMGSIZE))
    break

def create_train_data(path):
    X = []
    y = []
    for img in os.listdir(path):
        if img == os.listdir(path)[7889]:
            continue
        img_path = os.path.join(path, img)
        img_arr = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
        img_arr = cv2.resize(img_arr, (IMGSIZE, IMGSIZE))
        img_arr = img_arr / 255.0
        cat = np.where(img.split('.')[0] == 'dog', 1, 0)
        X.append(img_arr)
        y.append(cat)
    X = np.array(X).reshape(-1, IMGSIZE, IMGSIZE, 1)
    y = np.array(y)
    return (X, y)

X, y = create_train_data(TRAINING_DIR)
y = to_categorical(y, 2)
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=X.shape[1:]))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.6))
model.add(Dense(2, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(X, y, epochs=EPOCHS, batch_size=BATCH_SIZE, validation_split=1 / 3)
model.save_weights('CATSvsDOGS_model.h5')
model.save('CNN_CAT.model')
train_acc = model.evaluate(X_train, y_train, batch_size=32)
test_acc = (model.evaluate(X_test, y_test, batch_size=32),)
code
105192890/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import os
import torch
from torch import nn
from torch.utils.data import TensorDataset, DataLoader
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
import warnings  # needed for the simplefilter call below
warnings.simplefilter('ignore')
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
121150886/cell_25
[ "text_plain_output_1.png" ]
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from transformers import AutoTokenizer, AutoModel
import re
import torch
import unicodedata
if torch.cuda.is_available():
    device = torch.device('cuda')
else:
    device = torch.device('cpu')
tokenizer = AutoTokenizer.from_pretrained('asafaya/bert-mini-arabic')

def text_preprocessing(text):
    text = unicodedata.normalize('NFC', text)
    text = re.sub('(@.*?)[\\s]', ' ', text)
    text = re.sub('&amp', '&', text)
    text = re.sub('\\s+', ' ', text).strip()
    text = re.sub('^https?:\\/\\/.*[\\r\\n]*', '<URL>', text)
    return text

import emoji

def preprocessing_for_bert(data, version='mini', text_preprocessing_fn=text_preprocessing):
    input_ids = []
    attention_masks = []
    tokenizer = AutoTokenizer.from_pretrained('asafaya/bert-mini-arabic') if version == 'mini' else AutoTokenizer.from_pretrained('asafaya/bert-base-arabic')
    for i, sent in enumerate(data):
        encoded_sent = tokenizer.encode_plus(text=text_preprocessing_fn(sent), add_special_tokens=True, max_length=MAX_LEN, padding='max_length', return_attention_mask=True, truncation=True)
        input_ids.append(encoded_sent.get('input_ids'))
        attention_masks.append(encoded_sent.get('attention_mask'))
    input_ids = torch.tensor(input_ids)
    attention_masks = torch.tensor(attention_masks)
    return (input_ids, attention_masks)

print('Tokenizing data...')
test_inputs, test_masks = preprocessing_for_bert(X_test)
test_dataset = TensorDataset(test_inputs, test_masks)
test_sampler = SequentialSampler(test_dataset)
test_dataloader = DataLoader(test_dataset, sampler=test_sampler, batch_size=32)
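# SequentialSampler preserves row order, so predictions drawn from
# test_dataloader can be concatenated and compared index-for-index with y_test.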
code
121150886/cell_4
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from ydata_profiling import ProfileReport
import pandas as pd
df_reviews = pd.read_csv('/kaggle/input/arabic-100k-reviews/ar_reviews_100k.tsv', delimiter='\t')
profile = ProfileReport(df_reviews, title='Profiling Report')
profile.to_notebook_iframe()
code
121150886/cell_6
[ "text_plain_output_1.png" ]
import torch
if torch.cuda.is_available():
    device = torch.device('cuda')
    print(f'Using {torch.cuda.device_count()} GPU(s)!')
    print(f'Device name: {torch.cuda.get_device_name(0)}')
else:
    device = torch.device('cpu')
    print('No GPU available.')
code
121150886/cell_19
[ "text_plain_output_1.png" ]
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from transformers import AdamW, get_linear_schedule_with_warmup
from transformers import AutoTokenizer, AutoModel
import numpy as np
import random
import re
import time
import torch
import torch.nn as nn
import unicodedata
if torch.cuda.is_available():
    device = torch.device('cuda')
else:
    device = torch.device('cpu')
tokenizer = AutoTokenizer.from_pretrained('asafaya/bert-mini-arabic')

def text_preprocessing(text):
    text = unicodedata.normalize('NFC', text)
    text = re.sub('(@.*?)[\\s]', ' ', text)
    text = re.sub('&amp', '&', text)
    text = re.sub('\\s+', ' ', text).strip()
    text = re.sub('^https?:\\/\\/.*[\\r\\n]*', '<URL>', text)
    return text

import emoji

def preprocessing_for_bert(data, version='mini', text_preprocessing_fn=text_preprocessing):
    input_ids = []
    attention_masks = []
    tokenizer = AutoTokenizer.from_pretrained('asafaya/bert-mini-arabic') if version == 'mini' else AutoTokenizer.from_pretrained('asafaya/bert-base-arabic')
    for i, sent in enumerate(data):
        encoded_sent = tokenizer.encode_plus(text=text_preprocessing_fn(sent), add_special_tokens=True, max_length=MAX_LEN, padding='max_length', return_attention_mask=True, truncation=True)
        input_ids.append(encoded_sent.get('input_ids'))
        attention_masks.append(encoded_sent.get('attention_mask'))
    input_ids = torch.tensor(input_ids)
    attention_masks = torch.tensor(attention_masks)
    return (input_ids, attention_masks)

train_labels = torch.tensor(y_train)
val_labels = torch.tensor(y_val)
batch_size = 16
train_data = TensorDataset(train_inputs, train_masks, train_labels)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size)
val_data = TensorDataset(val_inputs, val_masks, val_labels)
val_sampler = RandomSampler(val_data)
val_dataloader = DataLoader(val_data, sampler=val_sampler, batch_size=batch_size)
from torch.optim import SparseAdam, Adam

def initialize_model(epochs=4, version='mini'):
    bert_classifier = BertClassifier(freeze_bert=False, version=version)
    bert_classifier.to(device)
    optimizer = AdamW(params=list(bert_classifier.parameters()), lr=5e-05, eps=1e-08)
    total_steps = len(train_dataloader) * epochs
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=total_steps)
    return (bert_classifier, optimizer, scheduler)

loss_fn = nn.CrossEntropyLoss()

def set_seed(seed_value=42):
    random.seed(seed_value)
    np.random.seed(seed_value)
    torch.manual_seed(seed_value)
    torch.cuda.manual_seed_all(seed_value)

def train(model, train_dataloader, val_dataloader=None, epochs=4, evaluation=False):
    for epoch_i in range(epochs):
        t0_epoch, t0_batch = (time.time(), time.time())
        total_loss, batch_loss, batch_counts = (0, 0, 0)
        model.train()
        for step, batch in enumerate(train_dataloader):
            batch_counts += 1
            b_input_ids, b_attn_mask, b_labels = tuple((t.to(device) for t in batch))
            model.zero_grad()
            logits = model(b_input_ids, b_attn_mask)
            loss = loss_fn(logits, b_labels)
            batch_loss += loss.item()
            total_loss += loss.item()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            optimizer.step()
            scheduler.step()
            if (step % 100 == 0 and step != 0) or step == len(train_dataloader) - 1:
                time_elapsed = time.time() - t0_batch
                batch_loss, batch_counts = (0, 0)
                t0_batch = time.time()
        avg_train_loss = total_loss / len(train_dataloader)
        if evaluation == True:
            val_loss, val_accuracy = evaluate(model, val_dataloader)
        time_elapsed = time.time() - t0_epoch

set_seed(42)
bert_classifier, optimizer, scheduler = initialize_model(epochs=2)
train(bert_classifier, train_dataloader, val_dataloader, epochs=2, evaluation=True)
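# evaluate() is called above but defined in a cell not shown here; a minimal
# sketch consistent with that call signature (an assumption, not the original
# implementation) would be:
# def evaluate(model, val_dataloader):
#     model.eval()
#     losses, accs = [], []
#     for batch in val_dataloader:
#         b_input_ids, b_attn_mask, b_labels = tuple(t.to(device) for t in batch)
#         with torch.no_grad():
#             logits = model(b_input_ids, b_attn_mask)
#         losses.append(loss_fn(logits, b_labels).item())
#         preds = torch.argmax(logits, dim=1).flatten()
#         accs.append((preds == b_labels).float().mean().item() * 100)
#     return np.mean(losses), np.mean(accs)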
code
121150886/cell_1
[ "text_plain_output_1.png" ]
!pip install Arabic-Stopwords
!pip install emoji
# !pip install Tashaphyne
# !pip install qalsadi
# !pip install np_utils
!pip install ydata-profiling
code
121150886/cell_8
[ "text_plain_output_1.png" ]
from transformers import AutoTokenizer, AutoModel
tokenizer = AutoTokenizer.from_pretrained('asafaya/bert-mini-arabic')
code
121150886/cell_15
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
import torch
import torch.nn as nn
from transformers import AutoModel

class BertClassifier(nn.Module):

    def __init__(self, freeze_bert=False, version='mini'):
        super(BertClassifier, self).__init__()
        D_in = 256 if version == 'mini' else 768
        H = 50
        D_out = 2
        self.bert = AutoModel.from_pretrained('asafaya/bert-mini-arabic') if version == 'mini' else AutoModel.from_pretrained('asafaya/bert-base-arabic')
        self.classifier = nn.Sequential(nn.Linear(D_in, H), nn.ReLU(), nn.Dropout(0.5), nn.Linear(H, D_out))
        if freeze_bert:
            for param in self.bert.parameters():
                param.requires_grad = False

    def forward(self, input_ids, attention_mask):
        outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask)
        last_hidden_state_cls = outputs[0][:, 0, :]
        logits = self.classifier(last_hidden_state_cls)
        return logits
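# Usage sketch (hypothetical tensors; device and the tokenized inputs come from
# earlier cells):
# model = BertClassifier(freeze_bert=False, version='mini').to(device)
# logits = model(input_ids.to(device), attention_masks.to(device))  # (batch, 2)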
code
121150886/cell_12
[ "text_html_output_1.png" ]
from sklearn.model_selection import train_test_split
from transformers import AutoTokenizer, AutoModel
import pandas as pd
import re
import torch
import unicodedata
df_reviews = pd.read_csv('/kaggle/input/arabic-100k-reviews/ar_reviews_100k.tsv', delimiter='\t')
label_mapping = {'Positive': 1, 'Negative': 0}
df_reviews = df_reviews[df_reviews.label != 'Mixed']
df_reviews.label = df_reviews.label.map(label_mapping)
df_reviews.label.value_counts()
if torch.cuda.is_available():
    device = torch.device('cuda')
else:
    device = torch.device('cpu')
tokenizer = AutoTokenizer.from_pretrained('asafaya/bert-mini-arabic')

def text_preprocessing(text):
    text = unicodedata.normalize('NFC', text)
    text = re.sub('(@.*?)[\\s]', ' ', text)
    text = re.sub('&amp', '&', text)
    text = re.sub('\\s+', ' ', text).strip()
    text = re.sub('^https?:\\/\\/.*[\\r\\n]*', '<URL>', text)
    return text

import emoji

def preprocessing_for_bert(data, version='mini', text_preprocessing_fn=text_preprocessing):
    input_ids = []
    attention_masks = []
    tokenizer = AutoTokenizer.from_pretrained('asafaya/bert-mini-arabic') if version == 'mini' else AutoTokenizer.from_pretrained('asafaya/bert-base-arabic')
    for i, sent in enumerate(data):
        encoded_sent = tokenizer.encode_plus(text=text_preprocessing_fn(sent), add_special_tokens=True, max_length=MAX_LEN, padding='max_length', return_attention_mask=True, truncation=True)
        input_ids.append(encoded_sent.get('input_ids'))
        attention_masks.append(encoded_sent.get('attention_mask'))
    input_ids = torch.tensor(input_ids)
    attention_masks = torch.tensor(attention_masks)
    return (input_ids, attention_masks)

X = df_reviews.text.values
y = df_reviews.label.values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
X_test, X_val, y_test, y_val = train_test_split(X_test, y_test, test_size=0.5, random_state=42)
MAX_LEN = 280
token_ids = list(preprocessing_for_bert([X[0]])[0].squeeze().numpy())
print(f'Original: {X[0]}')
print(f'Token IDs: {token_ids}')
print('Tokenizing data...')
train_inputs, train_masks = preprocessing_for_bert(X_train)
val_inputs, val_masks = preprocessing_for_bert(X_val)
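# MAX_LEN = 280 fixes every encoded review to the same length: shorter inputs
# are padded and longer ones truncated, which is what lets the ids stack into a
# rectangular tensor in preprocessing_for_bert.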
code
121150886/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
df_reviews = pd.read_csv('/kaggle/input/arabic-100k-reviews/ar_reviews_100k.tsv', delimiter='\t')
label_mapping = {'Positive': 1, 'Negative': 0}
df_reviews = df_reviews[df_reviews.label != 'Mixed']
print(df_reviews.shape)
df_reviews.label = df_reviews.label.map(label_mapping)
df_reviews.label.value_counts()
code
128034284/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

def get_nan_dummy(series):
    """Given a Series containing NaN and several classes, return a dummy Series
    indicating 0 for NaN and 1 for non-NaN data.

    Parameters
    ----------
    series : pd.Series
        Input series or column to dummify.

    Returns
    -------
    s : pd.Series
        The dummy Series.
    """
    s = series.notna().astype(int).astype('category')
    s.name = f'{series.name}_abs_pres'
    return s

data = pd.read_csv('./train.csv')
data = data.drop('Id', axis=1)
new_cols = data.columns.to_list()
new_cols.reverse()
data = data[new_cols]
all_nan = data.isnull().sum().sort_values(ascending=False)
all_nan = all_nan[all_nan > 0]
cols = ['PoolQC', 'Alley', 'Fence', 'FireplaceQu', 'LotFrontage', 'MasVnrArea', 'GarageQual', 'BsmtQual']
abs_pres_series = []
for col in cols:
    abs_pres_series.append(get_nan_dummy(data[col]))
abs_pres_series = pd.concat(abs_pres_series, axis=1)
data = pd.concat([data, abs_pres_series], axis=1)
a = data['BsmtExposure'].isna()[data['BsmtExposure'].isna() == True].index
b = data['BsmtCond'].isna()[data['BsmtCond'].isna() == True].index
mystery_row = [x for x in set(a) - set(b)]
mystery_row = mystery_row[0]
data.loc[mystery_row, ['BsmtExposure', 'BsmtFinType2', 'BsmtCond', 'BsmtQual']]
data.drop(mystery_row, axis=0, inplace=True)
code
128034284/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import norm, zscore
from sklearn.compose import make_column_transformer
from sklearn.compose import make_column_selector
from sklearn.ensemble import GradientBoostingRegressor, HistGradientBoostingRegressor, HistGradientBoostingClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split, cross_val_score, RandomizedSearchCV
from sklearn.model_selection import cross_validate
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler, OrdinalEncoder
from sklearn.utils import shuffle
from sklearn.experimental import enable_halving_search_cv
from sklearn.model_selection import HalvingGridSearchCV, HalvingRandomSearchCV
sns.set_theme()
code
2019512/cell_9
[ "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_6.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_9.png" ]
from sklearn.cluster import KMeans, SpectralClustering, AgglomerativeClustering
from sklearn.feature_selection import VarianceThreshold
from sklearn.manifold import MDS
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import numpy as np
from subprocess import check_output
frame = pd.read_csv('../input/HR_comma_sep.csv').rename(columns={'sales': 'position'})
pd.DataFrame({'dtypes': frame.dtypes, 'isnull': pd.isnull(frame).any(), 'count distinct': [len(frame[name].unique()) for name in frame.columns], 'max': frame.select_dtypes(exclude=['object']).max(), 'min': frame.select_dtypes(exclude=['object']).min(), 'std': frame.select_dtypes(exclude=['object']).std()}, index=frame.columns).sort_values('count distinct', ascending=False)
unique_cat = set(frame['position'].unique()) | {'unknown'}
frame['position'] = pd.Categorical(frame['position'], categories=unique_cat)
frame['position_'] = frame['position'].cat.codes
frame['salary'] = pd.Categorical(frame['salary'], categories=['low', 'medium', 'unknown', 'high'], ordered=True)
frame['salary_'] = frame['salary'].cat.codes
frame_ = pd.get_dummies(frame.drop(['salary_', 'position_'], axis=1)).sample(3000)
sampled_index = frame_.index
selector = VarianceThreshold(threshold=0.95 * (1 - 0.95))
frame_ = selector.fit_transform(frame_)
mds = MDS(n_components=2)
scaler = StandardScaler(with_mean=False)
frame_ = scaler.fit_transform(frame_)
frame_ = mds.fit_transform(frame_)
pc_1 = [frame_[i][0] for i in range(len(frame_))]
pc_2 = [frame_[i][1] for i in range(len(frame_))]

### Clustering with kmeans ###
def plot_pc(label, frame):
    pc_1 = frame[frame['label'] == label].loc[:, 'pc1']
    pc_2 = frame[frame['label'] == label].loc[:, 'pc2']
    plt.scatter(pc_1, pc_2, label=label)
    plt.legend()

def plot_clusters(function, n_clusters, function_kwargs=None):
    if function_kwargs is None:
        function_kwargs = dict()
    model = function(n_clusters=n_clusters, **function_kwargs)
    labels = model.fit_predict(frame_)
    results = pd.DataFrame({'label': labels, 'pc1': pc_1, 'pc2': pc_2})
    for i in range(n_clusters):
        plot_pc(i, results)
    plt.title('{} ({} clusters)'.format(function.__name__, n_clusters))

fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 13))
for i, ax in zip(range(2, 10, 2), fig.axes):
    plt.subplot(ax)
    plot_clusters(KMeans, n_clusters=i, function_kwargs={'init': 'random'})
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 13))
for i, ax in zip(range(2, 10, 2), fig.axes):
    plt.subplot(ax)
    plot_clusters(AgglomerativeClustering, n_clusters=i, function_kwargs={'linkage': 'ward', 'affinity': 'euclidean'})
n_clusters = list(range(1, 20))
score = []
for n in n_clusters:
    model = KMeans(n_clusters=n).fit(frame_)
    score.append(model.inertia_)
plt.figure(figsize=(10, 8))
plt.plot(n_clusters, score)
plt.xlabel('number of clusters')
plt.ylabel('score')
plt.title('Elbow Method')
sns.despine()
ax = plt.gca()
ax.xaxis.get_ticklabels()
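# model.inertia_ is the within-cluster sum of squared distances to the
# centroids; the "elbow" where the curve stops dropping sharply is read off the
# plot as a reasonable number of clusters.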
code
2019512/cell_6
[ "image_output_1.png" ]
from sklearn.feature_selection import VarianceThreshold
from sklearn.manifold import MDS
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
from subprocess import check_output
frame = pd.read_csv('../input/HR_comma_sep.csv').rename(columns={'sales': 'position'})
pd.DataFrame({'dtypes': frame.dtypes, 'isnull': pd.isnull(frame).any(), 'count distinct': [len(frame[name].unique()) for name in frame.columns], 'max': frame.select_dtypes(exclude=['object']).max(), 'min': frame.select_dtypes(exclude=['object']).min(), 'std': frame.select_dtypes(exclude=['object']).std()}, index=frame.columns).sort_values('count distinct', ascending=False)
unique_cat = set(frame['position'].unique()) | {'unknown'}
frame['position'] = pd.Categorical(frame['position'], categories=unique_cat)
frame['position_'] = frame['position'].cat.codes
frame['salary'] = pd.Categorical(frame['salary'], categories=['low', 'medium', 'unknown', 'high'], ordered=True)
frame['salary_'] = frame['salary'].cat.codes
frame_ = pd.get_dummies(frame.drop(['salary_', 'position_'], axis=1)).sample(3000)
sampled_index = frame_.index
selector = VarianceThreshold(threshold=0.95 * (1 - 0.95))
frame_ = selector.fit_transform(frame_)
print('{} features used'.format(frame_.shape[1]))
mds = MDS(n_components=2)
scaler = StandardScaler(with_mean=False)
frame_ = scaler.fit_transform(frame_)
frame_ = mds.fit_transform(frame_)
pc_1 = [frame_[i][0] for i in range(len(frame_))]
pc_2 = [frame_[i][1] for i in range(len(frame_))]
plt.figure(figsize=(10, 8))
plt.scatter(pc_1, pc_2, color='green')
plt.xlabel('pc 1')
plt.ylabel('pc 2')
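# StandardScaler(with_mean=False) rescales each feature's variance without
# centering it (the form that would also be valid on sparse input); MDS then
# embeds the pairwise distances between rows into two components for plotting.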
code
2019512/cell_2
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
from subprocess import check_output
frame = pd.read_csv('../input/HR_comma_sep.csv').rename(columns={'sales': 'position'})
frame.iloc[:, 5:].head()
code
2019512/cell_1
[ "text_html_output_1.png" ]
from subprocess import check_output
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
frame = pd.read_csv('../input/HR_comma_sep.csv').rename(columns={'sales': 'position'})
frame.iloc[:, :5].head()
code
2019512/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.cluster import KMeans, SpectralClustering, AgglomerativeClustering
from sklearn.feature_selection import VarianceThreshold
from sklearn.manifold import MDS
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
from subprocess import check_output
frame = pd.read_csv('../input/HR_comma_sep.csv').rename(columns={'sales': 'position'})
pd.DataFrame({'dtypes': frame.dtypes, 'isnull': pd.isnull(frame).any(), 'count distinct': [len(frame[name].unique()) for name in frame.columns], 'max': frame.select_dtypes(exclude=['object']).max(), 'min': frame.select_dtypes(exclude=['object']).min(), 'std': frame.select_dtypes(exclude=['object']).std()}, index=frame.columns).sort_values('count distinct', ascending=False)
unique_cat = set(frame['position'].unique()) | {'unknown'}
frame['position'] = pd.Categorical(frame['position'], categories=unique_cat)
frame['position_'] = frame['position'].cat.codes
frame['salary'] = pd.Categorical(frame['salary'], categories=['low', 'medium', 'unknown', 'high'], ordered=True)
frame['salary_'] = frame['salary'].cat.codes
frame_ = pd.get_dummies(frame.drop(['salary_', 'position_'], axis=1)).sample(3000)
sampled_index = frame_.index
selector = VarianceThreshold(threshold=0.95 * (1 - 0.95))
frame_ = selector.fit_transform(frame_)
mds = MDS(n_components=2)
scaler = StandardScaler(with_mean=False)
frame_ = scaler.fit_transform(frame_)
frame_ = mds.fit_transform(frame_)
pc_1 = [frame_[i][0] for i in range(len(frame_))]
pc_2 = [frame_[i][1] for i in range(len(frame_))]

def plot_pc(label, frame):
    pc_1 = frame[frame['label'] == label].loc[:, 'pc1']
    pc_2 = frame[frame['label'] == label].loc[:, 'pc2']
    plt.scatter(pc_1, pc_2, label=label)
    plt.legend()

def plot_clusters(function, n_clusters, function_kwargs=None):
    if function_kwargs is None:
        function_kwargs = dict()
    model = function(n_clusters=n_clusters, **function_kwargs)
    labels = model.fit_predict(frame_)
    results = pd.DataFrame({'label': labels, 'pc1': pc_1, 'pc2': pc_2})
    for i in range(n_clusters):
        plot_pc(i, results)
    plt.title('{} ({} clusters)'.format(function.__name__, n_clusters))

fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 13))
for i, ax in zip(range(2, 10, 2), fig.axes):
    plt.subplot(ax)
    plot_clusters(KMeans, n_clusters=i, function_kwargs={'init': 'random'})
code
2019512/cell_8
[ "image_output_1.png" ]
from sklearn.cluster import KMeans, SpectralClustering, AgglomerativeClustering
from sklearn.feature_selection import VarianceThreshold
from sklearn.manifold import MDS
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
from subprocess import check_output
frame = pd.read_csv('../input/HR_comma_sep.csv').rename(columns={'sales': 'position'})
pd.DataFrame({'dtypes': frame.dtypes, 'isnull': pd.isnull(frame).any(), 'count distinct': [len(frame[name].unique()) for name in frame.columns], 'max': frame.select_dtypes(exclude=['object']).max(), 'min': frame.select_dtypes(exclude=['object']).min(), 'std': frame.select_dtypes(exclude=['object']).std()}, index=frame.columns).sort_values('count distinct', ascending=False)
unique_cat = set(frame['position'].unique()) | {'unknown'}
frame['position'] = pd.Categorical(frame['position'], categories=unique_cat)
frame['position_'] = frame['position'].cat.codes
frame['salary'] = pd.Categorical(frame['salary'], categories=['low', 'medium', 'unknown', 'high'], ordered=True)
frame['salary_'] = frame['salary'].cat.codes
frame_ = pd.get_dummies(frame.drop(['salary_', 'position_'], axis=1)).sample(3000)
sampled_index = frame_.index
selector = VarianceThreshold(threshold=0.95 * (1 - 0.95))
frame_ = selector.fit_transform(frame_)
mds = MDS(n_components=2)
scaler = StandardScaler(with_mean=False)
frame_ = scaler.fit_transform(frame_)
frame_ = mds.fit_transform(frame_)
pc_1 = [frame_[i][0] for i in range(len(frame_))]
pc_2 = [frame_[i][1] for i in range(len(frame_))]

### Clustering with kmeans ###
def plot_pc(label, frame):
    pc_1 = frame[frame['label'] == label].loc[:, 'pc1']
    pc_2 = frame[frame['label'] == label].loc[:, 'pc2']
    plt.scatter(pc_1, pc_2, label=label)
    plt.legend()

def plot_clusters(function, n_clusters, function_kwargs=None):
    if function_kwargs is None:
        function_kwargs = dict()
    model = function(n_clusters=n_clusters, **function_kwargs)
    labels = model.fit_predict(frame_)
    results = pd.DataFrame({'label': labels, 'pc1': pc_1, 'pc2': pc_2})
    for i in range(n_clusters):
        plot_pc(i, results)
    plt.title('{} ({} clusters)'.format(function.__name__, n_clusters))

fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 13))
for i, ax in zip(range(2, 10, 2), fig.axes):
    plt.subplot(ax)
    plot_clusters(KMeans, n_clusters=i, function_kwargs={'init': 'random'})
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 13))
for i, ax in zip(range(2, 10, 2), fig.axes):
    plt.subplot(ax)
    plot_clusters(AgglomerativeClustering, n_clusters=i, function_kwargs={'linkage': 'ward', 'affinity': 'euclidean'})
code
2019512/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
from subprocess import check_output
frame = pd.read_csv('../input/HR_comma_sep.csv').rename(columns={'sales': 'position'})
pd.DataFrame({'dtypes': frame.dtypes, 'isnull': pd.isnull(frame).any(), 'count distinct': [len(frame[name].unique()) for name in frame.columns], 'max': frame.select_dtypes(exclude=['object']).max(), 'min': frame.select_dtypes(exclude=['object']).min(), 'std': frame.select_dtypes(exclude=['object']).std()}, index=frame.columns).sort_values('count distinct', ascending=False)
code
2019512/cell_10
[ "text_html_output_1.png" ]
from sklearn.cluster import KMeans, SpectralClustering, AgglomerativeClustering
from sklearn.feature_selection import VarianceThreshold
from sklearn.manifold import MDS
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import numpy as np
from subprocess import check_output
frame = pd.read_csv('../input/HR_comma_sep.csv').rename(columns={'sales': 'position'})
pd.DataFrame({'dtypes': frame.dtypes, 'isnull': pd.isnull(frame).any(), 'count distinct': [len(frame[name].unique()) for name in frame.columns], 'max': frame.select_dtypes(exclude=['object']).max(), 'min': frame.select_dtypes(exclude=['object']).min(), 'std': frame.select_dtypes(exclude=['object']).std()}, index=frame.columns).sort_values('count distinct', ascending=False)
unique_cat = set(frame['position'].unique()) | {'unknown'}
frame['position'] = pd.Categorical(frame['position'], categories=unique_cat)
frame['position_'] = frame['position'].cat.codes
frame['salary'] = pd.Categorical(frame['salary'], categories=['low', 'medium', 'unknown', 'high'], ordered=True)
frame['salary_'] = frame['salary'].cat.codes
frame_ = pd.get_dummies(frame.drop(['salary_', 'position_'], axis=1)).sample(3000)
sampled_index = frame_.index
selector = VarianceThreshold(threshold=0.95 * (1 - 0.95))
frame_ = selector.fit_transform(frame_)
mds = MDS(n_components=2)
scaler = StandardScaler(with_mean=False)
frame_ = scaler.fit_transform(frame_)
frame_ = mds.fit_transform(frame_)
pc_1 = [frame_[i][0] for i in range(len(frame_))]
pc_2 = [frame_[i][1] for i in range(len(frame_))]

### Clustering with kmeans ###
def plot_pc(label, frame):
    pc_1 = frame[frame['label'] == label].loc[:, 'pc1']
    pc_2 = frame[frame['label'] == label].loc[:, 'pc2']
    plt.scatter(pc_1, pc_2, label=label)
    plt.legend()

def plot_clusters(function, n_clusters, function_kwargs=None):
    if function_kwargs is None:
        function_kwargs = dict()
    model = function(n_clusters=n_clusters, **function_kwargs)
    labels = model.fit_predict(frame_)
    results = pd.DataFrame({'label': labels, 'pc1': pc_1, 'pc2': pc_2})
    for i in range(n_clusters):
        plot_pc(i, results)
    plt.title('{} ({} clusters)'.format(function.__name__, n_clusters))

fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 13))
for i, ax in zip(range(2, 10, 2), fig.axes):
    plt.subplot(ax)
    plot_clusters(KMeans, n_clusters=i, function_kwargs={'init': 'random'})
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 13))
for i, ax in zip(range(2, 10, 2), fig.axes):
    plt.subplot(ax)
    plot_clusters(AgglomerativeClustering, n_clusters=i, function_kwargs={'linkage': 'ward', 'affinity': 'euclidean'})
n_clusters = list(range(1, 20))
score = []
for n in n_clusters:
    model = KMeans(n_clusters=n).fit(frame_)
    score.append(model.inertia_)
sns.despine()
ax = plt.gca()
ax.xaxis.get_ticklabels()
n_clusters = 5
model = AgglomerativeClustering(n_clusters=n_clusters, linkage='ward', affinity='euclidean')
model.fit(frame_)
labels = model.labels_
results = pd.DataFrame({'label': labels, 'pc1': pc_1, 'pc2': pc_2})
plt.figure(figsize=(10, 8))
for i in range(n_clusters):
    plot_pc(i, results)
code