path: stringlengths 13 to 17
screenshot_names: sequencelengths 1 to 873
code: stringlengths 0 to 40.4k
cell_type: stringclasses (1 value)
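Each record below pairs a notebook cell identifier (path), the names of its rendered output screenshots (screenshot_names), the cell's source (code), and its cell_type. As a minimal sketch of how such records could be consumed, assuming the dump were exported to a hypothetical JSON Lines file named notebook_cells.jsonl (the actual storage format is not stated here):

# Sketch only: iterate records matching the schema above.
# 'notebook_cells.jsonl' is a hypothetical export name, not part of this dump.
import pandas as pd

records = pd.read_json('notebook_cells.jsonl', lines=True)
for _, row in records.iterrows():
    # row['code'] holds the raw source of one notebook cell
    print(row['path'], row['cell_type'], len(row['screenshot_names']))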
90122454/cell_33
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data_test = pd.read_csv(dirname + '/' + filenames[1])
data_train = pd.read_csv(dirname + '/' + filenames[0])
data_train.Age.kurt()
data_train.Age.skew()
sns.displot(data_train.SibSp, kde=True)
code
90122454/cell_29
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data_test = pd.read_csv(dirname + '/' + filenames[1])
data_train = pd.read_csv(dirname + '/' + filenames[0])
data_train.Age.kurt()
data_train.Age.skew()
code
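The cell_29 record above ends with pandas' Series.kurt() and Series.skew(), which summarize the tail weight and asymmetry of the Age column. A self-contained illustration on synthetic data (not the Titanic file, whose dirname and filenames variables are defined elsewhere in that notebook):

# Illustration of the skew/kurtosis calls used above, on synthetic data.
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
ages = pd.Series(rng.gamma(shape=2.0, scale=15.0, size=1000))  # right-skewed sample

print(ages.skew())  # > 0: longer right tail
print(ages.kurt())  # excess kurtosis; > 0: heavier tails than a normal distribution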
90122454/cell_19
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data_test = pd.read_csv(dirname + '/' + filenames[1])
data_train = pd.read_csv(dirname + '/' + filenames[0])
sns.displot(data_train.Pclass, kde=False)
code
90122454/cell_18
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data_test = pd.read_csv(dirname + '/' + filenames[1])
data_train = pd.read_csv(dirname + '/' + filenames[0])
data_train.Pclass.describe()
code
90122454/cell_32
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data_test = pd.read_csv(dirname + '/' + filenames[1])
data_train = pd.read_csv(dirname + '/' + filenames[0])
data_train.Age.kurt()
data_train.Age.skew()
data_train.SibSp.describe()
code
90122454/cell_28
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data_test = pd.read_csv(dirname + '/' + filenames[1])
data_train = pd.read_csv(dirname + '/' + filenames[0])
data_train.Age.kurt()
code
90122454/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data_test = pd.read_csv(dirname + '/' + filenames[1])
data_train = pd.read_csv(dirname + '/' + filenames[0])
sns.displot(data_train.Survived, kde=False)
code
90122454/cell_16
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data_test = pd.read_csv(dirname + '/' + filenames[1])
data_train = pd.read_csv(dirname + '/' + filenames[0])
plt.figure(figsize=(24, 4))
sns.barplot(x=data_train.index[0:200], y=data_train.Survived[0:200])
code
90122454/cell_35
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data_test = pd.read_csv(dirname + '/' + filenames[1])
data_train = pd.read_csv(dirname + '/' + filenames[0])
data_train.Age.kurt()
data_train.Age.skew()
data_train.Parch.describe()
code
90122454/cell_24
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data_test = pd.read_csv(dirname + '/' + filenames[1])
data_train = pd.read_csv(dirname + '/' + filenames[0])
data_train.Sex.describe()
code
90122454/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data_test = pd.read_csv(dirname + '/' + filenames[1])
data_train = pd.read_csv(dirname + '/' + filenames[0])
data_train.Survived.describe()
code
90122454/cell_22
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data_test = pd.read_csv(dirname + '/' + filenames[1])
data_train = pd.read_csv(dirname + '/' + filenames[0])
data_train['Name'].value_counts()
code
90122454/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data_test = pd.read_csv(dirname + '/' + filenames[1])
data_train = pd.read_csv(dirname + '/' + filenames[0])
data_train.head()
code
90122454/cell_27
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data_test = pd.read_csv(dirname + '/' + filenames[1])
data_train = pd.read_csv(dirname + '/' + filenames[0])
data_train.Age.describe()
code
90122454/cell_12
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data_test = pd.read_csv(dirname + '/' + filenames[1])
data_train = pd.read_csv(dirname + '/' + filenames[0])
data_train.PassengerId.describe()
code
90122454/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.stats import norm, expon
from sklearn.feature_selection import mutual_info_classif, mutual_info_regression
from sklearn.preprocessing import OrdinalEncoder, OneHotEncoder, StandardScaler, minmax_scale
from sklearn.metrics import accuracy_score
from sklearn.model_selection import cross_val_score, train_test_split, cross_val_predict
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.linear_model import RidgeClassifierCV
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
from keras.layers import Dense
from keras.models import Sequential
from keras.regularizers import l1
from lightgbm import LGBMClassifier
from optuna import create_study
from optuna.visualization import plot_optimization_history, plot_parallel_coordinate, plot_contour, plot_slice, plot_param_importances, plot_edf
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
90137202/cell_42
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set() from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score import statsmodels.api as sm pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 200) data = pd.read_csv('/kaggle/input/used-phone-data/used_phone_data.csv') data.sample(n=5, random_state=1) data.shape df = data.copy() category_col = df.select_dtypes(exclude=np.number).columns.tolist() df[category_col] = df[category_col].astype('category') df.dtypes df.isnull().sum() df.isnull().sum() df.describe(include='all').T # function to plot a boxplot and a histogram along the same scale. def histogram_boxplot(data, feature, figsize=(12, 7), kde=False, bins=None): """ Boxplot and histogram combined data: dataframe feature: dataframe column figsize: size of figure (default (12,7)) kde: whether to the show density curve (default False) bins: number of bins for histogram (default None) """ f2, (ax_box2, ax_hist2) = plt.subplots( nrows=2, # Number of rows of the subplot grid= 2 sharex=True, # x-axis will be shared among all subplots gridspec_kw={"height_ratios": (0.25, 0.75)}, figsize=figsize, ) # creating the 2 subplots sns.boxplot( data=data, x=feature, ax=ax_box2, showmeans=True, color="violet" ) # boxplot will be created and a star will indicate the mean value of the column sns.histplot( data=data, x=feature, kde=kde, ax=ax_hist2, bins=bins, palette="winter" ) if bins else sns.histplot( data=data, x=feature, kde=kde, ax=ax_hist2 ) # For histogram ax_hist2.axvline( data[feature].mean(), color="green", linestyle="--" ) # Add mean to the histogram ax_hist2.axvline( data[feature].median(), color="black", linestyle="-" ) # Add median to the histogram # function to create labeled barplots for categorical and numerical variables def labeled_barplot(data, feature, perc=False, n=None): """ Barplot with percentage at the top data: dataframe feature: dataframe column perc: whether to display percentages instead of count (default is False) n: displays the top n category levels (default is None, i.e., display all levels) """ total = len(data[feature]) # length of the column count = data[feature].nunique() if n is None: plt.figure(figsize=(count + 1, 5)) else: plt.figure(figsize=(n + 1, 5)) plt.xticks(rotation=90, fontsize=15) ax = sns.countplot( data=data, x=feature, palette="Paired", order=data[feature].value_counts().index[:n].sort_values(), ) for p in ax.patches: if perc == True: label = "{:.1f}%".format( 100 * p.get_height() / total ) # percentage of each class of the category else: label = p.get_height() # count of each level of the category x = p.get_x() + p.get_width() / 2 # width of the plot y = p.get_height() # height of the plot ax.annotate( label, (x, y), ha="center", va="center", size=12, xytext=(0, 5), textcoords="offset points", ) # annotate the percentage # show the plot labeled_barplot(df, 'int_memory', perc=True)
code
90137202/cell_13
[ "text_html_output_1.png" ]
import numpy as np
import numpy as np  # linear algebra
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

sns.set()
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
import statsmodels.api as sm

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 200)
data = pd.read_csv('/kaggle/input/used-phone-data/used_phone_data.csv')
data.sample(n=5, random_state=1)
data.shape
df = data.copy()
category_col = df.select_dtypes(exclude=np.number).columns.tolist()
df[category_col] = df[category_col].astype('category')
df.dtypes
code
90137202/cell_9
[ "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

sns.set()
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
import statsmodels.api as sm

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 200)
data = pd.read_csv('/kaggle/input/used-phone-data/used_phone_data.csv')
data.sample(n=5, random_state=1)
data.shape
code
90137202/cell_30
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set() from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score import statsmodels.api as sm pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 200) data = pd.read_csv('/kaggle/input/used-phone-data/used_phone_data.csv') data.sample(n=5, random_state=1) data.shape df = data.copy() category_col = df.select_dtypes(exclude=np.number).columns.tolist() df[category_col] = df[category_col].astype('category') df.dtypes df.isnull().sum() df.isnull().sum() df.describe(include='all').T # function to plot a boxplot and a histogram along the same scale. def histogram_boxplot(data, feature, figsize=(12, 7), kde=False, bins=None): """ Boxplot and histogram combined data: dataframe feature: dataframe column figsize: size of figure (default (12,7)) kde: whether to the show density curve (default False) bins: number of bins for histogram (default None) """ f2, (ax_box2, ax_hist2) = plt.subplots( nrows=2, # Number of rows of the subplot grid= 2 sharex=True, # x-axis will be shared among all subplots gridspec_kw={"height_ratios": (0.25, 0.75)}, figsize=figsize, ) # creating the 2 subplots sns.boxplot( data=data, x=feature, ax=ax_box2, showmeans=True, color="violet" ) # boxplot will be created and a star will indicate the mean value of the column sns.histplot( data=data, x=feature, kde=kde, ax=ax_hist2, bins=bins, palette="winter" ) if bins else sns.histplot( data=data, x=feature, kde=kde, ax=ax_hist2 ) # For histogram ax_hist2.axvline( data[feature].mean(), color="green", linestyle="--" ) # Add mean to the histogram ax_hist2.axvline( data[feature].median(), color="black", linestyle="-" ) # Add median to the histogram histogram_boxplot(df, 'days_used')
code
90137202/cell_33
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set() from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score import statsmodels.api as sm pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 200) data = pd.read_csv('/kaggle/input/used-phone-data/used_phone_data.csv') data.sample(n=5, random_state=1) data.shape df = data.copy() category_col = df.select_dtypes(exclude=np.number).columns.tolist() df[category_col] = df[category_col].astype('category') df.dtypes df.isnull().sum() df.isnull().sum() df.describe(include='all').T # function to plot a boxplot and a histogram along the same scale. def histogram_boxplot(data, feature, figsize=(12, 7), kde=False, bins=None): """ Boxplot and histogram combined data: dataframe feature: dataframe column figsize: size of figure (default (12,7)) kde: whether to the show density curve (default False) bins: number of bins for histogram (default None) """ f2, (ax_box2, ax_hist2) = plt.subplots( nrows=2, # Number of rows of the subplot grid= 2 sharex=True, # x-axis will be shared among all subplots gridspec_kw={"height_ratios": (0.25, 0.75)}, figsize=figsize, ) # creating the 2 subplots sns.boxplot( data=data, x=feature, ax=ax_box2, showmeans=True, color="violet" ) # boxplot will be created and a star will indicate the mean value of the column sns.histplot( data=data, x=feature, kde=kde, ax=ax_hist2, bins=bins, palette="winter" ) if bins else sns.histplot( data=data, x=feature, kde=kde, ax=ax_hist2 ) # For histogram ax_hist2.axvline( data[feature].mean(), color="green", linestyle="--" ) # Add mean to the histogram ax_hist2.axvline( data[feature].median(), color="black", linestyle="-" ) # Add median to the histogram # function to create labeled barplots for categorical and numerical variables def labeled_barplot(data, feature, perc=False, n=None): """ Barplot with percentage at the top data: dataframe feature: dataframe column perc: whether to display percentages instead of count (default is False) n: displays the top n category levels (default is None, i.e., display all levels) """ total = len(data[feature]) # length of the column count = data[feature].nunique() if n is None: plt.figure(figsize=(count + 1, 5)) else: plt.figure(figsize=(n + 1, 5)) plt.xticks(rotation=90, fontsize=15) ax = sns.countplot( data=data, x=feature, palette="Paired", order=data[feature].value_counts().index[:n].sort_values(), ) for p in ax.patches: if perc == True: label = "{:.1f}%".format( 100 * p.get_height() / total ) # percentage of each class of the category else: label = p.get_height() # count of each level of the category x = p.get_x() + p.get_width() / 2 # width of the plot y = p.get_height() # height of the plot ax.annotate( label, (x, y), ha="center", va="center", size=12, xytext=(0, 5), textcoords="offset points", ) # annotate the percentage # show the plot labeled_barplot(df, 'brand_name', perc=True)
code
90137202/cell_44
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set() from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score import statsmodels.api as sm pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 200) data = pd.read_csv('/kaggle/input/used-phone-data/used_phone_data.csv') data.sample(n=5, random_state=1) data.shape df = data.copy() category_col = df.select_dtypes(exclude=np.number).columns.tolist() df[category_col] = df[category_col].astype('category') df.dtypes df.isnull().sum() df.isnull().sum() df.describe(include='all').T # function to plot a boxplot and a histogram along the same scale. def histogram_boxplot(data, feature, figsize=(12, 7), kde=False, bins=None): """ Boxplot and histogram combined data: dataframe feature: dataframe column figsize: size of figure (default (12,7)) kde: whether to the show density curve (default False) bins: number of bins for histogram (default None) """ f2, (ax_box2, ax_hist2) = plt.subplots( nrows=2, # Number of rows of the subplot grid= 2 sharex=True, # x-axis will be shared among all subplots gridspec_kw={"height_ratios": (0.25, 0.75)}, figsize=figsize, ) # creating the 2 subplots sns.boxplot( data=data, x=feature, ax=ax_box2, showmeans=True, color="violet" ) # boxplot will be created and a star will indicate the mean value of the column sns.histplot( data=data, x=feature, kde=kde, ax=ax_hist2, bins=bins, palette="winter" ) if bins else sns.histplot( data=data, x=feature, kde=kde, ax=ax_hist2 ) # For histogram ax_hist2.axvline( data[feature].mean(), color="green", linestyle="--" ) # Add mean to the histogram ax_hist2.axvline( data[feature].median(), color="black", linestyle="-" ) # Add median to the histogram # function to create labeled barplots for categorical and numerical variables def labeled_barplot(data, feature, perc=False, n=None): """ Barplot with percentage at the top data: dataframe feature: dataframe column perc: whether to display percentages instead of count (default is False) n: displays the top n category levels (default is None, i.e., display all levels) """ total = len(data[feature]) # length of the column count = data[feature].nunique() if n is None: plt.figure(figsize=(count + 1, 5)) else: plt.figure(figsize=(n + 1, 5)) plt.xticks(rotation=90, fontsize=15) ax = sns.countplot( data=data, x=feature, palette="Paired", order=data[feature].value_counts().index[:n].sort_values(), ) for p in ax.patches: if perc == True: label = "{:.1f}%".format( 100 * p.get_height() / total ) # percentage of each class of the category else: label = p.get_height() # count of each level of the category x = p.get_x() + p.get_width() / 2 # width of the plot y = p.get_height() # height of the plot ax.annotate( label, (x, y), ha="center", va="center", size=12, xytext=(0, 5), textcoords="offset points", ) # annotate the percentage # show the plot labeled_barplot(df, 'selfie_camera_mp', perc=True)
code
90137202/cell_20
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set() from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score import statsmodels.api as sm pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 200) data = pd.read_csv('/kaggle/input/used-phone-data/used_phone_data.csv') data.sample(n=5, random_state=1) data.shape df = data.copy() category_col = df.select_dtypes(exclude=np.number).columns.tolist() df[category_col] = df[category_col].astype('category') df.dtypes df.isnull().sum() df.isnull().sum() df.describe(include='all').T # function to plot a boxplot and a histogram along the same scale. def histogram_boxplot(data, feature, figsize=(12, 7), kde=False, bins=None): """ Boxplot and histogram combined data: dataframe feature: dataframe column figsize: size of figure (default (12,7)) kde: whether to the show density curve (default False) bins: number of bins for histogram (default None) """ f2, (ax_box2, ax_hist2) = plt.subplots( nrows=2, # Number of rows of the subplot grid= 2 sharex=True, # x-axis will be shared among all subplots gridspec_kw={"height_ratios": (0.25, 0.75)}, figsize=figsize, ) # creating the 2 subplots sns.boxplot( data=data, x=feature, ax=ax_box2, showmeans=True, color="violet" ) # boxplot will be created and a star will indicate the mean value of the column sns.histplot( data=data, x=feature, kde=kde, ax=ax_hist2, bins=bins, palette="winter" ) if bins else sns.histplot( data=data, x=feature, kde=kde, ax=ax_hist2 ) # For histogram ax_hist2.axvline( data[feature].mean(), color="green", linestyle="--" ) # Add mean to the histogram ax_hist2.axvline( data[feature].median(), color="black", linestyle="-" ) # Add median to the histogram histogram_boxplot(df, 'used_price')
code
90137202/cell_40
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set() from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score import statsmodels.api as sm pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 200) data = pd.read_csv('/kaggle/input/used-phone-data/used_phone_data.csv') data.sample(n=5, random_state=1) data.shape df = data.copy() category_col = df.select_dtypes(exclude=np.number).columns.tolist() df[category_col] = df[category_col].astype('category') df.dtypes df.isnull().sum() df.isnull().sum() df.describe(include='all').T # function to plot a boxplot and a histogram along the same scale. def histogram_boxplot(data, feature, figsize=(12, 7), kde=False, bins=None): """ Boxplot and histogram combined data: dataframe feature: dataframe column figsize: size of figure (default (12,7)) kde: whether to the show density curve (default False) bins: number of bins for histogram (default None) """ f2, (ax_box2, ax_hist2) = plt.subplots( nrows=2, # Number of rows of the subplot grid= 2 sharex=True, # x-axis will be shared among all subplots gridspec_kw={"height_ratios": (0.25, 0.75)}, figsize=figsize, ) # creating the 2 subplots sns.boxplot( data=data, x=feature, ax=ax_box2, showmeans=True, color="violet" ) # boxplot will be created and a star will indicate the mean value of the column sns.histplot( data=data, x=feature, kde=kde, ax=ax_hist2, bins=bins, palette="winter" ) if bins else sns.histplot( data=data, x=feature, kde=kde, ax=ax_hist2 ) # For histogram ax_hist2.axvline( data[feature].mean(), color="green", linestyle="--" ) # Add mean to the histogram ax_hist2.axvline( data[feature].median(), color="black", linestyle="-" ) # Add median to the histogram # function to create labeled barplots for categorical and numerical variables def labeled_barplot(data, feature, perc=False, n=None): """ Barplot with percentage at the top data: dataframe feature: dataframe column perc: whether to display percentages instead of count (default is False) n: displays the top n category levels (default is None, i.e., display all levels) """ total = len(data[feature]) # length of the column count = data[feature].nunique() if n is None: plt.figure(figsize=(count + 1, 5)) else: plt.figure(figsize=(n + 1, 5)) plt.xticks(rotation=90, fontsize=15) ax = sns.countplot( data=data, x=feature, palette="Paired", order=data[feature].value_counts().index[:n].sort_values(), ) for p in ax.patches: if perc == True: label = "{:.1f}%".format( 100 * p.get_height() / total ) # percentage of each class of the category else: label = p.get_height() # count of each level of the category x = p.get_x() + p.get_width() / 2 # width of the plot y = p.get_height() # height of the plot ax.annotate( label, (x, y), ha="center", va="center", size=12, xytext=(0, 5), textcoords="offset points", ) # annotate the percentage # show the plot labeled_barplot(df, 'ram', perc=True)
code
90137202/cell_39
[ "image_output_1.png" ]
import numpy as np
import numpy as np  # linear algebra
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

sns.set()
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
import statsmodels.api as sm

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 200)
data = pd.read_csv('/kaggle/input/used-phone-data/used_phone_data.csv')
data.sample(n=5, random_state=1)
data.shape
df = data.copy()
category_col = df.select_dtypes(exclude=np.number).columns.tolist()
df[category_col] = df[category_col].astype('category')
df.dtypes
df.isnull().sum()
df.isnull().sum()
df.describe(include='all').T
df['ram'].nunique()
code
90137202/cell_26
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set() from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score import statsmodels.api as sm pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 200) data = pd.read_csv('/kaggle/input/used-phone-data/used_phone_data.csv') data.sample(n=5, random_state=1) data.shape df = data.copy() category_col = df.select_dtypes(exclude=np.number).columns.tolist() df[category_col] = df[category_col].astype('category') df.dtypes df.isnull().sum() df.isnull().sum() df.describe(include='all').T # function to plot a boxplot and a histogram along the same scale. def histogram_boxplot(data, feature, figsize=(12, 7), kde=False, bins=None): """ Boxplot and histogram combined data: dataframe feature: dataframe column figsize: size of figure (default (12,7)) kde: whether to the show density curve (default False) bins: number of bins for histogram (default None) """ f2, (ax_box2, ax_hist2) = plt.subplots( nrows=2, # Number of rows of the subplot grid= 2 sharex=True, # x-axis will be shared among all subplots gridspec_kw={"height_ratios": (0.25, 0.75)}, figsize=figsize, ) # creating the 2 subplots sns.boxplot( data=data, x=feature, ax=ax_box2, showmeans=True, color="violet" ) # boxplot will be created and a star will indicate the mean value of the column sns.histplot( data=data, x=feature, kde=kde, ax=ax_hist2, bins=bins, palette="winter" ) if bins else sns.histplot( data=data, x=feature, kde=kde, ax=ax_hist2 ) # For histogram ax_hist2.axvline( data[feature].mean(), color="green", linestyle="--" ) # Add mean to the histogram ax_hist2.axvline( data[feature].median(), color="black", linestyle="-" ) # Add median to the histogram histogram_boxplot(df, 'weight')
code
90137202/cell_48
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set() from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score import statsmodels.api as sm pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 200) data = pd.read_csv('/kaggle/input/used-phone-data/used_phone_data.csv') data.sample(n=5, random_state=1) data.shape df = data.copy() category_col = df.select_dtypes(exclude=np.number).columns.tolist() df[category_col] = df[category_col].astype('category') df.dtypes df.isnull().sum() df.isnull().sum() df.describe(include='all').T # function to plot a boxplot and a histogram along the same scale. def histogram_boxplot(data, feature, figsize=(12, 7), kde=False, bins=None): """ Boxplot and histogram combined data: dataframe feature: dataframe column figsize: size of figure (default (12,7)) kde: whether to the show density curve (default False) bins: number of bins for histogram (default None) """ f2, (ax_box2, ax_hist2) = plt.subplots( nrows=2, # Number of rows of the subplot grid= 2 sharex=True, # x-axis will be shared among all subplots gridspec_kw={"height_ratios": (0.25, 0.75)}, figsize=figsize, ) # creating the 2 subplots sns.boxplot( data=data, x=feature, ax=ax_box2, showmeans=True, color="violet" ) # boxplot will be created and a star will indicate the mean value of the column sns.histplot( data=data, x=feature, kde=kde, ax=ax_hist2, bins=bins, palette="winter" ) if bins else sns.histplot( data=data, x=feature, kde=kde, ax=ax_hist2 ) # For histogram ax_hist2.axvline( data[feature].mean(), color="green", linestyle="--" ) # Add mean to the histogram ax_hist2.axvline( data[feature].median(), color="black", linestyle="-" ) # Add median to the histogram # function to create labeled barplots for categorical and numerical variables def labeled_barplot(data, feature, perc=False, n=None): """ Barplot with percentage at the top data: dataframe feature: dataframe column perc: whether to display percentages instead of count (default is False) n: displays the top n category levels (default is None, i.e., display all levels) """ total = len(data[feature]) # length of the column count = data[feature].nunique() if n is None: plt.figure(figsize=(count + 1, 5)) else: plt.figure(figsize=(n + 1, 5)) plt.xticks(rotation=90, fontsize=15) ax = sns.countplot( data=data, x=feature, palette="Paired", order=data[feature].value_counts().index[:n].sort_values(), ) for p in ax.patches: if perc == True: label = "{:.1f}%".format( 100 * p.get_height() / total ) # percentage of each class of the category else: label = p.get_height() # count of each level of the category x = p.get_x() + p.get_width() / 2 # width of the plot y = p.get_height() # height of the plot ax.annotate( label, (x, y), ha="center", va="center", size=12, xytext=(0, 5), textcoords="offset points", ) # annotate the percentage # show the plot df.groupby('brand_name')['ram'].mean() plt.xticks(rotation=90) plt.xticks(rotation=90) numeric_columns = df.select_dtypes(include=np.number).columns.tolist() plt.figure(figsize=(15, 7)) sns.heatmap(df[numeric_columns].corr(), annot=True, vmin=-1, vmax=1, fmt='.2f', cmap='Spectral') plt.show()
code
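The cell_48 record above finishes by selecting the numeric columns and drawing an annotated correlation heatmap. Distilled to that step alone, on a small synthetic DataFrame rather than the used-phone data:

# Distilled correlation-heatmap step (synthetic stand-in for the used-phone data).
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

rng = np.random.default_rng(1)
df = pd.DataFrame({
    'screen_size': rng.normal(6.0, 0.5, 200),
    'battery': rng.normal(4000, 500, 200),
    'weight': rng.normal(180, 20, 200),
    'brand_name': ['A', 'B'] * 100,  # non-numeric column, excluded below
})

numeric_columns = df.select_dtypes(include=np.number).columns.tolist()
plt.figure(figsize=(8, 5))
sns.heatmap(df[numeric_columns].corr(), annot=True, vmin=-1, vmax=1, fmt='.2f', cmap='Spectral')
plt.show()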
90137202/cell_41
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set() from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score import statsmodels.api as sm pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 200) data = pd.read_csv('/kaggle/input/used-phone-data/used_phone_data.csv') data.sample(n=5, random_state=1) data.shape df = data.copy() category_col = df.select_dtypes(exclude=np.number).columns.tolist() df[category_col] = df[category_col].astype('category') df.dtypes df.isnull().sum() df.isnull().sum() df.describe(include='all').T # function to plot a boxplot and a histogram along the same scale. def histogram_boxplot(data, feature, figsize=(12, 7), kde=False, bins=None): """ Boxplot and histogram combined data: dataframe feature: dataframe column figsize: size of figure (default (12,7)) kde: whether to the show density curve (default False) bins: number of bins for histogram (default None) """ f2, (ax_box2, ax_hist2) = plt.subplots( nrows=2, # Number of rows of the subplot grid= 2 sharex=True, # x-axis will be shared among all subplots gridspec_kw={"height_ratios": (0.25, 0.75)}, figsize=figsize, ) # creating the 2 subplots sns.boxplot( data=data, x=feature, ax=ax_box2, showmeans=True, color="violet" ) # boxplot will be created and a star will indicate the mean value of the column sns.histplot( data=data, x=feature, kde=kde, ax=ax_hist2, bins=bins, palette="winter" ) if bins else sns.histplot( data=data, x=feature, kde=kde, ax=ax_hist2 ) # For histogram ax_hist2.axvline( data[feature].mean(), color="green", linestyle="--" ) # Add mean to the histogram ax_hist2.axvline( data[feature].median(), color="black", linestyle="-" ) # Add median to the histogram # function to create labeled barplots for categorical and numerical variables def labeled_barplot(data, feature, perc=False, n=None): """ Barplot with percentage at the top data: dataframe feature: dataframe column perc: whether to display percentages instead of count (default is False) n: displays the top n category levels (default is None, i.e., display all levels) """ total = len(data[feature]) # length of the column count = data[feature].nunique() if n is None: plt.figure(figsize=(count + 1, 5)) else: plt.figure(figsize=(n + 1, 5)) plt.xticks(rotation=90, fontsize=15) ax = sns.countplot( data=data, x=feature, palette="Paired", order=data[feature].value_counts().index[:n].sort_values(), ) for p in ax.patches: if perc == True: label = "{:.1f}%".format( 100 * p.get_height() / total ) # percentage of each class of the category else: label = p.get_height() # count of each level of the category x = p.get_x() + p.get_width() / 2 # width of the plot y = p.get_height() # height of the plot ax.annotate( label, (x, y), ha="center", va="center", size=12, xytext=(0, 5), textcoords="offset points", ) # annotate the percentage # show the plot labeled_barplot(df, 'release_year', perc=True)
code
90137202/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
90137202/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

sns.set()
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
import statsmodels.api as sm

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 200)
data = pd.read_csv('/kaggle/input/used-phone-data/used_phone_data.csv')
data.head()
code
90137202/cell_51
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set() from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score import statsmodels.api as sm pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 200) data = pd.read_csv('/kaggle/input/used-phone-data/used_phone_data.csv') data.sample(n=5, random_state=1) data.shape df = data.copy() category_col = df.select_dtypes(exclude=np.number).columns.tolist() df[category_col] = df[category_col].astype('category') df.dtypes df.isnull().sum() df.isnull().sum() df.describe(include='all').T # function to plot a boxplot and a histogram along the same scale. def histogram_boxplot(data, feature, figsize=(12, 7), kde=False, bins=None): """ Boxplot and histogram combined data: dataframe feature: dataframe column figsize: size of figure (default (12,7)) kde: whether to the show density curve (default False) bins: number of bins for histogram (default None) """ f2, (ax_box2, ax_hist2) = plt.subplots( nrows=2, # Number of rows of the subplot grid= 2 sharex=True, # x-axis will be shared among all subplots gridspec_kw={"height_ratios": (0.25, 0.75)}, figsize=figsize, ) # creating the 2 subplots sns.boxplot( data=data, x=feature, ax=ax_box2, showmeans=True, color="violet" ) # boxplot will be created and a star will indicate the mean value of the column sns.histplot( data=data, x=feature, kde=kde, ax=ax_hist2, bins=bins, palette="winter" ) if bins else sns.histplot( data=data, x=feature, kde=kde, ax=ax_hist2 ) # For histogram ax_hist2.axvline( data[feature].mean(), color="green", linestyle="--" ) # Add mean to the histogram ax_hist2.axvline( data[feature].median(), color="black", linestyle="-" ) # Add median to the histogram # function to create labeled barplots for categorical and numerical variables def labeled_barplot(data, feature, perc=False, n=None): """ Barplot with percentage at the top data: dataframe feature: dataframe column perc: whether to display percentages instead of count (default is False) n: displays the top n category levels (default is None, i.e., display all levels) """ total = len(data[feature]) # length of the column count = data[feature].nunique() if n is None: plt.figure(figsize=(count + 1, 5)) else: plt.figure(figsize=(n + 1, 5)) plt.xticks(rotation=90, fontsize=15) ax = sns.countplot( data=data, x=feature, palette="Paired", order=data[feature].value_counts().index[:n].sort_values(), ) for p in ax.patches: if perc == True: label = "{:.1f}%".format( 100 * p.get_height() / total ) # percentage of each class of the category else: label = p.get_height() # count of each level of the category x = p.get_x() + p.get_width() / 2 # width of the plot y = p.get_height() # height of the plot ax.annotate( label, (x, y), ha="center", va="center", size=12, xytext=(0, 5), textcoords="offset points", ) # annotate the percentage # show the plot df.groupby('brand_name')['ram'].mean() plt.xticks(rotation=90) plt.xticks(rotation=90) numeric_columns = df.select_dtypes(include=np.number).columns.tolist() df.isnull().sum()
code
90137202/cell_28
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set() from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score import statsmodels.api as sm pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 200) data = pd.read_csv('/kaggle/input/used-phone-data/used_phone_data.csv') data.sample(n=5, random_state=1) data.shape df = data.copy() category_col = df.select_dtypes(exclude=np.number).columns.tolist() df[category_col] = df[category_col].astype('category') df.dtypes df.isnull().sum() df.isnull().sum() df.describe(include='all').T # function to plot a boxplot and a histogram along the same scale. def histogram_boxplot(data, feature, figsize=(12, 7), kde=False, bins=None): """ Boxplot and histogram combined data: dataframe feature: dataframe column figsize: size of figure (default (12,7)) kde: whether to the show density curve (default False) bins: number of bins for histogram (default None) """ f2, (ax_box2, ax_hist2) = plt.subplots( nrows=2, # Number of rows of the subplot grid= 2 sharex=True, # x-axis will be shared among all subplots gridspec_kw={"height_ratios": (0.25, 0.75)}, figsize=figsize, ) # creating the 2 subplots sns.boxplot( data=data, x=feature, ax=ax_box2, showmeans=True, color="violet" ) # boxplot will be created and a star will indicate the mean value of the column sns.histplot( data=data, x=feature, kde=kde, ax=ax_hist2, bins=bins, palette="winter" ) if bins else sns.histplot( data=data, x=feature, kde=kde, ax=ax_hist2 ) # For histogram ax_hist2.axvline( data[feature].mean(), color="green", linestyle="--" ) # Add mean to the histogram ax_hist2.axvline( data[feature].median(), color="black", linestyle="-" ) # Add median to the histogram histogram_boxplot(df, 'screen_size')
code
90137202/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

sns.set()
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
import statsmodels.api as sm

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 200)
data = pd.read_csv('/kaggle/input/used-phone-data/used_phone_data.csv')
data.sample(n=5, random_state=1)
code
90137202/cell_15
[ "text_plain_output_1.png" ]
import numpy as np
import numpy as np  # linear algebra
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

sns.set()
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
import statsmodels.api as sm

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 200)
data = pd.read_csv('/kaggle/input/used-phone-data/used_phone_data.csv')
data.sample(n=5, random_state=1)
data.shape
df = data.copy()
category_col = df.select_dtypes(exclude=np.number).columns.tolist()
df[category_col] = df[category_col].astype('category')
df.dtypes
df.isnull().sum()
df.isnull().sum()
code
90137202/cell_38
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set() from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score import statsmodels.api as sm pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 200) data = pd.read_csv('/kaggle/input/used-phone-data/used_phone_data.csv') data.sample(n=5, random_state=1) data.shape df = data.copy() category_col = df.select_dtypes(exclude=np.number).columns.tolist() df[category_col] = df[category_col].astype('category') df.dtypes df.isnull().sum() df.isnull().sum() df.describe(include='all').T # function to plot a boxplot and a histogram along the same scale. def histogram_boxplot(data, feature, figsize=(12, 7), kde=False, bins=None): """ Boxplot and histogram combined data: dataframe feature: dataframe column figsize: size of figure (default (12,7)) kde: whether to the show density curve (default False) bins: number of bins for histogram (default None) """ f2, (ax_box2, ax_hist2) = plt.subplots( nrows=2, # Number of rows of the subplot grid= 2 sharex=True, # x-axis will be shared among all subplots gridspec_kw={"height_ratios": (0.25, 0.75)}, figsize=figsize, ) # creating the 2 subplots sns.boxplot( data=data, x=feature, ax=ax_box2, showmeans=True, color="violet" ) # boxplot will be created and a star will indicate the mean value of the column sns.histplot( data=data, x=feature, kde=kde, ax=ax_hist2, bins=bins, palette="winter" ) if bins else sns.histplot( data=data, x=feature, kde=kde, ax=ax_hist2 ) # For histogram ax_hist2.axvline( data[feature].mean(), color="green", linestyle="--" ) # Add mean to the histogram ax_hist2.axvline( data[feature].median(), color="black", linestyle="-" ) # Add median to the histogram # function to create labeled barplots for categorical and numerical variables def labeled_barplot(data, feature, perc=False, n=None): """ Barplot with percentage at the top data: dataframe feature: dataframe column perc: whether to display percentages instead of count (default is False) n: displays the top n category levels (default is None, i.e., display all levels) """ total = len(data[feature]) # length of the column count = data[feature].nunique() if n is None: plt.figure(figsize=(count + 1, 5)) else: plt.figure(figsize=(n + 1, 5)) plt.xticks(rotation=90, fontsize=15) ax = sns.countplot( data=data, x=feature, palette="Paired", order=data[feature].value_counts().index[:n].sort_values(), ) for p in ax.patches: if perc == True: label = "{:.1f}%".format( 100 * p.get_height() / total ) # percentage of each class of the category else: label = p.get_height() # count of each level of the category x = p.get_x() + p.get_width() / 2 # width of the plot y = p.get_height() # height of the plot ax.annotate( label, (x, y), ha="center", va="center", size=12, xytext=(0, 5), textcoords="offset points", ) # annotate the percentage # show the plot labeled_barplot(df, '5g', perc=True)
code
90137202/cell_47
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set() from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score import statsmodels.api as sm pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 200) data = pd.read_csv('/kaggle/input/used-phone-data/used_phone_data.csv') data.sample(n=5, random_state=1) data.shape df = data.copy() category_col = df.select_dtypes(exclude=np.number).columns.tolist() df[category_col] = df[category_col].astype('category') df.dtypes df.isnull().sum() df.isnull().sum() df.describe(include='all').T # function to plot a boxplot and a histogram along the same scale. def histogram_boxplot(data, feature, figsize=(12, 7), kde=False, bins=None): """ Boxplot and histogram combined data: dataframe feature: dataframe column figsize: size of figure (default (12,7)) kde: whether to the show density curve (default False) bins: number of bins for histogram (default None) """ f2, (ax_box2, ax_hist2) = plt.subplots( nrows=2, # Number of rows of the subplot grid= 2 sharex=True, # x-axis will be shared among all subplots gridspec_kw={"height_ratios": (0.25, 0.75)}, figsize=figsize, ) # creating the 2 subplots sns.boxplot( data=data, x=feature, ax=ax_box2, showmeans=True, color="violet" ) # boxplot will be created and a star will indicate the mean value of the column sns.histplot( data=data, x=feature, kde=kde, ax=ax_hist2, bins=bins, palette="winter" ) if bins else sns.histplot( data=data, x=feature, kde=kde, ax=ax_hist2 ) # For histogram ax_hist2.axvline( data[feature].mean(), color="green", linestyle="--" ) # Add mean to the histogram ax_hist2.axvline( data[feature].median(), color="black", linestyle="-" ) # Add median to the histogram # function to create labeled barplots for categorical and numerical variables def labeled_barplot(data, feature, perc=False, n=None): """ Barplot with percentage at the top data: dataframe feature: dataframe column perc: whether to display percentages instead of count (default is False) n: displays the top n category levels (default is None, i.e., display all levels) """ total = len(data[feature]) # length of the column count = data[feature].nunique() if n is None: plt.figure(figsize=(count + 1, 5)) else: plt.figure(figsize=(n + 1, 5)) plt.xticks(rotation=90, fontsize=15) ax = sns.countplot( data=data, x=feature, palette="Paired", order=data[feature].value_counts().index[:n].sort_values(), ) for p in ax.patches: if perc == True: label = "{:.1f}%".format( 100 * p.get_height() / total ) # percentage of each class of the category else: label = p.get_height() # count of each level of the category x = p.get_x() + p.get_width() / 2 # width of the plot y = p.get_height() # height of the plot ax.annotate( label, (x, y), ha="center", va="center", size=12, xytext=(0, 5), textcoords="offset points", ) # annotate the percentage # show the plot df.groupby('brand_name')['ram'].mean() plt.figure(figsize=(15, 5)) plt.subplot(1, 2, 1) sns.barplot(data=df, y='ram', x='brand_name') plt.xticks(rotation=90) plt.subplot(1, 2, 2) sns.boxplot(data=df, y='ram', x='brand_name') plt.xticks(rotation=90) plt.show()
code
90137202/cell_17
[ "text_plain_output_1.png" ]
import numpy as np
import numpy as np  # linear algebra
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

sns.set()
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
import statsmodels.api as sm

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 200)
data = pd.read_csv('/kaggle/input/used-phone-data/used_phone_data.csv')
data.sample(n=5, random_state=1)
data.shape
df = data.copy()
category_col = df.select_dtypes(exclude=np.number).columns.tolist()
df[category_col] = df[category_col].astype('category')
df.dtypes
df.isnull().sum()
df.isnull().sum()
df.describe(include='all').T
code
90137202/cell_35
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set() from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score import statsmodels.api as sm pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 200) data = pd.read_csv('/kaggle/input/used-phone-data/used_phone_data.csv') data.sample(n=5, random_state=1) data.shape df = data.copy() category_col = df.select_dtypes(exclude=np.number).columns.tolist() df[category_col] = df[category_col].astype('category') df.dtypes df.isnull().sum() df.isnull().sum() df.describe(include='all').T # function to plot a boxplot and a histogram along the same scale. def histogram_boxplot(data, feature, figsize=(12, 7), kde=False, bins=None): """ Boxplot and histogram combined data: dataframe feature: dataframe column figsize: size of figure (default (12,7)) kde: whether to the show density curve (default False) bins: number of bins for histogram (default None) """ f2, (ax_box2, ax_hist2) = plt.subplots( nrows=2, # Number of rows of the subplot grid= 2 sharex=True, # x-axis will be shared among all subplots gridspec_kw={"height_ratios": (0.25, 0.75)}, figsize=figsize, ) # creating the 2 subplots sns.boxplot( data=data, x=feature, ax=ax_box2, showmeans=True, color="violet" ) # boxplot will be created and a star will indicate the mean value of the column sns.histplot( data=data, x=feature, kde=kde, ax=ax_hist2, bins=bins, palette="winter" ) if bins else sns.histplot( data=data, x=feature, kde=kde, ax=ax_hist2 ) # For histogram ax_hist2.axvline( data[feature].mean(), color="green", linestyle="--" ) # Add mean to the histogram ax_hist2.axvline( data[feature].median(), color="black", linestyle="-" ) # Add median to the histogram # function to create labeled barplots for categorical and numerical variables def labeled_barplot(data, feature, perc=False, n=None): """ Barplot with percentage at the top data: dataframe feature: dataframe column perc: whether to display percentages instead of count (default is False) n: displays the top n category levels (default is None, i.e., display all levels) """ total = len(data[feature]) # length of the column count = data[feature].nunique() if n is None: plt.figure(figsize=(count + 1, 5)) else: plt.figure(figsize=(n + 1, 5)) plt.xticks(rotation=90, fontsize=15) ax = sns.countplot( data=data, x=feature, palette="Paired", order=data[feature].value_counts().index[:n].sort_values(), ) for p in ax.patches: if perc == True: label = "{:.1f}%".format( 100 * p.get_height() / total ) # percentage of each class of the category else: label = p.get_height() # count of each level of the category x = p.get_x() + p.get_width() / 2 # width of the plot y = p.get_height() # height of the plot ax.annotate( label, (x, y), ha="center", va="center", size=12, xytext=(0, 5), textcoords="offset points", ) # annotate the percentage # show the plot labeled_barplot(df, 'os', perc=True)
code
90137202/cell_43
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set() from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score import statsmodels.api as sm pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 200) data = pd.read_csv('/kaggle/input/used-phone-data/used_phone_data.csv') data.sample(n=5, random_state=1) data.shape df = data.copy() category_col = df.select_dtypes(exclude=np.number).columns.tolist() df[category_col] = df[category_col].astype('category') df.dtypes df.isnull().sum() df.isnull().sum() df.describe(include='all').T # function to plot a boxplot and a histogram along the same scale. def histogram_boxplot(data, feature, figsize=(12, 7), kde=False, bins=None): """ Boxplot and histogram combined data: dataframe feature: dataframe column figsize: size of figure (default (12,7)) kde: whether to the show density curve (default False) bins: number of bins for histogram (default None) """ f2, (ax_box2, ax_hist2) = plt.subplots( nrows=2, # Number of rows of the subplot grid= 2 sharex=True, # x-axis will be shared among all subplots gridspec_kw={"height_ratios": (0.25, 0.75)}, figsize=figsize, ) # creating the 2 subplots sns.boxplot( data=data, x=feature, ax=ax_box2, showmeans=True, color="violet" ) # boxplot will be created and a star will indicate the mean value of the column sns.histplot( data=data, x=feature, kde=kde, ax=ax_hist2, bins=bins, palette="winter" ) if bins else sns.histplot( data=data, x=feature, kde=kde, ax=ax_hist2 ) # For histogram ax_hist2.axvline( data[feature].mean(), color="green", linestyle="--" ) # Add mean to the histogram ax_hist2.axvline( data[feature].median(), color="black", linestyle="-" ) # Add median to the histogram # function to create labeled barplots for categorical and numerical variables def labeled_barplot(data, feature, perc=False, n=None): """ Barplot with percentage at the top data: dataframe feature: dataframe column perc: whether to display percentages instead of count (default is False) n: displays the top n category levels (default is None, i.e., display all levels) """ total = len(data[feature]) # length of the column count = data[feature].nunique() if n is None: plt.figure(figsize=(count + 1, 5)) else: plt.figure(figsize=(n + 1, 5)) plt.xticks(rotation=90, fontsize=15) ax = sns.countplot( data=data, x=feature, palette="Paired", order=data[feature].value_counts().index[:n].sort_values(), ) for p in ax.patches: if perc == True: label = "{:.1f}%".format( 100 * p.get_height() / total ) # percentage of each class of the category else: label = p.get_height() # count of each level of the category x = p.get_x() + p.get_width() / 2 # width of the plot y = p.get_height() # height of the plot ax.annotate( label, (x, y), ha="center", va="center", size=12, xytext=(0, 5), textcoords="offset points", ) # annotate the percentage # show the plot labeled_barplot(df, 'main_camera_mp', perc=True)
code
90137202/cell_46
[ "image_output_1.png" ]
import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set() from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score import statsmodels.api as sm pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 200) data = pd.read_csv('/kaggle/input/used-phone-data/used_phone_data.csv') data.sample(n=5, random_state=1) data.shape df = data.copy() category_col = df.select_dtypes(exclude=np.number).columns.tolist() df[category_col] = df[category_col].astype('category') df.dtypes df.isnull().sum() df.isnull().sum() df.describe(include='all').T df.groupby('brand_name')['ram'].mean()
code
90137202/cell_24
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set() from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score import statsmodels.api as sm pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 200) data = pd.read_csv('/kaggle/input/used-phone-data/used_phone_data.csv') data.sample(n=5, random_state=1) data.shape df = data.copy() category_col = df.select_dtypes(exclude=np.number).columns.tolist() df[category_col] = df[category_col].astype('category') df.dtypes df.isnull().sum() df.isnull().sum() df.describe(include='all').T # function to plot a boxplot and a histogram along the same scale. def histogram_boxplot(data, feature, figsize=(12, 7), kde=False, bins=None): """ Boxplot and histogram combined data: dataframe feature: dataframe column figsize: size of figure (default (12,7)) kde: whether to the show density curve (default False) bins: number of bins for histogram (default None) """ f2, (ax_box2, ax_hist2) = plt.subplots( nrows=2, # Number of rows of the subplot grid= 2 sharex=True, # x-axis will be shared among all subplots gridspec_kw={"height_ratios": (0.25, 0.75)}, figsize=figsize, ) # creating the 2 subplots sns.boxplot( data=data, x=feature, ax=ax_box2, showmeans=True, color="violet" ) # boxplot will be created and a star will indicate the mean value of the column sns.histplot( data=data, x=feature, kde=kde, ax=ax_hist2, bins=bins, palette="winter" ) if bins else sns.histplot( data=data, x=feature, kde=kde, ax=ax_hist2 ) # For histogram ax_hist2.axvline( data[feature].mean(), color="green", linestyle="--" ) # Add mean to the histogram ax_hist2.axvline( data[feature].median(), color="black", linestyle="-" ) # Add median to the histogram histogram_boxplot(df, 'battery')
code
90137202/cell_14
[ "text_plain_output_1.png" ]
import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set() from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score import statsmodels.api as sm pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 200) data = pd.read_csv('/kaggle/input/used-phone-data/used_phone_data.csv') data.sample(n=5, random_state=1) data.shape df = data.copy() category_col = df.select_dtypes(exclude=np.number).columns.tolist() df[category_col] = df[category_col].astype('category') df.dtypes df.isnull().sum()
code
90137202/cell_22
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set() from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score import statsmodels.api as sm pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 200) data = pd.read_csv('/kaggle/input/used-phone-data/used_phone_data.csv') data.sample(n=5, random_state=1) data.shape df = data.copy() category_col = df.select_dtypes(exclude=np.number).columns.tolist() df[category_col] = df[category_col].astype('category') df.dtypes df.isnull().sum() df.isnull().sum() df.describe(include='all').T # function to plot a boxplot and a histogram along the same scale. def histogram_boxplot(data, feature, figsize=(12, 7), kde=False, bins=None): """ Boxplot and histogram combined data: dataframe feature: dataframe column figsize: size of figure (default (12,7)) kde: whether to the show density curve (default False) bins: number of bins for histogram (default None) """ f2, (ax_box2, ax_hist2) = plt.subplots( nrows=2, # Number of rows of the subplot grid= 2 sharex=True, # x-axis will be shared among all subplots gridspec_kw={"height_ratios": (0.25, 0.75)}, figsize=figsize, ) # creating the 2 subplots sns.boxplot( data=data, x=feature, ax=ax_box2, showmeans=True, color="violet" ) # boxplot will be created and a star will indicate the mean value of the column sns.histplot( data=data, x=feature, kde=kde, ax=ax_hist2, bins=bins, palette="winter" ) if bins else sns.histplot( data=data, x=feature, kde=kde, ax=ax_hist2 ) # For histogram ax_hist2.axvline( data[feature].mean(), color="green", linestyle="--" ) # Add mean to the histogram ax_hist2.axvline( data[feature].median(), color="black", linestyle="-" ) # Add median to the histogram histogram_boxplot(df, 'new_price')
code
90137202/cell_53
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set() from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score import statsmodels.api as sm pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 200) data = pd.read_csv('/kaggle/input/used-phone-data/used_phone_data.csv') data.sample(n=5, random_state=1) data.shape df = data.copy() category_col = df.select_dtypes(exclude=np.number).columns.tolist() df[category_col] = df[category_col].astype('category') df.dtypes df.isnull().sum() df.isnull().sum() df.describe(include='all').T # function to plot a boxplot and a histogram along the same scale. def histogram_boxplot(data, feature, figsize=(12, 7), kde=False, bins=None): """ Boxplot and histogram combined data: dataframe feature: dataframe column figsize: size of figure (default (12,7)) kde: whether to the show density curve (default False) bins: number of bins for histogram (default None) """ f2, (ax_box2, ax_hist2) = plt.subplots( nrows=2, # Number of rows of the subplot grid= 2 sharex=True, # x-axis will be shared among all subplots gridspec_kw={"height_ratios": (0.25, 0.75)}, figsize=figsize, ) # creating the 2 subplots sns.boxplot( data=data, x=feature, ax=ax_box2, showmeans=True, color="violet" ) # boxplot will be created and a star will indicate the mean value of the column sns.histplot( data=data, x=feature, kde=kde, ax=ax_hist2, bins=bins, palette="winter" ) if bins else sns.histplot( data=data, x=feature, kde=kde, ax=ax_hist2 ) # For histogram ax_hist2.axvline( data[feature].mean(), color="green", linestyle="--" ) # Add mean to the histogram ax_hist2.axvline( data[feature].median(), color="black", linestyle="-" ) # Add median to the histogram # function to create labeled barplots for categorical and numerical variables def labeled_barplot(data, feature, perc=False, n=None): """ Barplot with percentage at the top data: dataframe feature: dataframe column perc: whether to display percentages instead of count (default is False) n: displays the top n category levels (default is None, i.e., display all levels) """ total = len(data[feature]) # length of the column count = data[feature].nunique() if n is None: plt.figure(figsize=(count + 1, 5)) else: plt.figure(figsize=(n + 1, 5)) plt.xticks(rotation=90, fontsize=15) ax = sns.countplot( data=data, x=feature, palette="Paired", order=data[feature].value_counts().index[:n].sort_values(), ) for p in ax.patches: if perc == True: label = "{:.1f}%".format( 100 * p.get_height() / total ) # percentage of each class of the category else: label = p.get_height() # count of each level of the category x = p.get_x() + p.get_width() / 2 # width of the plot y = p.get_height() # height of the plot ax.annotate( label, (x, y), ha="center", va="center", size=12, xytext=(0, 5), textcoords="offset points", ) # annotate the percentage # show the plot df.groupby('brand_name')['ram'].mean() plt.xticks(rotation=90) plt.xticks(rotation=90) numeric_columns = df.select_dtypes(include=np.number).columns.tolist() df.isnull().sum() df.isnull().sum()
code
90137202/cell_10
[ "text_html_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set() from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score import statsmodels.api as sm pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 200) data = pd.read_csv('/kaggle/input/used-phone-data/used_phone_data.csv') data.sample(n=5, random_state=1) data.shape data.info()
code
90137202/cell_37
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set() from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score import statsmodels.api as sm pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 200) data = pd.read_csv('/kaggle/input/used-phone-data/used_phone_data.csv') data.sample(n=5, random_state=1) data.shape df = data.copy() category_col = df.select_dtypes(exclude=np.number).columns.tolist() df[category_col] = df[category_col].astype('category') df.dtypes df.isnull().sum() df.isnull().sum() df.describe(include='all').T # function to plot a boxplot and a histogram along the same scale. def histogram_boxplot(data, feature, figsize=(12, 7), kde=False, bins=None): """ Boxplot and histogram combined data: dataframe feature: dataframe column figsize: size of figure (default (12,7)) kde: whether to the show density curve (default False) bins: number of bins for histogram (default None) """ f2, (ax_box2, ax_hist2) = plt.subplots( nrows=2, # Number of rows of the subplot grid= 2 sharex=True, # x-axis will be shared among all subplots gridspec_kw={"height_ratios": (0.25, 0.75)}, figsize=figsize, ) # creating the 2 subplots sns.boxplot( data=data, x=feature, ax=ax_box2, showmeans=True, color="violet" ) # boxplot will be created and a star will indicate the mean value of the column sns.histplot( data=data, x=feature, kde=kde, ax=ax_hist2, bins=bins, palette="winter" ) if bins else sns.histplot( data=data, x=feature, kde=kde, ax=ax_hist2 ) # For histogram ax_hist2.axvline( data[feature].mean(), color="green", linestyle="--" ) # Add mean to the histogram ax_hist2.axvline( data[feature].median(), color="black", linestyle="-" ) # Add median to the histogram # function to create labeled barplots for categorical and numerical variables def labeled_barplot(data, feature, perc=False, n=None): """ Barplot with percentage at the top data: dataframe feature: dataframe column perc: whether to display percentages instead of count (default is False) n: displays the top n category levels (default is None, i.e., display all levels) """ total = len(data[feature]) # length of the column count = data[feature].nunique() if n is None: plt.figure(figsize=(count + 1, 5)) else: plt.figure(figsize=(n + 1, 5)) plt.xticks(rotation=90, fontsize=15) ax = sns.countplot( data=data, x=feature, palette="Paired", order=data[feature].value_counts().index[:n].sort_values(), ) for p in ax.patches: if perc == True: label = "{:.1f}%".format( 100 * p.get_height() / total ) # percentage of each class of the category else: label = p.get_height() # count of each level of the category x = p.get_x() + p.get_width() / 2 # width of the plot y = p.get_height() # height of the plot ax.annotate( label, (x, y), ha="center", va="center", size=12, xytext=(0, 5), textcoords="offset points", ) # annotate the percentage # show the plot labeled_barplot(df, '4g', perc=True)
code
73081309/cell_4
[ "image_output_1.png" ]
import histomicstk as htk
import matplotlib.pyplot as plt
import skimage.io  # required for skimage.io.imread below (not imported in the original cell)

titlesize = 16  # assumed value; the original cell uses `titlesize` without defining it

input_image_file = ('https://data.kitware.com/api/v1/file/'
                    '576ad39b8d777f1ecd6702f2/download')  # Easy1.png
im_input = skimage.io.imread(input_image_file)[:, :, :3]
plt.imshow(im_input)
_ = plt.title('Input Image', fontsize=16)

# reference image for Reinhard color normalization
ref_image_file = 'https://data.kitware.com/api/v1/file/57718cc28d777f1ecd8a883c/download'
im_reference = skimage.io.imread(ref_image_file)[:, :, :3]

# mean and stddev of the reference image in LAB space
mean_ref, std_ref = htk.preprocessing.color_conversion.lab_mean_std(im_reference)

# Reinhard color normalization of the input image
im_nmzd = htk.preprocessing.color_normalization.reinhard(im_input, mean_ref, std_ref)

# Display results
plt.figure(figsize=(20, 10))
plt.subplot(1, 2, 1)
plt.imshow(im_reference)
_ = plt.title('Reference Image', fontsize=titlesize)
plt.subplot(1, 2, 2)
plt.imshow(im_nmzd)
_ = plt.title('Normalized Input Image', fontsize=titlesize)
code
73081309/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import histomicstk as htk
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
import skimage.io       # required for skimage.io.imread below (not imported in the original cell)
import skimage.measure  # required for skimage.measure.regionprops below
import skimage.color    # required for skimage.color.label2rgb below

titlesize = 16  # assumed value; the original cell uses `titlesize` without defining it

input_image_file = ('https://data.kitware.com/api/v1/file/'
                    '576ad39b8d777f1ecd6702f2/download')  # Easy1.png
im_input = skimage.io.imread(input_image_file)[:, :, :3]
plt.imshow(im_input)
_ = plt.title('Input Image', fontsize=16)

# Load reference image for normalization
ref_image_file = ('https://data.kitware.com/api/v1/file/'
                  '57718cc28d777f1ecd8a883c/download')  # L1.png
im_reference = skimage.io.imread(ref_image_file)[:, :, :3]

# get mean and stddev of reference image in lab space
mean_ref, std_ref = htk.preprocessing.color_conversion.lab_mean_std(im_reference)

# perform reinhard color normalization
im_nmzd = htk.preprocessing.color_normalization.reinhard(im_input, mean_ref, std_ref)

# Display results
plt.figure(figsize=(20, 10))
plt.subplot(1, 2, 1)
plt.imshow(im_reference)
_ = plt.title('Reference Image', fontsize=titlesize)
plt.subplot(1, 2, 2)
plt.imshow(im_nmzd)
_ = plt.title('Normalized Input Image', fontsize=titlesize)

# create stain to color map
stainColorMap = {
    'hematoxylin': [0.65, 0.70, 0.29],
    'eosin': [0.07, 0.99, 0.11],
    'dab': [0.27, 0.57, 0.78],
    'null': [0.0, 0.0, 0.0]
}

# specify stains of input image
stain_1 = 'hematoxylin'  # nuclei stain
stain_2 = 'eosin'        # cytoplasm stain
stain_3 = 'null'         # set to null if input contains only two stains

# create stain matrix
W = np.array([stainColorMap[stain_1], stainColorMap[stain_2], stainColorMap[stain_3]]).T

# perform standard color deconvolution
im_stains = htk.preprocessing.color_deconvolution.color_deconvolution(im_nmzd, W).Stains

# Display results
plt.figure(figsize=(20, 10))
plt.subplot(1, 2, 1)
plt.imshow(im_stains[:, :, 0])
plt.title(stain_1, fontsize=titlesize)
plt.subplot(1, 2, 2)
plt.imshow(im_stains[:, :, 1])
_ = plt.title(stain_2, fontsize=titlesize)

# segment nuclei from the hematoxylin channel
im_nuclei_stain = im_stains[:, :, 0]
foreground_threshold = 60
im_fgnd_mask = sp.ndimage.binary_fill_holes(im_nuclei_stain < foreground_threshold)  # deprecated sp.ndimage.morphology path dropped

min_radius = 10
max_radius = 15
im_log_max, im_sigma_max = htk.filters.shape.cdog(im_nuclei_stain, im_fgnd_mask, sigma_min=min_radius * np.sqrt(2), sigma_max=max_radius * np.sqrt(2))

local_max_search_radius = 10
im_nuclei_seg_mask, seeds, maxima = htk.segmentation.nuclear.max_clustering(im_log_max, im_fgnd_mask, local_max_search_radius)

min_nucleus_area = 80
im_nuclei_seg_mask = htk.segmentation.label.area_open(im_nuclei_seg_mask, min_nucleus_area).astype(int)  # the np.int alias is removed in recent numpy

objProps = skimage.measure.regionprops(im_nuclei_seg_mask)
print('Number of nuclei = ', len(objProps))

plt.figure(figsize=(20, 10))
plt.subplot(1, 2, 1)
plt.imshow(skimage.color.label2rgb(im_nuclei_seg_mask, im_input, bg_label=0), origin='lower')
plt.title('Nuclei segmentation mask overlay', fontsize=titlesize)
plt.subplot(1, 2, 2)
plt.imshow(im_input)
plt.xlim([0, im_input.shape[1]])
plt.ylim([0, im_input.shape[0]])
plt.title('Nuclei bounding boxes', fontsize=titlesize)
for i in range(len(objProps)):
    c = [objProps[i].centroid[1], objProps[i].centroid[0], 0]
    width = objProps[i].bbox[3] - objProps[i].bbox[1] + 1
    height = objProps[i].bbox[2] - objProps[i].bbox[0] + 1
    cur_bbox = {'type': 'rectangle', 'center': c, 'width': width, 'height': height}
    plt.plot(c[0], c[1], 'g+')
    mrect = mpatches.Rectangle([c[0] - 0.5 * width, c[1] - 0.5 * height], width, height, fill=False, ec='g', linewidth=2)
    plt.gca().add_patch(mrect)
code
73081309/cell_1
[ "text_plain_output_1.png" ]
!pip install histomicstk --find-links https://girder.github.io/large_image_wheels
code
73081309/cell_3
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import skimage.io  # required for skimage.io.imread below (not imported in the original cell)

input_image_file = 'https://data.kitware.com/api/v1/file/576ad39b8d777f1ecd6702f2/download'
im_input = skimage.io.imread(input_image_file)[:, :, :3]
plt.imshow(im_input)
_ = plt.title('Input Image', fontsize=16)
code
73081309/cell_5
[ "image_output_1.png" ]
import histomicstk as htk
import matplotlib.pyplot as plt
import numpy as np
import skimage.io  # required for skimage.io.imread below (not imported in the original cell)

titlesize = 16  # assumed value; the original cell uses `titlesize` without defining it

input_image_file = ('https://data.kitware.com/api/v1/file/'
                    '576ad39b8d777f1ecd6702f2/download')  # Easy1.png
im_input = skimage.io.imread(input_image_file)[:, :, :3]
plt.imshow(im_input)
_ = plt.title('Input Image', fontsize=16)

# Load reference image for normalization
ref_image_file = ('https://data.kitware.com/api/v1/file/'
                  '57718cc28d777f1ecd8a883c/download')  # L1.png
im_reference = skimage.io.imread(ref_image_file)[:, :, :3]

# get mean and stddev of reference image in lab space
mean_ref, std_ref = htk.preprocessing.color_conversion.lab_mean_std(im_reference)

# perform reinhard color normalization
im_nmzd = htk.preprocessing.color_normalization.reinhard(im_input, mean_ref, std_ref)

# Display results
plt.figure(figsize=(20, 10))
plt.subplot(1, 2, 1)
plt.imshow(im_reference)
_ = plt.title('Reference Image', fontsize=titlesize)
plt.subplot(1, 2, 2)
plt.imshow(im_nmzd)
_ = plt.title('Normalized Input Image', fontsize=titlesize)

# stain color map used for color deconvolution
stainColorMap = {'hematoxylin': [0.65, 0.7, 0.29], 'eosin': [0.07, 0.99, 0.11], 'dab': [0.27, 0.57, 0.78], 'null': [0.0, 0.0, 0.0]}

# stains present in the input image ('null' when only two stains are present)
stain_1 = 'hematoxylin'  # nuclei stain
stain_2 = 'eosin'        # cytoplasm stain
stain_3 = 'null'

# create stain matrix and perform standard color deconvolution
W = np.array([stainColorMap[stain_1], stainColorMap[stain_2], stainColorMap[stain_3]]).T
im_stains = htk.preprocessing.color_deconvolution.color_deconvolution(im_nmzd, W).Stains

# Display results
plt.figure(figsize=(20, 10))
plt.subplot(1, 2, 1)
plt.imshow(im_stains[:, :, 0])
plt.title(stain_1, fontsize=titlesize)
plt.subplot(1, 2, 2)
plt.imshow(im_stains[:, :, 1])
_ = plt.title(stain_2, fontsize=titlesize)
code
1005122/cell_3
[ "text_html_output_1.png" ]
import pandas as pd data = pd.read_csv('../input/HR_comma_sep.csv') data.head()
code
72101196/cell_21
[ "text_html_output_1.png" ]
from matplotlib.lines import Line2D
import matplotlib.pyplot as plt  # needed for plt.subplots below (not imported in the original cell)
import numpy as np               # replaces the deprecated pd.np shortcut used below
import pandas as pd
import statsmodels.formula.api as smf

url = 'https://raw.githubusercontent.com/vincentarelbundock/Rdatasets/master/csv/datasets/swiss.csv'
df = pd.read_csv(url, index_col=0)
df.columns = ['fertility', 'agri', 'exam', 'edu', 'catholic', 'infant_mort']

formula = 'fertility ~ %s' % ' + '.join(df.columns.values[1:])
formula
lin_reg = smf.ols(formula, data=df).fit()
lin_reg.summary()
err_series = lin_reg.params - lin_reg.conf_int()[0]
err_series
coef_df = pd.DataFrame({'coef': lin_reg.params.values[1:], 'err': err_series.values[1:], 'varname': err_series.index.values[1:]})
coef_df

formula_1 = 'fertility ~ %s' % ' + '.join(df.columns.values[1:-1])
mod_1 = smf.ols(formula_1, data=df).fit()
mod_1.params
formula_2 = 'fertility ~ %s' % ' + '.join(df.columns.values[1:-2].tolist() + ['infant_mort'])
mod_2 = smf.ols(formula_2, data=df).fit()
mod_2.params

# collect coefficients and error-bar lengths of both models
# (pd.concat replaces the deprecated DataFrame.append used originally)
coef_df = pd.DataFrame()
for i, mod in enumerate([mod_1, mod_2]):
    err_series = mod.params - mod.conf_int()[0]
    coef_df = pd.concat([coef_df, pd.DataFrame({'coef': mod.params.values[1:], 'err': err_series.values[1:], 'varname': err_series.index.values[1:], 'model': 'model %d' % (i + 1)})])
coef_df

marker_list = 'so'
width = 0.25
base_x = np.arange(5) - 0.2  # np.arange replaces the deprecated pd.np.arange
base_x
fig, ax = plt.subplots(figsize=(8, 5))
for i, mod in enumerate(coef_df.model.unique()):
    mod_df = coef_df[coef_df.model == mod]
    mod_df = mod_df.set_index('varname').reindex(coef_df['varname'].unique())
    X = base_x + width * i
    ax.bar(X, mod_df['coef'], color='none', yerr=mod_df['err'])
    ax.set_ylabel('')
    ax.set_xlabel('')
    ax.scatter(x=X, marker=marker_list[i], s=120, y=mod_df['coef'], color='black')
ax.axhline(y=0, linestyle='--', color='black', linewidth=4)
ax.xaxis.set_ticks_position('none')
_ = ax.set_xticklabels(['', 'Agriculture', 'Exam', 'Edu.', 'Catholic', 'Infant Mort.'], rotation=0, fontsize=16)
fs = 16
ax.annotate('Control', xy=(0.3, -0.2), xytext=(0.3, -0.35), xycoords='axes fraction', textcoords='axes fraction', fontsize=fs, ha='center', va='bottom', bbox=dict(boxstyle='square', fc='white', ec='black'), arrowprops=dict(arrowstyle='-[, widthB=6.5, lengthB=1.2', lw=2.0, color='black'))
ax.annotate('Study', xy=(0.8, -0.2), xytext=(0.8, -0.35), xycoords='axes fraction', textcoords='axes fraction', fontsize=fs, ha='center', va='bottom', bbox=dict(boxstyle='square', fc='white', ec='black'), arrowprops=dict(arrowstyle='-[, widthB=3.5, lengthB=1.2', lw=2.0, color='black'))
legend_elements = [Line2D([0], [0], marker=m, label='Model %d' % i, color='k', markersize=10) for i, m in enumerate(marker_list)]
_ = ax.legend(handles=legend_elements, loc=2, prop={'size': 15}, labelspacing=1.2)
code
72101196/cell_13
[ "text_html_output_1.png" ]
import pandas as pd import statsmodels.formula.api as smf url = 'https://raw.githubusercontent.com/vincentarelbundock/Rdatasets/master/csv/datasets/swiss.csv' df = pd.read_csv(url, index_col=0) df.columns = ['fertility', 'agri', 'exam', 'edu', 'catholic', 'infant_mort'] formula = 'fertility ~ %s' % ' + '.join(df.columns.values[1:]) formula lin_reg = smf.ols(formula, data=df).fit() lin_reg.summary() err_series = lin_reg.params - lin_reg.conf_int()[0] err_series coef_df = pd.DataFrame({'coef': lin_reg.params.values[1:], 'err': err_series.values[1:], 'varname': err_series.index.values[1:]}) coef_df
code
72101196/cell_9
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd url = 'https://raw.githubusercontent.com/vincentarelbundock/Rdatasets/master/csv/datasets/swiss.csv' df = pd.read_csv(url, index_col=0) df.columns = ['fertility', 'agri', 'exam', 'edu', 'catholic', 'infant_mort'] formula = 'fertility ~ %s' % ' + '.join(df.columns.values[1:]) formula
code
72101196/cell_20
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd import statsmodels.formula.api as smf url = 'https://raw.githubusercontent.com/vincentarelbundock/Rdatasets/master/csv/datasets/swiss.csv' df = pd.read_csv(url, index_col=0) df.columns = ['fertility', 'agri', 'exam', 'edu', 'catholic', 'infant_mort'] formula = 'fertility ~ %s' % ' + '.join(df.columns.values[1:]) formula lin_reg = smf.ols(formula, data=df).fit() lin_reg.summary() err_series = lin_reg.params - lin_reg.conf_int()[0] err_series coef_df = pd.DataFrame({'coef': lin_reg.params.values[1:], 'err': err_series.values[1:], 'varname': err_series.index.values[1:]}) coef_df formula_1 = 'fertility ~ %s' % ' + '.join(df.columns.values[1:-1]) mod_1 = smf.ols(formula_1, data=df).fit() mod_1.params formula_2 = 'fertility ~ %s' % ' + '.join(df.columns.values[1:-2].tolist() + ['infant_mort']) mod_2 = smf.ols(formula_2, data=df).fit() mod_2.params coef_df = pd.DataFrame() for i, mod in enumerate([mod_1, mod_2]): err_series = mod.params - mod.conf_int()[0] coef_df = coef_df.append(pd.DataFrame({'coef': mod.params.values[1:], 'err': err_series.values[1:], 'varname': err_series.index.values[1:], 'model': 'model %d' % (i + 1)})) coef_df marker_list = 'so' width = 0.25 base_x = pd.np.arange(5) - 0.2 base_x
code
72101196/cell_6
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd url = 'https://raw.githubusercontent.com/vincentarelbundock/Rdatasets/master/csv/datasets/swiss.csv' df = pd.read_csv(url, index_col=0) df.head()
code
72101196/cell_18
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd import statsmodels.formula.api as smf url = 'https://raw.githubusercontent.com/vincentarelbundock/Rdatasets/master/csv/datasets/swiss.csv' df = pd.read_csv(url, index_col=0) df.columns = ['fertility', 'agri', 'exam', 'edu', 'catholic', 'infant_mort'] formula = 'fertility ~ %s' % ' + '.join(df.columns.values[1:]) formula lin_reg = smf.ols(formula, data=df).fit() lin_reg.summary() err_series = lin_reg.params - lin_reg.conf_int()[0] err_series coef_df = pd.DataFrame({'coef': lin_reg.params.values[1:], 'err': err_series.values[1:], 'varname': err_series.index.values[1:]}) coef_df formula_1 = 'fertility ~ %s' % ' + '.join(df.columns.values[1:-1]) mod_1 = smf.ols(formula_1, data=df).fit() mod_1.params formula_2 = 'fertility ~ %s' % ' + '.join(df.columns.values[1:-2].tolist() + ['infant_mort']) mod_2 = smf.ols(formula_2, data=df).fit() mod_2.params coef_df = pd.DataFrame() for i, mod in enumerate([mod_1, mod_2]): err_series = mod.params - mod.conf_int()[0] coef_df = coef_df.append(pd.DataFrame({'coef': mod.params.values[1:], 'err': err_series.values[1:], 'varname': err_series.index.values[1:], 'model': 'model %d' % (i + 1)})) coef_df
code
72101196/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd import statsmodels.formula.api as smf url = 'https://raw.githubusercontent.com/vincentarelbundock/Rdatasets/master/csv/datasets/swiss.csv' df = pd.read_csv(url, index_col=0) df.columns = ['fertility', 'agri', 'exam', 'edu', 'catholic', 'infant_mort'] formula = 'fertility ~ %s' % ' + '.join(df.columns.values[1:]) formula lin_reg = smf.ols(formula, data=df).fit() lin_reg.summary() formula_1 = 'fertility ~ %s' % ' + '.join(df.columns.values[1:-1]) print(formula_1) mod_1 = smf.ols(formula_1, data=df).fit() mod_1.params
code
72101196/cell_17
[ "text_html_output_1.png" ]
import pandas as pd import statsmodels.formula.api as smf url = 'https://raw.githubusercontent.com/vincentarelbundock/Rdatasets/master/csv/datasets/swiss.csv' df = pd.read_csv(url, index_col=0) df.columns = ['fertility', 'agri', 'exam', 'edu', 'catholic', 'infant_mort'] formula = 'fertility ~ %s' % ' + '.join(df.columns.values[1:]) formula lin_reg = smf.ols(formula, data=df).fit() lin_reg.summary() formula_1 = 'fertility ~ %s' % ' + '.join(df.columns.values[1:-1]) mod_1 = smf.ols(formula_1, data=df).fit() mod_1.params formula_2 = 'fertility ~ %s' % ' + '.join(df.columns.values[1:-2].tolist() + ['infant_mort']) print(formula_2) mod_2 = smf.ols(formula_2, data=df).fit() mod_2.params
code
72101196/cell_10
[ "text_html_output_1.png" ]
import pandas as pd import statsmodels.formula.api as smf url = 'https://raw.githubusercontent.com/vincentarelbundock/Rdatasets/master/csv/datasets/swiss.csv' df = pd.read_csv(url, index_col=0) df.columns = ['fertility', 'agri', 'exam', 'edu', 'catholic', 'infant_mort'] formula = 'fertility ~ %s' % ' + '.join(df.columns.values[1:]) formula lin_reg = smf.ols(formula, data=df).fit() lin_reg.summary()
code
72101196/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd import statsmodels.formula.api as smf url = 'https://raw.githubusercontent.com/vincentarelbundock/Rdatasets/master/csv/datasets/swiss.csv' df = pd.read_csv(url, index_col=0) df.columns = ['fertility', 'agri', 'exam', 'edu', 'catholic', 'infant_mort'] formula = 'fertility ~ %s' % ' + '.join(df.columns.values[1:]) formula lin_reg = smf.ols(formula, data=df).fit() lin_reg.summary() err_series = lin_reg.params - lin_reg.conf_int()[0] err_series
code
106209898/cell_4
[ "text_plain_output_1.png" ]
def add(a, b): pass def add(a, b): return a + b addition = add(2, 10) addition = addition + 10 print(addition)
code
106209898/cell_6
[ "text_plain_output_1.png" ]
def add(a, b): pass def add(a, b): return a + b def add(a, b): add = a + b sub = a - b div = a / b mul = a * b return (add, sub, div, mul) a, b, c, d = add(2, 10) print(a, b, c, d)
code
106209898/cell_2
[ "text_plain_output_1.png" ]
def add(a, b): pass add(2, 10)
code
2001740/cell_4
[ "text_plain_output_1.png" ]
from datetime import datetime
from subprocess import check_output
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

pd_air_reserve = pd.read_csv('../input/air_reserve.csv')
pd_air_store_info = pd.read_csv('../input/air_store_info.csv')
pd_air_visit_data = pd.read_csv('../input/air_visit_data.csv')
pd_date_info = pd.read_csv('../input/date_info.csv')
pd_hpg_reserve = pd.read_csv('../input/hpg_reserve.csv')
pd_hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
pd_store_id_relation = pd.read_csv('../input/store_id_relation.csv')
pd_sample_submission = pd.read_csv('../input/sample_submission.csv')

# split the datetime columns into date and time parts
pd_air_reserve['new_visit_date'] = [datetime.strptime(d, '%Y-%m-%d %H:%M:%S').date() for d in pd_air_reserve['visit_datetime']]
pd_air_reserve['new_visit_time'] = [datetime.strptime(d, '%Y-%m-%d %H:%M:%S').time() for d in pd_air_reserve['visit_datetime']]
pd_air_reserve['new_reserve_date'] = [datetime.strptime(d, '%Y-%m-%d %H:%M:%S').date() for d in pd_air_reserve['reserve_datetime']]
pd_air_reserve['new_reserve_time'] = [datetime.strptime(d, '%Y-%m-%d %H:%M:%S').time() for d in pd_air_reserve['reserve_datetime']]

# fixed: the original groupby key was written as ['air_store_id']['new_visit_date'] (a TypeError)
# and used the undefined name `mean`; note that a 'mean' of datetime.time values is still not
# supported directly, which is presumably why the next line keeps only min/max.
pd_air_reserve.groupby(['air_store_id', 'new_visit_date']).agg({'reserve_visitors': sum, 'new_visit_time': [min, max, 'mean']})

pd_air_reserve_summ = pd_air_reserve.groupby(['air_store_id', 'new_visit_date']).agg({'reserve_visitors': sum, 'new_visit_time': [min, max]})
print(pd_air_reserve_summ.describe(include='all').transpose())
pd_air_reserve_summ.head()
print(pd_air_reserve_summ.query('air_store_id=="air_00a91d42b08b08d9"'))
code
2001740/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
from datetime import datetime
from subprocess import check_output
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.graph_objs as go
import plotly.plotly as py  # note: py.iplot requires Plotly cloud credentials; plotly.offline is the credential-free alternative

pd_air_reserve = pd.read_csv('../input/air_reserve.csv')
pd_air_store_info = pd.read_csv('../input/air_store_info.csv')
pd_air_visit_data = pd.read_csv('../input/air_visit_data.csv')
pd_date_info = pd.read_csv('../input/date_info.csv')
pd_hpg_reserve = pd.read_csv('../input/hpg_reserve.csv')
pd_hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
pd_store_id_relation = pd.read_csv('../input/store_id_relation.csv')
pd_sample_submission = pd.read_csv('../input/sample_submission.csv')

# split the datetime columns into date and time parts
pd_air_reserve['new_visit_date'] = [datetime.strptime(d, '%Y-%m-%d %H:%M:%S').date() for d in pd_air_reserve['visit_datetime']]
pd_air_reserve['new_visit_time'] = [datetime.strptime(d, '%Y-%m-%d %H:%M:%S').time() for d in pd_air_reserve['visit_datetime']]
pd_air_reserve['new_reserve_date'] = [datetime.strptime(d, '%Y-%m-%d %H:%M:%S').date() for d in pd_air_reserve['reserve_datetime']]
pd_air_reserve['new_reserve_time'] = [datetime.strptime(d, '%Y-%m-%d %H:%M:%S').time() for d in pd_air_reserve['reserve_datetime']]

# fixed: the original groupby key was written as ['air_store_id']['new_visit_date'] (a TypeError)
# and used the undefined name `mean`; a 'mean' of datetime.time values is still unsupported,
# hence the reduced aggregation kept on the next line.
pd_air_reserve.groupby(['air_store_id', 'new_visit_date']).agg({'reserve_visitors': sum, 'new_visit_time': [min, max, 'mean']})

pd_air_reserve_summ = pd_air_reserve.groupby(['air_store_id', 'new_visit_date']).agg({'reserve_visitors': sum, 'new_visit_time': [min, max]})
pd_air_reserve_summ.index

# fixed: the original selections indexed the aggregated frame with broken keys
# (e.g. pd_air_reserve_summ['air_00a91d42b08b08d9']); the assumed intent is to plot the
# daily total of reserve_visitors for store air_00a91d42b08b08d9. After the groupby,
# new_visit_date is an index level and the columns are a (column, aggregation) MultiIndex.
temp = pd_air_reserve_summ.query('air_store_id=="air_00a91d42b08b08d9"')
x = temp.index.get_level_values('new_visit_date')
y = temp[('reserve_visitors', 'sum')]

# go.Scatter takes keyword arguments; the original passed x and y positionally
trace0 = go.Scatter(x=x, y=y, name='High 2014', line=dict(color='rgb(205, 12, 24)', width=4))
data = [trace0]
layout = dict(title='Average High and Low Temperatures in New York', xaxis=dict(title='Month'), yaxis=dict(title='Temperature (degrees F)'))
fig = dict(data=data, layout=layout)
py.iplot(fig, filename='styled-line')
code
2001740/cell_2
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from subprocess import check_output import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import numpy as np import pandas as pd from subprocess import check_output pd_air_reserve = pd.read_csv('../input/air_reserve.csv') pd_air_store_info = pd.read_csv('../input/air_store_info.csv') pd_air_visit_data = pd.read_csv('../input/air_visit_data.csv') pd_date_info = pd.read_csv('../input/date_info.csv') pd_hpg_reserve = pd.read_csv('../input/hpg_reserve.csv') pd_hpg_store_info = pd.read_csv('../input/hpg_store_info.csv') pd_store_id_relation = pd.read_csv('../input/store_id_relation.csv') pd_sample_submission = pd.read_csv('../input/sample_submission.csv') print(pd_hpg_reserve.head()) print(pd_hpg_store_info.head()) print(pd_store_id_relation.head()) print(pd_sample_submission.head()) pd_air_reserve.describe().transpose()
code
2001740/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import numpy as np import pandas as pd from subprocess import check_output print(check_output(['ls', '../input']).decode('utf8')) pd_air_reserve = pd.read_csv('../input/air_reserve.csv') pd_air_store_info = pd.read_csv('../input/air_store_info.csv') pd_air_visit_data = pd.read_csv('../input/air_visit_data.csv') pd_date_info = pd.read_csv('../input/date_info.csv') print(pd_air_reserve.tail()) print(pd_air_store_info.tail()) print(pd_air_visit_data.tail()) print(pd_date_info.head())
code
2001740/cell_3
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from datetime import datetime
from subprocess import check_output
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

pd_air_reserve = pd.read_csv('../input/air_reserve.csv')
pd_air_store_info = pd.read_csv('../input/air_store_info.csv')
pd_air_visit_data = pd.read_csv('../input/air_visit_data.csv')
pd_date_info = pd.read_csv('../input/date_info.csv')
pd_hpg_reserve = pd.read_csv('../input/hpg_reserve.csv')
pd_hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
pd_store_id_relation = pd.read_csv('../input/store_id_relation.csv')
pd_sample_submission = pd.read_csv('../input/sample_submission.csv')
print(pd_air_reserve.describe(include='all').transpose())

# split the datetime columns into date and time parts
pd_air_reserve['new_visit_date'] = [datetime.strptime(d, '%Y-%m-%d %H:%M:%S').date() for d in pd_air_reserve['visit_datetime']]
pd_air_reserve['new_visit_time'] = [datetime.strptime(d, '%Y-%m-%d %H:%M:%S').time() for d in pd_air_reserve['visit_datetime']]
pd_air_reserve['new_reserve_date'] = [datetime.strptime(d, '%Y-%m-%d %H:%M:%S').date() for d in pd_air_reserve['reserve_datetime']]
pd_air_reserve['new_reserve_time'] = [datetime.strptime(d, '%Y-%m-%d %H:%M:%S').time() for d in pd_air_reserve['reserve_datetime']]
print(pd_air_reserve.describe(include='all').transpose())

# fixed: the original groupby key was written as ['air_store_id']['new_visit_date'] (a TypeError)
# and used the undefined name `mean`; note that a 'mean' of datetime.time values is still not
# supported directly, which is presumably why the later cells keep only min/max.
pd_air_reserve.groupby(['air_store_id', 'new_visit_date']).agg({'reserve_visitors': sum, 'new_visit_time': [min, max, 'mean']})
code
2001740/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
from datetime import datetime
from subprocess import check_output
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

pd_air_reserve = pd.read_csv('../input/air_reserve.csv')
pd_air_store_info = pd.read_csv('../input/air_store_info.csv')
pd_air_visit_data = pd.read_csv('../input/air_visit_data.csv')
pd_date_info = pd.read_csv('../input/date_info.csv')
pd_hpg_reserve = pd.read_csv('../input/hpg_reserve.csv')
pd_hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
pd_store_id_relation = pd.read_csv('../input/store_id_relation.csv')
pd_sample_submission = pd.read_csv('../input/sample_submission.csv')

# split the datetime columns into date and time parts
pd_air_reserve['new_visit_date'] = [datetime.strptime(d, '%Y-%m-%d %H:%M:%S').date() for d in pd_air_reserve['visit_datetime']]
pd_air_reserve['new_visit_time'] = [datetime.strptime(d, '%Y-%m-%d %H:%M:%S').time() for d in pd_air_reserve['visit_datetime']]
pd_air_reserve['new_reserve_date'] = [datetime.strptime(d, '%Y-%m-%d %H:%M:%S').date() for d in pd_air_reserve['reserve_datetime']]
pd_air_reserve['new_reserve_time'] = [datetime.strptime(d, '%Y-%m-%d %H:%M:%S').time() for d in pd_air_reserve['reserve_datetime']]

# fixed: the original groupby key was written as ['air_store_id']['new_visit_date'] (a TypeError)
# and used the undefined name `mean`; a 'mean' of datetime.time values is still unsupported,
# hence the reduced aggregation kept on the next line.
pd_air_reserve.groupby(['air_store_id', 'new_visit_date']).agg({'reserve_visitors': sum, 'new_visit_time': [min, max, 'mean']})

pd_air_reserve_summ = pd_air_reserve.groupby(['air_store_id', 'new_visit_date']).agg({'reserve_visitors': sum, 'new_visit_time': [min, max]})
temp = pd_air_reserve_summ.query('air_store_id=="air_00a91d42b08b08d9"')
# fixed: after the groupby, new_visit_date is an index level rather than a column,
# so it is read from the index instead of temp['new_visit_date']
x = temp.index.get_level_values('new_visit_date')
code
1006177/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.feature_extraction import DictVectorizer from sklearn.feature_extraction import DictVectorizer vec = DictVectorizer(sparse=False) x_train = vec.fit_transform(x_train.to_dict(orient='record')) x_test = vec.transform(x_test.to_dict(orient='record')) vec.get_feature_names()
code
1006177/cell_7
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split import pandas as pd data_train = pd.read_csv('../input/train.csv') data_test = pd.read_csv('../input/test.csv') data_x = data_train[['Pclass', 'Age', 'Sex', 'SibSp', 'Parch', 'Fare', 'Embarked']] data_y = data_train['Survived'] age_mean = data_x['Age'].dropna().median() fare_mean = data_x['Fare'].dropna().median() data_x['Age'].fillna(age_mean, inplace=True) data_x['Fare'].fillna(fare_mean, inplace=True) data_x['Embarked'].fillna('S', inplace=True) for i in range(1, 4): data_x.loc[data_x.Pclass == i, 'Pclass'] = str(i) from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(data_x, data_y, test_size=0.25, random_state=33) y_train.value_counts()
code
1006177/cell_18
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction import DictVectorizer from sklearn.model_selection import train_test_split from sklearn.tree import DecisionTreeClassifier import pandas as pd data_train = pd.read_csv('../input/train.csv') data_test = pd.read_csv('../input/test.csv') data_x = data_train[['Pclass', 'Age', 'Sex', 'SibSp', 'Parch', 'Fare', 'Embarked']] data_y = data_train['Survived'] age_mean = data_x['Age'].dropna().median() fare_mean = data_x['Fare'].dropna().median() data_x['Age'].fillna(age_mean, inplace=True) data_x['Fare'].fillna(fare_mean, inplace=True) data_x['Embarked'].fillna('S', inplace=True) for i in range(1, 4): data_x.loc[data_x.Pclass == i, 'Pclass'] = str(i) from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(data_x, data_y, test_size=0.25, random_state=33) y_train.value_counts() from sklearn.feature_extraction import DictVectorizer vec = DictVectorizer(sparse=False) x_train = vec.fit_transform(x_train.to_dict(orient='record')) x_test = vec.transform(x_test.to_dict(orient='record')) vec.get_feature_names() from sklearn.tree import DecisionTreeClassifier dtc = DecisionTreeClassifier() dtc.fit(x_train, y_train) dtc_y_predict = dtc.predict(x_test) dtc.score(x_test, y_test) run_x = data_test[['Pclass', 'Age', 'Sex', 'SibSp', 'Parch', 'Fare', 'Embarked']] run_x['Age'].fillna(age_mean, inplace=True) run_x['Fare'].fillna(fare_mean, inplace=True) for i in range(1, 4): run_x.loc[run_x.Pclass == i, 'Pclass'] = str(i) run_x = vec.transform(run_x.to_dict(orient='record')) run_y_predict = dtc.predict(run_x)
code
1006177/cell_15
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction import DictVectorizer from sklearn.model_selection import train_test_split from sklearn.tree import DecisionTreeClassifier import pandas as pd data_train = pd.read_csv('../input/train.csv') data_test = pd.read_csv('../input/test.csv') data_x = data_train[['Pclass', 'Age', 'Sex', 'SibSp', 'Parch', 'Fare', 'Embarked']] data_y = data_train['Survived'] age_mean = data_x['Age'].dropna().median() fare_mean = data_x['Fare'].dropna().median() data_x['Age'].fillna(age_mean, inplace=True) data_x['Fare'].fillna(fare_mean, inplace=True) data_x['Embarked'].fillna('S', inplace=True) for i in range(1, 4): data_x.loc[data_x.Pclass == i, 'Pclass'] = str(i) from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(data_x, data_y, test_size=0.25, random_state=33) y_train.value_counts() from sklearn.feature_extraction import DictVectorizer vec = DictVectorizer(sparse=False) x_train = vec.fit_transform(x_train.to_dict(orient='record')) x_test = vec.transform(x_test.to_dict(orient='record')) vec.get_feature_names() from sklearn.tree import DecisionTreeClassifier dtc = DecisionTreeClassifier() dtc.fit(x_train, y_train) dtc_y_predict = dtc.predict(x_test) dtc.score(x_test, y_test)
code
1006177/cell_16
[ "text_html_output_1.png", "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.feature_extraction import DictVectorizer from sklearn.metrics import classification_report from sklearn.model_selection import train_test_split from sklearn.tree import DecisionTreeClassifier import pandas as pd data_train = pd.read_csv('../input/train.csv') data_test = pd.read_csv('../input/test.csv') data_x = data_train[['Pclass', 'Age', 'Sex', 'SibSp', 'Parch', 'Fare', 'Embarked']] data_y = data_train['Survived'] age_mean = data_x['Age'].dropna().median() fare_mean = data_x['Fare'].dropna().median() data_x['Age'].fillna(age_mean, inplace=True) data_x['Fare'].fillna(fare_mean, inplace=True) data_x['Embarked'].fillna('S', inplace=True) for i in range(1, 4): data_x.loc[data_x.Pclass == i, 'Pclass'] = str(i) from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(data_x, data_y, test_size=0.25, random_state=33) y_train.value_counts() from sklearn.feature_extraction import DictVectorizer vec = DictVectorizer(sparse=False) x_train = vec.fit_transform(x_train.to_dict(orient='record')) x_test = vec.transform(x_test.to_dict(orient='record')) vec.get_feature_names() from sklearn.tree import DecisionTreeClassifier dtc = DecisionTreeClassifier() dtc.fit(x_train, y_train) dtc_y_predict = dtc.predict(x_test) print(classification_report(y_test, dtc_y_predict, target_names=['died', 'surived']))
code
1006177/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd data_train = pd.read_csv('../input/train.csv') data_test = pd.read_csv('../input/test.csv') data_train.info()
code
1006177/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd data_train = pd.read_csv('../input/train.csv') data_test = pd.read_csv('../input/test.csv') data_x = data_train[['Pclass', 'Age', 'Sex', 'SibSp', 'Parch', 'Fare', 'Embarked']] data_y = data_train['Survived'] age_mean = data_x['Age'].dropna().median() fare_mean = data_x['Fare'].dropna().median() data_x['Age'].fillna(age_mean, inplace=True) data_x['Fare'].fillna(fare_mean, inplace=True) data_x['Embarked'].fillna('S', inplace=True) for i in range(1, 4): data_x.loc[data_x.Pclass == i, 'Pclass'] = str(i) data_x.head()
code
17101114/cell_13
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.linear_model import LogisticRegression LR_Model = LogisticRegression(C=0.1, max_iter=20, fit_intercept=True, n_jobs=3, solver='liblinear') LR_Model.fit(Xtrain, Ytrain)
code
17101114/cell_25
[ "text_plain_output_1.png" ]
from sklearn.externals import joblib from sklearn.linear_model import LogisticRegression LR_Model = LogisticRegression(C=0.1, max_iter=20, fit_intercept=True, n_jobs=3, solver='liblinear') LR_Model.fit(Xtrain, Ytrain) joblib_file = 'joblib_RL_Model.pkl' joblib.dump(LR_Model, joblib_file)
code
17101114/cell_23
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.externals import joblib
code
17101114/cell_29
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from sklearn.externals import joblib from sklearn.linear_model import LogisticRegression import pickle LR_Model = LogisticRegression(C=0.1, max_iter=20, fit_intercept=True, n_jobs=3, solver='liblinear') LR_Model.fit(Xtrain, Ytrain) Pkl_Filename = 'Pickle_RL_Model.pkl' with open(Pkl_Filename, 'wb') as file: pickle.dump(LR_Model, file) with open(Pkl_Filename, 'rb') as file: Pickled_LR_Model = pickle.load(file) Pickled_LR_Model score = Pickled_LR_Model.score(Xtest, Ytest) Ypredict = Pickled_LR_Model.predict(Xtest) Ypredict joblib_file = 'joblib_RL_Model.pkl' joblib.dump(LR_Model, joblib_file) joblib_LR_model = joblib.load(joblib_file) joblib_LR_model score = joblib_LR_model.score(Xtest, Ytest) print('Test score: {0:.2f} %'.format(100 * score)) Ypredict = joblib_LR_model.predict(Xtest) Ypredict
code
17101114/cell_19
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression import pickle LR_Model = LogisticRegression(C=0.1, max_iter=20, fit_intercept=True, n_jobs=3, solver='liblinear') LR_Model.fit(Xtrain, Ytrain) Pkl_Filename = 'Pickle_RL_Model.pkl' with open(Pkl_Filename, 'wb') as file: pickle.dump(LR_Model, file) with open(Pkl_Filename, 'rb') as file: Pickled_LR_Model = pickle.load(file) Pickled_LR_Model score = Pickled_LR_Model.score(Xtest, Ytest) print('Test score: {0:.2f} %'.format(100 * score)) Ypredict = Pickled_LR_Model.predict(Xtest) Ypredict
code
17101114/cell_18
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression import pickle LR_Model = LogisticRegression(C=0.1, max_iter=20, fit_intercept=True, n_jobs=3, solver='liblinear') LR_Model.fit(Xtrain, Ytrain) Pkl_Filename = 'Pickle_RL_Model.pkl' with open(Pkl_Filename, 'wb') as file: pickle.dump(LR_Model, file) with open(Pkl_Filename, 'rb') as file: Pickled_LR_Model = pickle.load(file) Pickled_LR_Model
code
17101114/cell_27
[ "text_plain_output_1.png" ]
from sklearn.externals import joblib from sklearn.linear_model import LogisticRegression LR_Model = LogisticRegression(C=0.1, max_iter=20, fit_intercept=True, n_jobs=3, solver='liblinear') LR_Model.fit(Xtrain, Ytrain) joblib_file = 'joblib_RL_Model.pkl' joblib.dump(LR_Model, joblib_file) joblib_LR_model = joblib.load(joblib_file) joblib_LR_model
code
129022282/cell_13
[ "text_html_output_1.png" ]
import pandas as pd df_train = pd.read_csv('/kaggle/input/loan-data-set/loan_train.csv') df_test = pd.read_csv('/kaggle/input/loan-data-set/loan_test.csv') df_train.isna().sum() df_train.isna().sum() df_train['Status'].value_counts()
code
129022282/cell_25
[ "text_html_output_1.png" ]
from imblearn.over_sampling import SMOTE from sklearn.ensemble import RandomForestClassifier smt = SMOTE(random_state=0) xtrain_res, ytrain_res = smt.fit_resample(xtrain, ytrain) rf = RandomForestClassifier(max_depth=3, random_state=0) rf.fit(xtrain_res, ytrain_res)
code
129022282/cell_23
[ "image_output_1.png" ]
from imblearn.over_sampling import SMOTE smt = SMOTE(random_state=0) xtrain_res, ytrain_res = smt.fit_resample(xtrain, ytrain) print(f'Distribuition BEFORE balancing:\n{ytrain.value_counts()}') print() print(f'Distribuition AFTER balancing:\n{ytrain_res.value_counts()}')
code
129022282/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd df_train = pd.read_csv('/kaggle/input/loan-data-set/loan_train.csv') df_test = pd.read_csv('/kaggle/input/loan-data-set/loan_test.csv') df_test.head()
code
129022282/cell_26
[ "text_plain_output_1.png", "image_output_1.png" ]
from imblearn.over_sampling import SMOTE
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score

# xtrain/ytrain/xtest/ytest come from a train/test split performed in an earlier notebook cell (not included here)
smt = SMOTE(random_state=0)
xtrain_res, ytrain_res = smt.fit_resample(xtrain, ytrain)
rf = RandomForestClassifier(max_depth=3, random_state=0)
rf.fit(xtrain_res, ytrain_res)
ypred = rf.predict(xtest)
print('Accuracy:', accuracy_score(ytest, ypred))
code
129022282/cell_19
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df_train = pd.read_csv('/kaggle/input/loan-data-set/loan_train.csv') df_test = pd.read_csv('/kaggle/input/loan-data-set/loan_test.csv') df_train.isna().sum() df_train.isna().sum() columns_list = list(df_train.columns) for i in range(len(columns_list)): plt.xticks(rotation=45) plt.tight_layout() le = LabelEncoder() str_col = df_train.select_dtypes(include='object').columns for c in str_col: df_train[c] = le.fit_transform(df_train[c]) df_train.head()
code
129022282/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd df_train = pd.read_csv('/kaggle/input/loan-data-set/loan_train.csv') df_test = pd.read_csv('/kaggle/input/loan-data-set/loan_test.csv') df_train.info()
code
129022282/cell_18
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df_train = pd.read_csv('/kaggle/input/loan-data-set/loan_train.csv') df_test = pd.read_csv('/kaggle/input/loan-data-set/loan_test.csv') df_train.isna().sum() df_train.isna().sum() columns_list = list(df_train.columns) for i in range(len(columns_list)): plt.xticks(rotation=45) plt.tight_layout() le = LabelEncoder() str_col = df_train.select_dtypes(include='object').columns for c in str_col: df_train[c] = le.fit_transform(df_train[c]) plt.figure(figsize=(16, 10)) sns.heatmap(df_train.corr(), annot=True)
code
129022282/cell_8
[ "text_html_output_1.png" ]
import pandas as pd df_train = pd.read_csv('/kaggle/input/loan-data-set/loan_train.csv') df_test = pd.read_csv('/kaggle/input/loan-data-set/loan_test.csv') df_train.isna().sum()
code
129022282/cell_17
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder import matplotlib.pyplot as plt import pandas as pd df_train = pd.read_csv('/kaggle/input/loan-data-set/loan_train.csv') df_test = pd.read_csv('/kaggle/input/loan-data-set/loan_test.csv') df_train.isna().sum() df_train.isna().sum() columns_list = list(df_train.columns) for i in range(len(columns_list)): plt.xticks(rotation=45) plt.tight_layout() le = LabelEncoder() str_col = df_train.select_dtypes(include='object').columns for c in str_col: df_train[c] = le.fit_transform(df_train[c]) df_train.head()
code
129022282/cell_31
[ "text_html_output_1.png" ]
from imblearn.over_sampling import SMOTE from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import accuracy_score from sklearn.preprocessing import LabelEncoder import matplotlib.pyplot as plt import pandas as pd df_train = pd.read_csv('/kaggle/input/loan-data-set/loan_train.csv') df_test = pd.read_csv('/kaggle/input/loan-data-set/loan_test.csv') df_train.isna().sum() df_train.isna().sum() columns_list = list(df_train.columns) for i in range(len(columns_list)): plt.xticks(rotation=45) plt.tight_layout() le = LabelEncoder() str_col = df_train.select_dtypes(include='object').columns for c in str_col: df_train[c] = le.fit_transform(df_train[c]) smt = SMOTE(random_state=0) xtrain_res, ytrain_res = smt.fit_resample(xtrain, ytrain) rf = RandomForestClassifier(max_depth=3, random_state=0) rf.fit(xtrain_res, ytrain_res) ypred = rf.predict(xtest) le = LabelEncoder() str_col = df_test.select_dtypes(include='object').columns for c in str_col: df_test[c] = le.fit_transform(df_test[c]) test_predict = rf.predict(df_test) test_result = pd.DataFrame(df_test) test_result['predicted_status'] = test_predict test_result.head()
code
129022282/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd df_train = pd.read_csv('/kaggle/input/loan-data-set/loan_train.csv') df_test = pd.read_csv('/kaggle/input/loan-data-set/loan_test.csv') df_train.isna().sum() df_train.isna().sum() columns_list = list(df_train.columns) plt.figure(figsize=(12, 20)) for i in range(len(columns_list)): plt.subplot(5, 3, i + 1) plt.title(columns_list[i]) plt.xticks(rotation=45) plt.hist(df_train[columns_list[i]]) plt.tight_layout()
code
129022282/cell_12
[ "text_html_output_1.png" ]
import pandas as pd df_train = pd.read_csv('/kaggle/input/loan-data-set/loan_train.csv') df_test = pd.read_csv('/kaggle/input/loan-data-set/loan_test.csv') df_train.isna().sum() df_train.isna().sum()
code
129022282/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd df_train = pd.read_csv('/kaggle/input/loan-data-set/loan_train.csv') df_test = pd.read_csv('/kaggle/input/loan-data-set/loan_test.csv') df_train.head()
code
72106185/cell_13
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split  # needed for the X_train/X_valid split used below
from sklearn.preprocessing import OneHotEncoder
from xgboost import XGBRegressor
import pandas as pd

train = pd.read_csv('../input/30-days-of-ml/train.csv')
train
train.drop('id', axis=1, inplace=True)
na_count = pd.Series([train[col].isna().sum() for col in train.columns], index=train.columns, name='NA Count')
na_count
y = train['target']
X = train.drop('target', axis=1)
num_cols = [col for col in X.columns if X[col].dtype in ['int64', 'float64']]
cat_cols = [col for col in X.columns if X[col].dtype == 'object' and X[col].nunique() < 10]
cardinality = pd.Series([X[col].nunique() for col in cat_cols], index=cat_cols, name='Cardinality')
cardinality

# The original cell uses X_train/X_valid/y_train/y_valid without defining them; a train/validation
# split is assumed here (the split parameters are a guess, not taken from the source notebook).
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.2, random_state=0)

OHE = OneHotEncoder(handle_unknown='ignore', sparse=False)
OH_cols_train = pd.DataFrame(OHE.fit_transform(X_train[cat_cols]))
OH_cols_valid = pd.DataFrame(OHE.transform(X_valid[cat_cols]))
OH_cols_train.index = X_train.index
OH_cols_valid.index = X_valid.index
num_X_train = X_train.drop(cat_cols, axis=1)
num_X_valid = X_valid.drop(cat_cols, axis=1)
OH_X_train = pd.concat([num_X_train, OH_cols_train], axis=1)
OH_X_valid = pd.concat([num_X_valid, OH_cols_valid], axis=1)

model = XGBRegressor(n_estimators=600, n_jobs=4, learning_rate=0.05, random_state=0)
model.fit(OH_X_train, y_train)
preds = model.predict(OH_X_valid)
print('RMSE: ', mean_squared_error(preds, y_valid, squared=False))
code