Columns: markdown, code, path, repo_name, license, hash
Then we import it into a SQLite3 database. The following function automatically guesses the table schema.
from pyensae.sql import import_flatfile_into_database

import_flatfile_into_database("velib_vanves.db3", "velib_vanves.txt", add_key="key")
_doc/notebooks/pyensae_flat2db3.ipynb
sdpython/pyensae
mit
26e8c3db62d9b04c7b8f8d7c9fa3ad4c
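As a quick sanity check on what the import produced, one could inspect the inferred schema directly with Python's built-in sqlite3 module. This is a minimal sketch, assuming the created table is named velib_vanves (the name used in the queries later in this notebook).

import sqlite3

con = sqlite3.connect("velib_vanves.db3")
cur = con.cursor()
# Columns and types that were inferred during the import.
cur.execute("PRAGMA table_info(velib_vanves)")
for _, name, ctype, *rest in cur.fetchall():
    print(name, ctype)
# Number of rows imported from the flat file.
cur.execute("SELECT COUNT(*) FROM velib_vanves")
print("rows:", cur.fetchone()[0])
con.close()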
We check the database exists:
import os

os.listdir(".")
_doc/notebooks/pyensae_flat2db3.ipynb
sdpython/pyensae
mit
f19884ffbc499b1c7b38fc8d288471a1
On Windows, you can use SQLiteSpy to visualize the created table. We use pymyinstall to download it.
try:
    from pymyinstall.installcustom import install_sqlitespy
    exe = install_sqlitespy()
except:
    # we skip an exception
    # the website can be down...
    exe = None
exe
_doc/notebooks/pyensae_flat2db3.ipynb
sdpython/pyensae
mit
feb81353b6435ff3ed91441262455ddb
We just need to run it (see run_cmd).
if exe:
    from pyquickhelper import run_cmd
    run_cmd("SQLiteSpy.exe velib_vanves.db3")
_doc/notebooks/pyensae_flat2db3.ipynb
sdpython/pyensae
mit
6b349b762292cfafc394548fd0c698a1
You should be able to see something like (on Windows):
from pyquickhelper.helpgen import NbImage

NbImage('img_nb_sqlitespy.png')
_doc/notebooks/pyensae_flat2db3.ipynb
sdpython/pyensae
mit
8c118028ebab884564a835f3653c769c
It is easier to use that tool to extract a sample of the data. Once it is ready, you can execute the SQL query in Python and convert the results into a DataFrame. The following code extracts a random sample from the original set.
sql = """SELECT * FROM velib_vanves WHERE key IN ({0})""" import random from pyquickhelper.loghelper import noLOG from pyensae.sql import Database db = Database("velib_vanves.db3", LOG = noLOG) db.connect() mx = db.execute_view("SELECT MAX(key) FROM velib_vanves")[0][0] rnd_ids = [ random.randint(1,mx) for i in range(0,100) ] # liste de 100 id aléatoires strids = ",".join( str(_) for _ in rnd_ids ) res = db.execute_view(sql.format (strids)) df = db.to_df(sql.format (strids)) db.close() df.head()[["key","last_update","available_bike_stands","available_bikes"]]
_doc/notebooks/pyensae_flat2db3.ipynb
sdpython/pyensae
mit
5e96b0b958d74382e48290fe1d2e379a
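If you prefer to stay within pandas, a similar query can usually be run through pandas.read_sql_query on a plain sqlite3 connection. This is only a sketch of that alternative, not the pyensae API used above; it assumes the same database file, table and column names.

import sqlite3
import pandas

con = sqlite3.connect("velib_vanves.db3")
# Same idea as above: pull a handful of rows straight into a DataFrame.
sample = pandas.read_sql_query(
    "SELECT key, last_update, available_bike_stands, available_bikes "
    "FROM velib_vanves LIMIT 5",
    con,
)
con.close()
sample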
<h3 id="mem">Memory Dump</h3> Once you have a big dataset available in text format, it takes some time to load into memory and you need to do that every time you need it again after you closed your python instance.
with open("temp_big_file.txt","w") as f : f.write("c1\tc2\tc3\n") for i in range(0,10000000): x = [ i, random.random(), random.random() ] s = [ str(_) for _ in x ] f.write( "\t".join(s) + "\n" ) os.stat("temp_big_file.txt").st_size import pandas,time t = time.perf_counter() df = pandas.read_csv("temp_big_file.txt",sep="\t") print("duration (s)",time.perf_counter()-t)
_doc/notebooks/pyensae_flat2db3.ipynb
sdpython/pyensae
mit
ab3519403df7df18afd62dffd3be644b
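When a text file is too large to load at once, pandas can also iterate over it in chunks. The sketch below shows that pattern with an arbitrarily chosen chunk size; it is only an alternative to reading the whole file, not something the notebook itself does.

import pandas

total = 0
# Process the big file in 1-million-row chunks instead of loading it whole.
for chunk in pandas.read_csv("temp_big_file.txt", sep="\t", chunksize=1_000_000):
    total += chunk["c2"].sum()
print("sum of c2:", total)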
That is slow, considering that many datasets contain many more features. But we can speed it up by doing a kind of memory dump with to_pickle.
t = time.perf_counter()
df.to_pickle("temp_big_file.bin")
print("duration (s)", time.perf_counter() - t)
_doc/notebooks/pyensae_flat2db3.ipynb
sdpython/pyensae
mit
0a2598bbebfde6e5800ed8869db88c3c
And we reload it with read_pickle:
t = time.perf_counter()
df = pandas.read_pickle("temp_big_file.bin")
print("duration (s)", time.perf_counter() - t)
_doc/notebooks/pyensae_flat2db3.ipynb
sdpython/pyensae
mit
e5635595ca7fcabc01fb40002093b0ae
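Pickle files are Python-specific and tied to the pandas version that wrote them. A columnar format such as Parquet is a common alternative; this is a hedged sketch only, and it assumes pyarrow (or fastparquet) is installed, which the notebook does not require.

import time
import pandas

t = time.perf_counter()
df.to_parquet("temp_big_file.parquet")          # requires pyarrow or fastparquet
print("write (s)", time.perf_counter() - t)

t = time.perf_counter()
df2 = pandas.read_parquet("temp_big_file.parquet")
print("read (s)", time.perf_counter() - t)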
First, fetch the schema from the primary source in S3, as per bug 1312006. We fall back to the GitHub location if it is not available.
import boto3 import botocore import json import tempfile import urllib2 def fetch_schema(): """ Fetch the crash data schema from an s3 location or github location. This returns the corresponding JSON schema in a python dictionary. """ region = "us-west-2" bucket = "org-mozilla-telemetry-crashes" key = "crash_report.json" fallback_url = "https://raw.githubusercontent.com/mozilla/socorro/master/socorro/schemas/crash_report.json" try: log.info("Fetching latest crash data schema from s3://{}/{}".format(bucket, key)) s3 = boto3.client('s3', region_name=region) # download schema to memory via a file like object resp = tempfile.TemporaryFile() s3.download_fileobj(bucket, key, resp) resp.seek(0) except botocore.exceptions.ClientError as e: log.warning(("Could not fetch schema from s3://{}/{}: {}\n" "Fetching crash data schema from {}") .format(bucket, key, e, fallback_url)) resp = urllib2.urlopen(fallback_url) return json.load(resp)
reports/socorro_import/ImportCrashData.ipynb
acmiyaguchi/data-pipeline
mpl-2.0
905223f4d883095973e58a85f9bf2918
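The cell above uses urllib2, which only exists on Python 2. If one were running this on Python 3, the fallback branch might look like the sketch below; the URL is the one from the notebook, everything else is an assumption for illustration.

import json
from urllib.request import urlopen

FALLBACK_URL = ("https://raw.githubusercontent.com/mozilla/socorro/master/"
                "socorro/schemas/crash_report.json")

def fetch_schema_fallback(url=FALLBACK_URL):
    """Fetch the crash-report JSON schema from the GitHub fallback location."""
    with urlopen(url) as resp:
        return json.load(resp)

schema = fetch_schema_fallback()
print(sorted(schema.keys())[:5])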
Read the crash data as JSON and convert it to Parquet.
from datetime import datetime as dt, timedelta, date
from pyspark.sql import SQLContext


def daterange(start_date, end_date):
    for n in range(int((end_date - start_date).days) + 1):
        yield (end_date - timedelta(n)).strftime("%Y%m%d")


def import_day(d, schema, version):
    """Convert JSON data stored in an S3 bucket into parquet, indexed by crash_date."""
    source_s3path = "s3://org-mozilla-telemetry-crashes/v1/crash_report"
    dest_s3path = "s3://telemetry-parquet/socorro_crash/"
    num_partitions = 10
    log.info("Processing {}, started at {}".format(d, dt.utcnow()))
    cur_source_s3path = "{}/{}".format(source_s3path, d)
    cur_dest_s3path = "{}/v{}/crash_date={}".format(dest_s3path, version, d)
    df = sqlContext.read.json(cur_source_s3path, schema=schema)
    df.repartition(num_partitions).write.parquet(cur_dest_s3path, mode="overwrite")


def backfill(start_date_yyyymmdd, schema, version):
    """
    Import data from a start date to yesterday's date.
    Example:
        backfill("20160902", crash_schema, version)
    """
    start_date = dt.strptime(start_date_yyyymmdd, "%Y%m%d")
    end_date = dt.utcnow() - timedelta(1)  # yesterday
    for d in daterange(start_date, end_date):
        try:
            # pass the schema and version along (the original call omitted them)
            import_day(d, schema, version)
        except Exception as e:
            log.error(e)


from os import environ

# get the relevant date
yesterday = dt.strftime(dt.utcnow() - timedelta(1), "%Y%m%d")
target_date = environ.get('date', yesterday)

# fetch and generate the schema
schema_data = fetch_schema()
crash_schema = create_struct(schema_data)
version = schema_data.get('$target_version', 0)  # default to v0

# process the data
import_day(target_date, crash_schema, version)
reports/socorro_import/ImportCrashData.ipynb
acmiyaguchi/data-pipeline
mpl-2.0
1a37214c94af1ae40fd2f4815ed94c82
Downloading the MosMedData: Chest CT Scans with COVID-19 Related Findings In this example, we use a subset of the MosMedData: Chest CT Scans with COVID-19 Related Findings. This dataset consists of lung CT scans with COVID-19 related findings, as well as scans without such findings. We will be using the associated radiological findings of the CT scans as labels to build a classifier to predict the presence of viral pneumonia. Hence, the task is a binary classification problem.
# Download url of normal CT scans. url = "https://github.com/hasibzunair/3D-image-classification-tutorial/releases/download/v0.2/CT-0.zip" filename = os.path.join(os.getcwd(), "CT-0.zip") keras.utils.get_file(filename, url) # Download url of abnormal CT scans. url = "https://github.com/hasibzunair/3D-image-classification-tutorial/releases/download/v0.2/CT-23.zip" filename = os.path.join(os.getcwd(), "CT-23.zip") keras.utils.get_file(filename, url) # Make a directory to store the data. os.makedirs("MosMedData") # Unzip data in the newly created directory. with zipfile.ZipFile("CT-0.zip", "r") as z_fp: z_fp.extractall("./MosMedData/") with zipfile.ZipFile("CT-23.zip", "r") as z_fp: z_fp.extractall("./MosMedData/")
examples/vision/ipynb/3D_image_classification.ipynb
keras-team/keras-io
apache-2.0
0906bae2fcd15182999903768fb65a4e
Loading data and preprocessing The files are provided in NIfTI format with the extension .nii. To read the scans, we use the nibabel package. You can install the package via pip install nibabel. CT scans store raw voxel intensity in Hounsfield units (HU), which range from -1024 to above 2000 in this dataset. Values above 400 correspond to bone, so 400 is used as the upper bound; a window between -1000 and 400 is commonly used to normalize CT scans. To process the data, we do the following: we first rotate the volumes by 90 degrees so the orientation is fixed, we scale the HU values to be between 0 and 1, and we resize the width, height and depth. Here we define several helper functions to process the data. These functions will be used when building the training and validation datasets.
import nibabel as nib from scipy import ndimage def read_nifti_file(filepath): """Read and load volume""" # Read file scan = nib.load(filepath) # Get raw data scan = scan.get_fdata() return scan def normalize(volume): """Normalize the volume""" min = -1000 max = 400 volume[volume < min] = min volume[volume > max] = max volume = (volume - min) / (max - min) volume = volume.astype("float32") return volume def resize_volume(img): """Resize across z-axis""" # Set the desired depth desired_depth = 64 desired_width = 128 desired_height = 128 # Get current depth current_depth = img.shape[-1] current_width = img.shape[0] current_height = img.shape[1] # Compute depth factor depth = current_depth / desired_depth width = current_width / desired_width height = current_height / desired_height depth_factor = 1 / depth width_factor = 1 / width height_factor = 1 / height # Rotate img = ndimage.rotate(img, 90, reshape=False) # Resize across z-axis img = ndimage.zoom(img, (width_factor, height_factor, depth_factor), order=1) return img def process_scan(path): """Read and resize volume""" # Read scan volume = read_nifti_file(path) # Normalize volume = normalize(volume) # Resize width, height and depth volume = resize_volume(volume) return volume
examples/vision/ipynb/3D_image_classification.ipynb
keras-team/keras-io
apache-2.0
76cf72960ac61306515065300d83c38e
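A quick way to convince yourself the HU windowing behaves as described is to run the same clipping and scaling on a few synthetic values. This is a self-contained sketch that mirrors the normalize helper above using NumPy only; the sample values are made up.

import numpy as np

def normalize_hu(volume, min_hu=-1000.0, max_hu=400.0):
    # Clip to the HU window used above, then rescale to [0, 1].
    volume = np.clip(volume, min_hu, max_hu)
    return ((volume - min_hu) / (max_hu - min_hu)).astype("float32")

fake_hu = np.array([-2000.0, -1000.0, 0.0, 400.0, 3000.0])
print(normalize_hu(fake_hu))  # -> [0.0, 0.0, ~0.714, 1.0, 1.0]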
Let's read the paths of the CT scans from the class directories.
# Folder "CT-0" consist of CT scans having normal lung tissue, # no CT-signs of viral pneumonia. normal_scan_paths = [ os.path.join(os.getcwd(), "MosMedData/CT-0", x) for x in os.listdir("MosMedData/CT-0") ] # Folder "CT-23" consist of CT scans having several ground-glass opacifications, # involvement of lung parenchyma. abnormal_scan_paths = [ os.path.join(os.getcwd(), "MosMedData/CT-23", x) for x in os.listdir("MosMedData/CT-23") ] print("CT scans with normal lung tissue: " + str(len(normal_scan_paths))) print("CT scans with abnormal lung tissue: " + str(len(abnormal_scan_paths)))
examples/vision/ipynb/3D_image_classification.ipynb
keras-team/keras-io
apache-2.0
e7a1f1b7db4b1a8e94652dcd871709bf
Build train and validation datasets Read the scans from the class directories and assign labels. Downsample the scans to a shape of 128x128x64. Rescale the raw HU values to the range 0 to 1. Lastly, split the dataset into train and validation subsets.
# Read and process the scans. # Each scan is resized across height, width, and depth and rescaled. abnormal_scans = np.array([process_scan(path) for path in abnormal_scan_paths]) normal_scans = np.array([process_scan(path) for path in normal_scan_paths]) # For the CT scans having presence of viral pneumonia # assign 1, for the normal ones assign 0. abnormal_labels = np.array([1 for _ in range(len(abnormal_scans))]) normal_labels = np.array([0 for _ in range(len(normal_scans))]) # Split data in the ratio 70-30 for training and validation. x_train = np.concatenate((abnormal_scans[:70], normal_scans[:70]), axis=0) y_train = np.concatenate((abnormal_labels[:70], normal_labels[:70]), axis=0) x_val = np.concatenate((abnormal_scans[70:], normal_scans[70:]), axis=0) y_val = np.concatenate((abnormal_labels[70:], normal_labels[70:]), axis=0) print( "Number of samples in train and validation are %d and %d." % (x_train.shape[0], x_val.shape[0]) )
examples/vision/ipynb/3D_image_classification.ipynb
keras-team/keras-io
apache-2.0
4d0a8b2151f0c17e66d22ef7e01a2ef8
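The split above simply takes the first 70 scans of each class, and (as noted later) no random seed is set, so results vary between runs. A seeded, stratified split is one way to make the experiment reproducible. This is a hypothetical alternative only; it relies on scikit-learn, an extra dependency the original example does not use.

import numpy as np
from sklearn.model_selection import train_test_split

# Reproducible alternative to the manual 70/30 slicing above.
scans = np.concatenate((abnormal_scans, normal_scans), axis=0)
targets = np.concatenate((abnormal_labels, normal_labels), axis=0)
x_train, x_val, y_train, y_val = train_test_split(
    scans, targets, test_size=0.3, stratify=targets, random_state=42
)
print(x_train.shape, x_val.shape)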
Data augmentation The CT scans are also augmented by rotating them at random angles during training. Since the data is stored in rank-3 tensors of shape (samples, height, width, depth), we add a dimension of size 1 at axis 4 to be able to perform 3D convolutions on the data. The new shape is thus (samples, height, width, depth, 1). There are many kinds of preprocessing and augmentation techniques out there; this example shows a few simple ones to get started.
import random from scipy import ndimage @tf.function def rotate(volume): """Rotate the volume by a few degrees""" def scipy_rotate(volume): # define some rotation angles angles = [-20, -10, -5, 5, 10, 20] # pick angles at random angle = random.choice(angles) # rotate volume volume = ndimage.rotate(volume, angle, reshape=False) volume[volume < 0] = 0 volume[volume > 1] = 1 return volume augmented_volume = tf.numpy_function(scipy_rotate, [volume], tf.float32) return augmented_volume def train_preprocessing(volume, label): """Process training data by rotating and adding a channel.""" # Rotate volume volume = rotate(volume) volume = tf.expand_dims(volume, axis=3) return volume, label def validation_preprocessing(volume, label): """Process validation data by only adding a channel.""" volume = tf.expand_dims(volume, axis=3) return volume, label
examples/vision/ipynb/3D_image_classification.ipynb
keras-team/keras-io
apache-2.0
2a5fc4599054a8706fb0c0d3ef881093
While defining the train and validation data loaders, the training data is passed through an augmentation function that randomly rotates the volumes at different angles. Note that both training and validation data are already rescaled to have values between 0 and 1.
# Define data loaders.
train_loader = tf.data.Dataset.from_tensor_slices((x_train, y_train))
validation_loader = tf.data.Dataset.from_tensor_slices((x_val, y_val))

batch_size = 2
# Augment on the fly during training.
train_dataset = (
    train_loader.shuffle(len(x_train))
    .map(train_preprocessing)
    .batch(batch_size)
    .prefetch(2)
)
# Only rescale.
validation_dataset = (
    validation_loader.shuffle(len(x_val))
    .map(validation_preprocessing)
    .batch(batch_size)
    .prefetch(2)
)
examples/vision/ipynb/3D_image_classification.ipynb
keras-team/keras-io
apache-2.0
96bd408775a6e141d6075b39f4e1fd97
Visualize an augmented CT scan.
import matplotlib.pyplot as plt data = train_dataset.take(1) images, labels = list(data)[0] images = images.numpy() image = images[0] print("Dimension of the CT scan is:", image.shape) plt.imshow(np.squeeze(image[:, :, 30]), cmap="gray")
examples/vision/ipynb/3D_image_classification.ipynb
keras-team/keras-io
apache-2.0
32e063bd77e7e558cba46221b106a101
Since a CT scan has many slices, let's visualize a montage of the slices.
def plot_slices(num_rows, num_columns, width, height, data): """Plot a montage of 20 CT slices""" data = np.rot90(np.array(data)) data = np.transpose(data) data = np.reshape(data, (num_rows, num_columns, width, height)) rows_data, columns_data = data.shape[0], data.shape[1] heights = [slc[0].shape[0] for slc in data] widths = [slc.shape[1] for slc in data[0]] fig_width = 12.0 fig_height = fig_width * sum(heights) / sum(widths) f, axarr = plt.subplots( rows_data, columns_data, figsize=(fig_width, fig_height), gridspec_kw={"height_ratios": heights}, ) for i in range(rows_data): for j in range(columns_data): axarr[i, j].imshow(data[i][j], cmap="gray") axarr[i, j].axis("off") plt.subplots_adjust(wspace=0, hspace=0, left=0, right=1, bottom=0, top=1) plt.show() # Visualize montage of slices. # 4 rows and 10 columns for 100 slices of the CT scan. plot_slices(4, 10, 128, 128, image[:, :, :40])
examples/vision/ipynb/3D_image_classification.ipynb
keras-team/keras-io
apache-2.0
511a28e4cc7a33d5798bcde4a47c907e
Define a 3D convolutional neural network To make the model easier to understand, we structure it into blocks. The architecture of the 3D CNN used in this example is based on this paper.
def get_model(width=128, height=128, depth=64): """Build a 3D convolutional neural network model.""" inputs = keras.Input((width, height, depth, 1)) x = layers.Conv3D(filters=64, kernel_size=3, activation="relu")(inputs) x = layers.MaxPool3D(pool_size=2)(x) x = layers.BatchNormalization()(x) x = layers.Conv3D(filters=64, kernel_size=3, activation="relu")(x) x = layers.MaxPool3D(pool_size=2)(x) x = layers.BatchNormalization()(x) x = layers.Conv3D(filters=128, kernel_size=3, activation="relu")(x) x = layers.MaxPool3D(pool_size=2)(x) x = layers.BatchNormalization()(x) x = layers.Conv3D(filters=256, kernel_size=3, activation="relu")(x) x = layers.MaxPool3D(pool_size=2)(x) x = layers.BatchNormalization()(x) x = layers.GlobalAveragePooling3D()(x) x = layers.Dense(units=512, activation="relu")(x) x = layers.Dropout(0.3)(x) outputs = layers.Dense(units=1, activation="sigmoid")(x) # Define the model. model = keras.Model(inputs, outputs, name="3dcnn") return model # Build model. model = get_model(width=128, height=128, depth=64) model.summary()
examples/vision/ipynb/3D_image_classification.ipynb
keras-team/keras-io
apache-2.0
8a99827da00ec3b32b98037a7dbbcaa0
Train model
# Compile model. initial_learning_rate = 0.0001 lr_schedule = keras.optimizers.schedules.ExponentialDecay( initial_learning_rate, decay_steps=100000, decay_rate=0.96, staircase=True ) model.compile( loss="binary_crossentropy", optimizer=keras.optimizers.Adam(learning_rate=lr_schedule), metrics=["acc"], ) # Define callbacks. checkpoint_cb = keras.callbacks.ModelCheckpoint( "3d_image_classification.h5", save_best_only=True ) early_stopping_cb = keras.callbacks.EarlyStopping(monitor="val_acc", patience=15) # Train the model, doing validation at the end of each epoch epochs = 100 model.fit( train_dataset, validation_data=validation_dataset, epochs=epochs, shuffle=True, verbose=2, callbacks=[checkpoint_cb, early_stopping_cb], )
examples/vision/ipynb/3D_image_classification.ipynb
keras-team/keras-io
apache-2.0
dc3b141d86de76799d2b6ac264a06043
It is important to note that the number of samples is very small (only 200) and we don't specify a random seed. As such, you can expect significant variance in the results. The full dataset, which consists of over 1000 CT scans, can be found here. Using the full dataset, an accuracy of 83% was achieved. A variability of 6-7% in the classification performance is observed in both cases. Visualizing model performance Here the model accuracy and loss for the training and the validation sets are plotted. Since the validation set is class-balanced, accuracy provides an unbiased representation of the model's performance.
fig, ax = plt.subplots(1, 2, figsize=(20, 3)) ax = ax.ravel() for i, metric in enumerate(["acc", "loss"]): ax[i].plot(model.history.history[metric]) ax[i].plot(model.history.history["val_" + metric]) ax[i].set_title("Model {}".format(metric)) ax[i].set_xlabel("epochs") ax[i].set_ylabel(metric) ax[i].legend(["train", "val"])
examples/vision/ipynb/3D_image_classification.ipynb
keras-team/keras-io
apache-2.0
11e372033218b64d088ee197368a1a65
Make predictions on a single CT scan
# Load best weights. model.load_weights("3d_image_classification.h5") prediction = model.predict(np.expand_dims(x_val[0], axis=0))[0] scores = [1 - prediction[0], prediction[0]] class_names = ["normal", "abnormal"] for score, name in zip(scores, class_names): print( "This model is %.2f percent confident that CT scan is %s" % ((100 * score), name) )
examples/vision/ipynb/3D_image_classification.ipynb
keras-team/keras-io
apache-2.0
bffbda570cada211bff30c37c05bd2ef
Model: load the data
%matplotlib inline import os import numpy as np import pandas as pd import matplotlib.pyplot as plt # define the labels col_labels=['C2H3', 'C2H6', 'CH2', 'H2CN', 'C2H4', 'H2O2', 'C2H', 'CN', 'heatRelease', 'NCO', 'NNH', 'N2', 'AR', 'psi', 'CO', 'CH4', 'HNCO', 'CH2OH', 'HCCO', 'CH2CO', 'CH', 'mu', 'C2H2', 'C2H5', 'H2', 'T', 'PVs', 'O', 'O2', 'N2O', 'C', 'C3H7', 'CH2(S)', 'NH3', 'HO2', 'NO', 'HCO', 'NO2', 'OH', 'HCNO', 'CH3CHO', 'CH3', 'NH', 'alpha', 'CH3O', 'CO2', 'CH3OH', 'CH2CHO', 'CH2O', 'C3H8', 'HNO', 'NH2', 'HCN', 'H', 'N', 'H2O', 'HCCOH', 'HCNN'] # Taking 0 out col_labels.remove('AR') col_labels.remove('heatRelease') # labels = ['CH4','O2','H2O','CO','CO2','T','PVs','psi','mu','alpha'] labels = ['T','PVs'] # labels = ['T','CH4','O2','CO2','CO','H2O','H2','OH','psi'] # labels = ['CH2OH','HNCO','CH3OH', 'CH2CHO', 'CH2O', 'C3H8', 'HNO', 'NH2', 'HCN'] # labels = np.random.choice(col_labels,20,replace=False).tolist() # labels.append('PVs') # labels = col_labels print(labels) input_features=['f','pv','zeta'] # read in the data x_input, y_label, df, in_scaler, out_scaler = read_h5_data('./data/tables_of_fgm.h5',input_features=input_features, labels = labels)
FPV_ANN/notebooks/.ipynb_checkpoints/fgm_nn_inhouse-checkpoint.ipynb
uqyge/combustionML
mit
1f13c6cc6f880a147e5969708f341243
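The helper read_h5_data is not defined in this checkpoint. A hedged sketch of what such a loader might do — read the HDF5 table with pandas, split inputs from labels, and fit min-max scalers — is shown below. The function name, the HDF5 key, and the scaling choice are all assumptions; the actual implementation in the repository may differ.

import pandas as pd
from sklearn.preprocessing import MinMaxScaler

def read_h5_data_sketch(path, input_features, labels, key="of_tables"):
    # NOTE: 'key' is a guess; the real file may store the table under another key.
    df = pd.read_hdf(path, key=key)
    in_scaler = MinMaxScaler()
    out_scaler = MinMaxScaler()
    x = in_scaler.fit_transform(df[input_features].values)
    y = out_scaler.fit_transform(df[labels].values)
    return x, y, df, in_scaler, out_scaler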
Model training (GPU)
import keras.backend as K from keras.callbacks import LearningRateScheduler import math def cubic_loss(y_true, y_pred): return K.mean(K.square(y_true - y_pred)*K.abs(y_true - y_pred), axis=-1) def coeff_r2(y_true, y_pred): from keras import backend as K SS_res = K.sum(K.square( y_true-y_pred )) SS_tot = K.sum(K.square( y_true - K.mean(y_true) ) ) return ( 1 - SS_res/(SS_tot + K.epsilon()) ) def step_decay(epoch): initial_lrate = 0.001 drop = 0.5 epochs_drop = 200.0 lrate = initial_lrate * math.pow(drop,math.floor((1+epoch)/epochs_drop)) return lrate lrate = LearningRateScheduler(step_decay) from keras import optimizers batch_size = 1024*32 epochs = 60 vsplit = 0.1 loss_type='mse' adam_op = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999,epsilon=1e-8, decay=0.0, amsgrad=True) model.compile(loss=loss_type, optimizer=adam_op, metrics=[coeff_r2]) # model.compile(loss=cubic_loss, optimizer=adam_op, metrics=['accuracy']) # checkpoint (save the best model based validate loss) !mkdir ./tmp filepath = "./tmp/weights.best.cntk.hdf5" checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min', period=20) # callbacks_list = [checkpoint] callbacks_list = [lrate] # fit the model history = model.fit( x_train, y_train, epochs=epochs, batch_size=batch_size, validation_split=vsplit, verbose=2, # callbacks=callbacks_list, shuffle=True) model.save('trained_fgm_nn.h5')
FPV_ANN/notebooks/.ipynb_checkpoints/fgm_nn_inhouse-checkpoint.ipynb
uqyge/combustionML
mit
b1b0fbe8263c906151dab7c11138f7ea
Prepare the data for plotting (GPU)
from sklearn.metrics import r2_score # model.load_weights("./tmp/weights.best.cntk.hdf5") x_test_df = pd.DataFrame(in_scaler.inverse_transform(x_test),columns=input_features) y_test_df = pd.DataFrame(out_scaler.inverse_transform(y_test),columns=labels) predict_val = model.predict(x_test,batch_size=1024*8) predict_df = pd.DataFrame(out_scaler.inverse_transform(predict_val), columns=labels) test_data=pd.concat([x_test_df,y_test_df],axis=1) pred_data=pd.concat([x_test_df,predict_df],axis=1) !rm sim_check.h5 test_data.to_hdf('sim_check.h5',key='test') pred_data.to_hdf('sim_check.h5',key='pred') df_test=pd.read_hdf('sim_check.h5',key='test') df_pred=pd.read_hdf('sim_check.h5',key='pred') zeta_level=list(set(df_test['zeta'])) zeta_level.sort() res_sum=pd.DataFrame() r2s=[] r2s_i=[] names=[] maxs_0=[] maxs_9=[] for r2,name in zip(r2_score(df_test,df_pred,multioutput='raw_values'),df_test.columns): names.append(name) r2s.append(r2) maxs_0.append(df_test[df_test['zeta']==zeta_level[0]][name].max()) maxs_9.append(df_test[df_test['zeta']==zeta_level[8]][name].max()) for i in zeta_level: r2s_i.append(r2_score(df_pred[df_pred['zeta']==i][name], df_test[df_test['zeta']==i][name])) res_sum['name']=names # res_sum['max_0']=maxs_0 # res_sum['max_9']=maxs_9 res_sum['z_scale']=[m_9/(m_0+1e-20) for m_9,m_0 in zip(maxs_9,maxs_0)] # res_sum['r2']=r2s tmp=np.asarray(r2s_i).reshape(-1,10) for idx,z in enumerate(zeta_level): res_sum['r2s_'+str(z)]=tmp[:,idx] res_sum[3:] no_drop=res_sum[3:] no_drop
FPV_ANN/notebooks/.ipynb_checkpoints/fgm_nn_inhouse-checkpoint.ipynb
uqyge/combustionML
mit
4800ec2c55c354a2d8340840f1eeaa89
The optimization problem The problem we are considering is a mathematical one <img src="cone.png" width=500px/> Decisions: r in [0, 10] cm; h in [0, 20] cm Objectives: minimize S, T Constraints: V > 200cm<sup>3</sup>
# Few Utility functions def say(*lst): """ Print whithout going to new line """ print(*lst, end="") sys.stdout.flush() def random_value(low, high, decimals=2): """ Generate a random number between low and high. decimals incidicate number of decimal places """ return round(random.uniform(low, high),decimals) def gt(a, b): return a > b def lt(a, b): return a < b def shuffle(lst): """ Shuffle a list """ random.shuffle(lst) return lst class Decision(O): """ Class indicating Decision of a problem """ def __init__(self, name, low, high): """ @param name: Name of the decision @param low: minimum value @param high: maximum value """ O.__init__(self, name=name, low=low, high=high) class Objective(O): """ Class indicating Objective of a problem """ def __init__(self, name, do_minimize=True): """ @param name: Name of the objective @param do_minimize: Flag indicating if objective has to be minimized or maximized """ O.__init__(self, name=name, do_minimize=do_minimize) class Point(O): """ Represents a member of the population """ def __init__(self, decisions): O.__init__(self) self.decisions = decisions self.objectives = None def __hash__(self): return hash(tuple(self.decisions)) def __eq__(self, other): return self.decisions == other.decisions def clone(self): new = Point(self.decisions) new.objectives = self.objectives return new class Problem(O): """ Class representing the cone problem. """ def __init__(self): O.__init__(self) # TODO 2: Code up decisions and objectives below for the problem # using the auxilary classes provided above. self.decisions = None self.objectives = None radius = Decision('radius', 0, 10) height = Decision('height', 0, 20) self.decisions = [radius, height] s = Objective('surface') t = Objective('total area') self.objectives = [s,t] @staticmethod def evaluate(point): [r, h] = point.decisions point.objectives = None # TODO 3: Evaluate the objectives S and T for the point. l = (r**2 + h**2)**0.5 S = pi * r * l T = S + pi * r**2 point.objectives = [S, T] return point.objectives @staticmethod def is_valid(point): [r, h] = point.decisions # TODO 4: Check if the point has valid decisions V = pi*(r**2)*h/3 return V > 200 def generate_one(self): # TODO 5: Generate a valid instance of Point. while True: point = Point([random_value(d.low, d.high) for d in self.decisions]) if Problem.is_valid(point): return point cone = Problem() point = cone.generate_one() cone.evaluate(point) print(point)
code/5/WS1/tchhabr.ipynb
tarunchhabra26/fss16dst
apache-2.0
792ce70b47d4d869d5c2b5ddc5d106dc
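As a sanity check on the geometry coded into evaluate() and is_valid(), the cone formulas can be verified numerically for a hand-picked radius and height. The numbers below are illustrative only.

from math import pi

r, h = 5.0, 10.0                  # arbitrary test decisions
l = (r**2 + h**2) ** 0.5          # slant height
S = pi * r * l                    # curved (lateral) surface
T = S + pi * r**2                 # total surface
V = pi * r**2 * h / 3             # volume, must exceed 200 cm^3 to be valid
print(round(S, 2), round(T, 2), round(V, 2), V > 200)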
Great. Now that the class and its basic methods are defined, we move on to coding up the GA. Population First up is to create an initial population.
def populate(problem, size):
    population = []
    # TODO 6: Create a list of points of length 'size'
    return [problem.generate_one() for _ in xrange(size)]

print (populate(cone, 5))
code/5/WS1/tchhabr.ipynb
tarunchhabra26/fss16dst
apache-2.0
ac84b3dd0a2c33ed6b549180f8f5c3f4
Crossover We perform a single point crossover between two points
def crossover(mom, dad):
    # TODO 7: Create a new point which contains decisions from
    # the first half of mom and second half of dad
    n = len(mom.decisions)
    return Point(mom.decisions[:n//2] + dad.decisions[n//2:])

pop = populate(cone, 5)
crossover(pop[0], pop[1])
code/5/WS1/tchhabr.ipynb
tarunchhabra26/fss16dst
apache-2.0
24e4dace869d9e927f837a68a2e15366
Mutation Randomly change a decision with a small probability (the mutation rate), resetting it to a random value between its minimum and maximum.
def mutate(problem, point, mutation_rate=0.01):
    # TODO 8: Iterate through all the decisions in the problem
    # and if the probability is less than mutation rate
    # change the decision (randomly set it between its max and min).
    for i, d in enumerate(problem.decisions):
        if random.random() < mutation_rate:
            point.decisions[i] = random_value(d.low, d.high)
    return point

print (mutate(cone, point, 0.1))
obs = populate(cone, 5)
print (obs)
code/5/WS1/tchhabr.ipynb
tarunchhabra26/fss16dst
apache-2.0
e5007dcc4f495a12748fe5d1eff1f48d
Fitness Evaluation To evaluate fitness between points we use binary domination. Binary domination is defined as follows: * Consider two points, one and two. * For every objective o and t in one and two, o <= t (one is no worse than two). * For at least one objective o and t in one and two, o < t (one is strictly better). Note: binary domination is not the best method to evaluate fitness, but due to its simplicity we choose to use it for this workshop.
def bdom(problem, one, two):
    """ Return if one dominates two """
    objs_one = problem.evaluate(one)
    objs_two = problem.evaluate(two)
    if (one == two):
        return False
    dominates = False
    # TODO 9: Return True/False based on the definition
    # of bdom above.
    first = True
    second = False
    for i, _ in enumerate(problem.objectives):
        if ((first is True) & gt(one.objectives[i], two.objectives[i])):
            first = False
        elif (not second & (one.objectives[i] is not two.objectives[i])):
            second = True
    dominates = first & second
    return dominates

print (bdom(cone, obs[4], obs[4]))
code/5/WS1/tchhabr.ipynb
tarunchhabra26/fss16dst
apache-2.0
689995827b162cc7d1b5401e490c05a1
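The loop above is hard to follow. For reference, a more direct reading of binary domination (for minimization) could look like the sketch below; it is an illustration of the definition, not a replacement for the workshop's solution.

def bdom_sketch(one_objs, two_objs):
    """one dominates two if it is no worse on every objective
    and strictly better on at least one (all objectives minimized)."""
    no_worse = all(a <= b for a, b in zip(one_objs, two_objs))
    strictly_better = any(a < b for a, b in zip(one_objs, two_objs))
    return no_worse and strictly_better

print(bdom_sketch([1.0, 2.0], [1.5, 2.0]))  # True
print(bdom_sketch([1.0, 2.0], [1.0, 2.0]))  # False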
Fitness and Elitism In this workshop we will count the number of points of the population P dominated by a point A as the fitness of point A. This is a very naive measure of fitness, since we are using binary domination. A few prominent alternative methods are 1. Continuous Domination - Section 3.1 2. Non-dominated Sort 3. Non-dominated Sort + Niching Elitism: Sort points with respect to the fitness and select the top points.
def fitness(problem, population, point):
    dominates = 0
    # TODO 10: Evaluate fitness of a point.
    # For this workshop define fitness of a point
    # as the number of points dominated by it.
    # For example point dominates 5 members of population,
    # then fitness of point is 5.
    for pop in population:
        if bdom(problem, point, pop):
            dominates += 1
    return dominates

def elitism(problem, population, retain_size):
    # TODO 11: Sort the population with respect to the fitness
    # of the points and return the top 'retain_size' points of the population
    fit_pop = [fitness(problem, population, pop) for pop in population]
    population = [pop for _, pop in sorted(zip(fit_pop, population), reverse=True)]
    return population[:retain_size]
code/5/WS1/tchhabr.ipynb
tarunchhabra26/fss16dst
apache-2.0
040045445b254212215975a3b70d997f
Load the pretrained weights into the network:
params = pickle.load(open('./data/googlenet/blvc_googlenet.pkl', 'rb'), encoding='iso-8859-1')
model_param_values = params['param values']
#classes = params['synset words']
lasagne.layers.set_all_param_values(net_output_layer, model_param_values)

IMAGE_W = 224
print("Loaded Model parameters")
notebooks/2-CNN/6-StyleTransfer/2-Art-Style-Transfer-googlenet_theano.ipynb
mdda/fossasia-2016_deep-learning
mit
5094cc731e478ef2fe4ca0cfe9e00f4a
Executing the cell below will iterate through the images in the ./images/art-style/photos directory, so you can choose the one you want
photo_i += 1
photo = plt.imread(photos[photo_i % len(photos)])
photo_rawim, photo = googlenet.prep_image(photo)
plt.imshow(photo_rawim)
notebooks/2-CNN/6-StyleTransfer/2-Art-Style-Transfer-googlenet_theano.ipynb
mdda/fossasia-2016_deep-learning
mit
22c77d2e23791cf10c42e92ce4cc969c
Executing the cell below will iterate through the images in the ./images/art-style/styles directory, so you can choose the one you want
style_i += 1
art = plt.imread(styles[style_i % len(styles)])
art_rawim, art = googlenet.prep_image(art)
plt.imshow(art_rawim)
notebooks/2-CNN/6-StyleTransfer/2-Art-Style-Transfer-googlenet_theano.ipynb
mdda/fossasia-2016_deep-learning
mit
fccefc3947ec3f40c4017576c012252c
This defines various measures of difference that we'll use to compare the current output image with the original sources.
def plot_layout(combined): def no_axes(): plt.gca().xaxis.set_visible(False) plt.gca().yaxis.set_visible(False) plt.figure(figsize=(9,6)) plt.subplot2grid( (2,3), (0,0) ) no_axes() plt.imshow(photo_rawim) plt.subplot2grid( (2,3), (1,0) ) no_axes() plt.imshow(art_rawim) plt.subplot2grid( (2,3), (0,1), colspan=2, rowspan=2 ) no_axes() plt.imshow(combined, interpolation='nearest') plt.tight_layout() def gram_matrix(x): x = x.flatten(ndim=3) g = T.tensordot(x, x, axes=([2], [2])) return g def content_loss(P, X, layer): p = P[layer] x = X[layer] loss = 1./2 * ((x - p)**2).sum() return loss def style_loss(A, X, layer): a = A[layer] x = X[layer] A = gram_matrix(a) G = gram_matrix(x) N = a.shape[1] M = a.shape[2] * a.shape[3] loss = 1./(4 * N**2 * M**2) * ((G - A)**2).sum() return loss def total_variation_loss(x): return (((x[:,:,:-1,:-1] - x[:,:,1:,:-1])**2 + (x[:,:,:-1,:-1] - x[:,:,:-1,1:])**2)**1.25).sum()
notebooks/2-CNN/6-StyleTransfer/2-Art-Style-Transfer-googlenet_theano.ipynb
mdda/fossasia-2016_deep-learning
mit
7578c21fafef856847686580d0eae89b
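The style loss is built on the Gram matrix of channel activations. A tiny NumPy example makes the bookkeeping concrete; it roughly mirrors what gram_matrix above computes for a single sample once the spatial dimensions are flattened, but it is only an illustration with made-up numbers.

import numpy as np

# Two feature channels over a 2x2 spatial grid, flattened to (channels, positions).
F = np.array([[1.0, 2.0, 3.0, 4.0],
              [0.0, 1.0, 0.0, 1.0]])
G = F.dot(F.T)   # (channels x channels): co-activation of every channel pair
print(G)         # [[30. 6.], [6. 2.]]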
Here are the GoogLeNet layers that we're going to pay attention to :
layers = [ # used for 'content' in photo - a mid-tier convolutional layer 'inception_4b/output', # used for 'style' - conv layers throughout model (not same as content one) 'conv1/7x7_s2', 'conv2/3x3', 'inception_3b/output', 'inception_4d/output', ] #layers = [ # # used for 'content' in photo - a mid-tier convolutional layer # 'pool4/3x3_s2', # # # used for 'style' - conv layers throughout model (not same as content one) # 'conv1/7x7_s2', 'conv2/3x3', 'pool3/3x3_s2', 'inception_5b/output', #] layers = {k: net[k] for k in layers}
notebooks/2-CNN/6-StyleTransfer/2-Art-Style-Transfer-googlenet_theano.ipynb
mdda/fossasia-2016_deep-learning
mit
14052d3cdb8d10acf4f5dcb50990706a
Precompute layer activations for photo and artwork This takes ~ 20 seconds
input_im_theano = T.tensor4() outputs = lasagne.layers.get_output(layers.values(), input_im_theano) photo_features = {k: theano.shared(output.eval({input_im_theano: photo})) for k, output in zip(layers.keys(), outputs)} art_features = {k: theano.shared(output.eval({input_im_theano: art})) for k, output in zip(layers.keys(), outputs)} # Get expressions for layer activations for generated image generated_image = theano.shared(floatX(np.random.uniform(-128, 128, (1, 3, IMAGE_W, IMAGE_W)))) gen_features = lasagne.layers.get_output(layers.values(), generated_image) gen_features = {k: v for k, v in zip(layers.keys(), gen_features)}
notebooks/2-CNN/6-StyleTransfer/2-Art-Style-Transfer-googlenet_theano.ipynb
mdda/fossasia-2016_deep-learning
mit
894d7b36e1afb137e03206af8cbe36a2
Define the overall loss / badness function
losses = [] # content loss cl = 10 /1000. losses.append(cl * content_loss(photo_features, gen_features, 'inception_4b/output')) # style loss sl = 20 *1000. losses.append(sl * style_loss(art_features, gen_features, 'conv1/7x7_s2')) losses.append(sl * style_loss(art_features, gen_features, 'conv2/3x3')) losses.append(sl * style_loss(art_features, gen_features, 'inception_3b/output')) losses.append(sl * style_loss(art_features, gen_features, 'inception_4d/output')) #losses.append(sl * style_loss(art_features, gen_features, 'inception_5b/output')) # total variation penalty vp = 0.01 /1000. /1000. losses.append(vp * total_variation_loss(generated_image)) total_loss = sum(losses)
notebooks/2-CNN/6-StyleTransfer/2-Art-Style-Transfer-googlenet_theano.ipynb
mdda/fossasia-2016_deep-learning
mit
c6bdc91197c25fb7fd2b9db741e4d689
The Famous Symbolic Gradient operation
grad = T.grad(total_loss, generated_image)
notebooks/2-CNN/6-StyleTransfer/2-Art-Style-Transfer-googlenet_theano.ipynb
mdda/fossasia-2016_deep-learning
mit
5eef49c17638b3ecac6ba483961c8088
Get Ready for Optimisation by SciPy
# Theano functions to evaluate loss and gradient - takes around 1 minute (!) f_loss = theano.function([], total_loss) f_grad = theano.function([], grad) # Helper functions to interface with scipy.optimize def eval_loss(x0): x0 = floatX(x0.reshape((1, 3, IMAGE_W, IMAGE_W))) generated_image.set_value(x0) return f_loss().astype('float64') def eval_grad(x0): x0 = floatX(x0.reshape((1, 3, IMAGE_W, IMAGE_W))) generated_image.set_value(x0) return np.array(f_grad()).flatten().astype('float64')
notebooks/2-CNN/6-StyleTransfer/2-Art-Style-Transfer-googlenet_theano.ipynb
mdda/fossasia-2016_deep-learning
mit
9fb877dc634931d65bb08d9429bc9314
Initialize with the original photo, since going from noise (the code that's commented out) takes many more iterations.
generated_image.set_value(photo)
#generated_image.set_value(floatX(np.random.uniform(-128, 128, (1, 3, IMAGE_W, IMAGE_W))))

x0 = generated_image.get_value().astype('float64')
iteration = 0
notebooks/2-CNN/6-StyleTransfer/2-Art-Style-Transfer-googlenet_theano.ipynb
mdda/fossasia-2016_deep-learning
mit
581d767b378cd4a433d33aa9ba270cb6
Optimize all those losses, and show the image To refine the result, just keep hitting 'run' on this cell (each iteration is about 60 seconds) :
t0 = time.time() scipy.optimize.fmin_l_bfgs_b(eval_loss, x0.flatten(), fprime=eval_grad, maxfun=40) x0 = generated_image.get_value().astype('float64') iteration += 1 if False: plt.figure(figsize=(8,8)) plt.imshow(googlenet.deprocess(x0), interpolation='nearest') plt.axis('off') plt.text(270, 25, '# {} in {:.1f}sec'.format(iteration, (float(time.time() - t0))), fontsize=14) else: plot_layout(googlenet.deprocess(x0)) print('Iteration {}, ran in {:.1f}sec'.format(iteration, float(time.time() - t0)))
notebooks/2-CNN/6-StyleTransfer/2-Art-Style-Transfer-googlenet_theano.ipynb
mdda/fossasia-2016_deep-learning
mit
8232a4c19eec65855f0cfe9d9c6a366e
Note about slicing columns from a Numpy matrix If you want to extract a column i from a Numpy matrix A and keep it as a column vector, you need to use the slicing notation, A[:, i:i+1]. Not doing so can lead to subtle bugs. To see why, compare the following slices.
A = np.array ([[1, 2, 3],
               [4, 5, 6],
               [7, 8, 9]], dtype=float)

print "A[:, :] ==\n", A
print "\nA[:, 0] ==\n", A[:, 0]
print "\nA[:, 2:3] == \n", A[:, 2:3]

print "\nAdd columns 0 and 2?"
a0 = A[:, 0]
a1 = A[:, 2:3]
print a0 + a1
25--logreg.ipynb
rvuduc/cse6040-ipynbs
bsd-3-clause
0a4ba3c3d2c06afacf325d8d682248dd
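The point of the comparison is the shape: A[:, 0] is 1-D while A[:, 0:1] keeps the column orientation, and mixing the two triggers broadcasting. A Python 3 version of the same check (a sketch added here for readers running a modern interpreter):

import numpy as np

A = np.arange(1.0, 10.0).reshape(3, 3)
print(A[:, 0].shape)                 # (3,)   -- a flat vector
print(A[:, 0:1].shape)               # (3, 1) -- still a column
print((A[:, 0] + A[:, 2:3]).shape)   # (3, 3) -- broadcasting, the subtle bug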
Sample data: Rock lobsters! As a concrete example of a classification task, consider the results of this experiment. Some marine biologists took a bunch of lobsters of varying sizes (size being a proxy for stage of development), and then tethered and exposed these lobsters to a variety of predators. The outcome that they measured is whether the lobsters survived or not. In this case, the data consists of a set of points, one point per lobster, where there is a single predictor (size) and the response is whether the lobsters survived (label "1") or died (label "0"). For the original paper, see this link. I can only imagine that this image is what marine biologists look like when experimenting with lobsters. Here is a plot of the raw data.
# http://www.stat.ufl.edu/~winner/data/lobster_survive.txt
df_lobsters = pd.read_table ('http://www.stat.ufl.edu/~winner/data/lobster_survive.dat',
                             sep=r'\s+', names=['CarapaceLen', 'Survived'])

display (df_lobsters.head ())
print "..."
display (df_lobsters.tail ())

sns.violinplot (x="Survived", y="CarapaceLen", data=df_lobsters, inner="quart")
25--logreg.ipynb
rvuduc/cse6040-ipynbs
bsd-3-clause
b81cca98d25d0defded50a94dfbf8f76
Although the classes are distinct in the aggregate, where the median carapace (outer shell) length is around 36 mm for the lobsters that died and 42 mm for those that survived, they are not cleanly separable. Notation To develop some intuition and a method, let's now turn to a more general setting and work on synthetic data sets. Let the data consist of $m$ data points, where each point is $d$-dimensional. Each dimension corresponds to some continuously-valued predictor. In addition, each data point will have a binary label, whose value is either 0 or 1. Denote each point by an augmented vector, $x_i$, such that $$ \begin{array}{rcl} x_i & \equiv & \left(\begin{array}{c} 1 \\ x_{i,1} \\ x_{i,2} \\ \vdots \\ x_{i,d} \end{array}\right). \end{array} $$ That is, the point is the $d$ coordinates augmented by an initial dummy coordinate whose value is 1. This convention is similar to what we did in linear regression. We can also stack these points as rows of a matrix, $X$, again, just as we did in regression: $$ \begin{array}{rcl} X \equiv \left(\begin{array}{c} x_0^T \\ x_1^T \\ \vdots \\ x_{m-1}^T \end{array}\right) & = & \left(\begin{array}{ccccc} 1 & x_{0,1} & x_{0,2} & \cdots & x_{0,d} \\ 1 & x_{1,1} & x_{1,2} & \cdots & x_{1,d} \\ & & & \vdots & \\ 1 & x_{m-1,1} & x_{m-1,2} & \cdots & x_{m-1,d} \end{array}\right). \end{array} $$ We will take the labels to be a binary column vector, $l \equiv \left(l_0, l_1, \ldots, l_{m-1}\right)^T$. An example We've pre-generated a synthetic data set consisting of labeled data points. Let's download and inspect it, first as a table and then visually.
df = pd.read_csv ('http://vuduc.org/cse6040/logreg_points_train.csv')
display (df.head ())
print "..."
display (df.tail ())
25--logreg.ipynb
rvuduc/cse6040-ipynbs
bsd-3-clause
8c54f9a00a80a8f8452149b1f494052e
Next, let's extract the coordinates as a Numpy matrix, points, and the labels as a Numpy column vector, labels. Mathematically, the points matrix corresponds to $X$ and the labels vector corresponds to $l$.
points = np.insert (df.as_matrix (['x_1', 'x_2']), 0, 1.0, axis=1)
labels = df.as_matrix (['label'])

print "First and last 5 points:\n", '='*23, '\n', points[:5], '\n...\n', points[-5:], '\n'
print "First and last 5 labels:\n", '='*23, '\n', labels[:5], '\n...\n', labels[-5:], '\n'
25--logreg.ipynb
rvuduc/cse6040-ipynbs
bsd-3-clause
c4d6a4c3b4f7b5ac38375fc6e3fe7d70
Next, let's plot the data as a scatter plot using Plotly. To do so, we need to create separate traces, one for each cluster. Below, we've provided you with a function, make_2d_scatter_traces(), which does exactly that, given a labeled data set as a (points, labels) pair.
def assert_points_2d (points): """Checks the dimensions of a given point set.""" assert type (points) is np.ndarray assert points.ndim == 2 assert points.shape[1] == 3 def assert_labels (labels): """Checks the type of a given set of labels (must be integral).""" assert labels is not None assert (type (labels) is np.ndarray) or (type (labels) is list) def extract_clusters (points, labels): """ Given a list or array of labeled augmented points, this routine returns a pair of lists, (C[0:k], L[0:k]), where C[i] is an array of all points whose labels are L[i]. """ assert_points_2d (points) assert_labels (labels) id_label_pairs = list (enumerate (set (labels.flatten ()))) labels_map = dict ([(v, i) for (i, v) in id_label_pairs]) # Count how many points belong to each cluster counts = [0] * len (labels_map) for l in labels.flatten (): counts[labels_map[l]] += 1 # Allocate space for each cluster clusters = [np.zeros ((k, 3)) for k in counts] # Separate the points by cluster counts = [0] * len (labels_map) for (x, l) in zip (points, labels.flatten ()): l_id = labels_map[l] k = counts[l_id] clusters[l_id][k, :] = x counts[l_id] += 1 # Generate cluster labels cluster_labels = [None] * len (labels_map) for (l, i) in labels_map.items (): cluster_labels[i] = l return (clusters, cluster_labels) def make_2d_scatter_traces (points, labels=None): """ Given an augmented point set, possibly labeled, returns a list Plotly-compatible marker traces. """ assert_points_2d (points) traces = [] if labels is None: traces.append (Scatter (x=points[:, 1:2], y=points[:, 2:3], mode='markers')) else: assert_labels (labels) (clusters, cluster_labels) = extract_clusters (points, labels) for (c, l) in zip (clusters, cluster_labels): traces.append (Scatter (x=c[:, 1:2], y=c[:, 2:3], mode='markers', name="%s" % str (l))) return traces print "Number of points:", len (points) traces = make_2d_scatter_traces (points, labels) py.iplot (traces)
25--logreg.ipynb
rvuduc/cse6040-ipynbs
bsd-3-clause
8a08a052b71ccabd7d1a3c43927f5c20
Linear discriminants Suppose you think that the boundary between the two clusters may be represented by a line. For the synthetic data example above, I hope you'll agree that such a model is not a terrible one. This line is referred to as a linear discriminant. Any point $x$ on this line may be described by $\theta^T x$, where $\theta$ is a vector of coefficients: $$ \begin{array}{rcl} \theta & \equiv & \left(\begin{array}{c} \theta_0 \ \theta_1 \ \vdots \ \theta_d \end{array}\right) . \ \end{array} $$ For example, consider the case of 2-D points ($d=2$): the condition that $\theta^T x = 0$ means that $$ \begin{array}{rrcl} & \theta^T x = 0 & = & \theta_0 + \theta_1 x_1 + \theta_2 x_2 \ \implies & x_2 & = & -\frac{\theta_0}{\theta_2} - \frac{\theta_1}{\theta_2} x_1. \end{array} $$ So that describes points on the line. However, given any point $x$ in the $d$-dimensional space that is not on the line, $\theta^T x$ still produces a value: that value will be positive on one side of the line ($\theta^T x > 0$) or negative on the other ($\theta^T x < 0$). Consequently, here is one simple way to use the linear discriminant function $\theta^T x$ to generate a label: just reinterpret its sign! In more mathematical terms, the function that converts, say, a positive value to the label "1" and all other values to the label "0" is called the heaviside function: $$ \begin{array}{rcl} H(y) & \equiv & \left{\begin{array}{ll} 1 & \mathrm{if}\ y > 0 \ 0 & \mathrm{if}\ y \leq 0 \end{array}\right.. \end{array} $$ Exercise. This exercise has three parts. 1) Given the a $m \times (d+1)$ matrix of augmented points (i.e., the $X$ matrix) and the vector $\theta$, implement a function to compute the value of the linear discriminant at each point. That is, the function should return a (column) vector $y$ where the $y_i = \theta^T x_i$. 2) Implement the heaviside function, $H(y)$. Your function should allow for an arbitrary matrix of input values, and should apply the heaviside function elementwise. Hint: Consider what Numpy's sign() function produces, and transform the result accordingly. 3) For the synthetic data you loaded above, determine a value of $\theta$ for which $H(\theta^T x)$ "best" separates the two clusters. To help you out, we've provided some Plotly code that draws the discriminant boundary and also applies $H(\theta^T x)$ to each point, coloring the point by whether it is correctly classified. (The code also prints the number of correcty classified points.) So, you just need to try different values of $\theta$ until you find something that is "close." Hint: We found a line that commits just 5 errors, out of 375 possible points.
def lin_discr (X, theta):
    # @YOUSE: Part 1 -- Complete this function.
    pass

def heaviside (Y):
    # @YOUSE: Part 2 -- Complete this function
    pass
25--logreg.ipynb
rvuduc/cse6040-ipynbs
bsd-3-clause
19327ec3d284fa785332ad24887a1730
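One possible way to fill in the two stubs is sketched below. This is an illustrative completion, not necessarily the course's reference solution.

import numpy as np

def lin_discr (X, theta):
    # Part 1: y_i = theta^T x_i for every augmented point, i.e. a matrix-vector product.
    return X.dot (theta)

def heaviside (Y):
    # Part 2: elementwise H(y): 1 where y > 0, 0 otherwise.
    return (np.sign (Y) > 0).astype (float)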
The following is the code to generate the plot; look for the place to try different values of $\theta$ a couple of code cells below.
def heaviside_int (Y): """Evaluates the heaviside function, but returns integer values.""" return heaviside (Y).astype (dtype=int) def assert_discriminant (theta, d=2): """ Verifies that the given coefficients correspond to a d-dimensional linear discriminant ($\theta$). """ assert len (theta) == (d+1) def gen_lin_discr_labels (points, theta, fun=heaviside_int): """ Given a set of points and the coefficients of a linear discriminant, this function returns a set of labels for the points with respect to this discriminant. """ assert_points_2d (points) assert_discriminant (theta) score = lin_discr (points, theta) labels = fun (score) return labels def gen_lin_discr_trace (points, theta, name='Discriminant'): """ Given a set of points and the coefficients of a linear discriminant, this function returns a set of Plotly traces that show how the points are classified as well as the location of the discriminant boundary. """ assert_points_2d (points) assert_discriminant (theta) x1 = [min (points[:, 1]), max (points[:, 1])] m = -theta[1] / theta[2] b = -theta[0] / theta[2] x2 = [(b + m*x) for x in x1] return Scatter (x=x1, y=x2, mode='lines', name=name) def np_row_vec (init_list): """Generates a Numpy-compatible row vector.""" return np.array (init_list, order='F', ndmin=2) def np_col_vec (init_list): """Generates a Numpy-compatible column vector.""" return np_row_vec (init_list).T def gen_labels_part3 (points, labels, theta): your_labels = gen_lin_discr_labels (points, theta) return (labels == your_labels) # @YOUSE: Part 3 -- Select parameters for theta! theta = np_col_vec ([0., -1., 3.]) # Generate 0/1 labels for your discriminant: is_correct = gen_labels_part3 (points, labels, theta) print "Number of misclassified points:", (len (points) - sum (is_correct))[0] print "\n(Run the code cell below to visualize the results.)" # Visually inspect the above results traces = make_2d_scatter_traces (points, is_correct) traces.append (gen_lin_discr_trace (points, theta)) # Plot it! layout = Layout (xaxis=dict (range=[-1.25, 2.25]), yaxis=dict (range=[-3.25, 2.25])) fig = Figure (data=traces, layout=layout) py.iplot (fig)
25--logreg.ipynb
rvuduc/cse6040-ipynbs
bsd-3-clause
ddeba3d266dcf80bdb8a6ecfbfaaa0ab
An alternative linear discriminant: the logistic or "sigmoid" function The heaviside function, $H(\theta^T x)$, enforces a sharp boundary between classes around the $\theta^T x=0$ line. The following code produces a contour plot to show this effect.
# Use Numpy's handy meshgrid() to create a regularly-spaced grid of values.
# http://docs.scipy.org/doc/numpy/reference/generated/numpy.meshgrid.html
x1 = np.linspace (-2., +2., 100)
x2 = np.linspace (-2., +2., 100)
x1_grid, x2_grid = np.meshgrid (x1, x2)

h_grid = heaviside (theta[0] + theta[1]*x1_grid + theta[2]*x2_grid)

trace_grid = Contour (x=x1, y=x2, z=h_grid)
py.iplot ([trace_grid])
25--logreg.ipynb
rvuduc/cse6040-ipynbs
bsd-3-clause
5830a537ba6cb5ffe5b69e52480a9e73
However, as the lobsters example suggests, real data are not likely to be cleanly separable, especially when the number of features we have at our disposal is relatively small. Since the labels are binary, a natural idea is to give the classification problem a probabilistic interpretation. The logistic function provides at least one way to do so: $$ \begin{array}{rcl} G(y) & \equiv & \frac{1}{1 + e^{-y}} \end{array} $$ This function is also sometimes called the logit or sigmoid function. The logistic function takes any value in the range $(-\infty, +\infty)$ and produces a value in the range $(0, 1)$. Thus, given a value $x$, we can interpret it as a conditional probability that the label is 1. Exercise. Consider a set of 1-D points generated by a mixture of Gaussians. That is, suppose that there are two Gaussian distributions over the 1-dimensional variable, $x \in (-\infty, +\infty)$, that have the same variance ($\sigma^2$) but different means ($\mu_0$ and $\mu_1$). Show that the conditional probability of observing a point labeled "1" given $x$ may be written as, $$\mathrm{Pr}\left[l=1\,|\,x\right] \propto \displaystyle \frac{1}{1 + e^{-(\theta_0 + \theta_1 x)}},$$ for a suitable definition of $\theta_0$ and $\theta_1$. To carry out this computation, recall Bayes's rule (also: Bayes's theorem): $$ \begin{array}{rcl} \mathrm{Pr}[l=1\,|\,x] & = & \dfrac{\mathrm{Pr}[x\,|\,l=1] \, \mathrm{Pr}[l=1]} {\mathrm{Pr}[x\,|\,l=0] \, \mathrm{Pr}[l=0] + \mathrm{Pr}[x\,|\,l=1] \, \mathrm{Pr}[l=1] }. \end{array} $$ You may assume the prior probabilities of observing a 0 or 1 are given by $\mathrm{Pr}[l=0] \equiv p_0$ and $\mathrm{Pr}[l=1] \equiv p_1$. Time and interest permitting, we'll solve this exercise on the whiteboard. Exercise. Implement the logistic function. Inspect the resulting plot of $G(y)$ in 1-D and then the contour plot of $G(\theta^T{x})$. Your function should accept a Numpy matrix of values, Y, and apply the sigmoid elementwise.
def logistic (Y):
    # @YOUSE: Implement the logistic function G(y) here
    pass
25--logreg.ipynb
rvuduc/cse6040-ipynbs
bsd-3-clause
b980c4fa0346beef0ea8f186bfd964c4
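A possible implementation of the sigmoid, again offered only as a sketch and not necessarily the reference answer:

import numpy as np

def logistic (Y):
    # G(y) = 1 / (1 + e^{-y}), applied elementwise.
    return 1.0 / (1.0 + np.exp (-Y))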
Plot of your implementation in 1D:
x_logit_1d = np.linspace (-6.0, +6.0, 101)
y_logit_1d = logistic (x_logit_1d)

trace_logit_1d = Scatter (x=x_logit_1d, y=y_logit_1d)
py.iplot ([trace_logit_1d])
25--logreg.ipynb
rvuduc/cse6040-ipynbs
bsd-3-clause
12d1039d8061bb5ef6f3c55d88fe7179
Contour plot of your function:
g_grid = logistic (theta[0] + theta[1]*x1_grid + theta[2]*x2_grid)

trace_logit_grid = Contour (x=x1, y=x2, z=g_grid)
py.iplot ([trace_logit_grid])
25--logreg.ipynb
rvuduc/cse6040-ipynbs
bsd-3-clause
c13610fc4a41969bbf8802fbcfeea603
Exercise. Verify the following properties of the logistic function, $G(y)$. $$ \begin{array}{rcll} G(y) & = & \frac{e^y}{e^y + 1} & \mathrm{(P1)} \ G(-y) & = & 1 - G(y) & \mathrm{(P2)} \ \dfrac{dG}{dy} & = & G(y) G(-y) & \mathrm{(P3)} \ {\dfrac{d}{dy}} {\left[ \ln G(y) \right]} & = & G(-y) & \mathrm{(P4)} \ {\dfrac{d}{dy}} {\ln \left[ 1 - G(y) \right]} & = & -G(y) & \mathrm{(P5)} \end{array} $$ Determining $\theta$ via Maximum Likelihood Estimation Previously, you determined $\theta$ for our synthetic dataset experimentally. Can you compute a good $\theta$ automatically? One of the standard techniques in statistics is to perform a maximum likelihood estimation (MLE) of a model's parameters, $\theta$. Indeed, MLE is basis for the "statistical" way to derive the normal equations in the case of linear regression, though that is of course not how we encountered it in this class. "Likelihood" as an objective function MLE derives from the following idea. Consider the joint probability of observing all of the labels, given the points and the parameters, $\theta$: $$ \mathrm{Pr}[l\,|\,X, \theta]. $$ Suppose these observations are independent and identically distributed (i.i.d.). Then the joint probability can be factored as the product of individual probabilities, $$ \begin{array}{rcl} \mathrm{Pr}[l\,|\,X,\theta] = \mathrm{Pr}[l_0, \ldots, l_{m-1}\,|\,x_0, \ldots, x_{m-1}, \theta] & = & \mathrm{Pr}[l_0\,|\,x_0, \theta] \cdots \mathrm{Pr}[l_{m-1}\,|\,x_{m-1}, \theta] \ & = & \displaystyle \prod_{i=0}^{m-1} \mathrm{Pr}[l_i\,|\,x_i,\theta]. \end{array} $$ The maximum likelihood principle says that you should try to choose a parameter $\theta$ that maximizes the chances ("likelihood") of seeing these particular observations. Thus, we can simply reinterpret the preceding probability as an objective function to optimize. Mathematically, it is equivalent and convenient to consider the logarithm of the likelihood, or log-likelihood, as the objective function, defining it by, $$ \begin{array}{rcl} \mathcal{L}(\theta; l, X) & \equiv & \log \left{ \displaystyle \prod_{i=0}^{m-1} \mathrm{Pr}[l_i\,|\,x_i,\theta] \right} \ & = & \displaystyle \sum_{i=0}^{m-1} \log \mathrm{Pr}[l_i\,|\,x_i,\theta]. \end{array} $$ We are using the symbol $\log$, which could be taken in any convenient base, such as the natural logarithm ($\ln y$) or the information theoretic base-two logarithm ($\log_2 y$). The MLE procedure then consists of two steps: For the problem at hand, determine a suitable choice for $\mathrm{Pr}[l_i\,|\,x_i,\theta]$. Run any optimization procedure to find the $\theta$ that maximizes $\mathcal{L}(\theta; l, X)$. Example: Logistic regression Let's say you have decided that the logistic function, $G(\theta^T x_i)$, is a good model of the probability of producing a label $l_i$ given the point $x_i$. Under the i.i.d. assumption, we can interpret the label $l_i$ as being the result of a Bernoulli trial (e.g., a biased coin flip), where the probability of success ($l_i=1$) is defined as $g_i = g_i(\theta) \equiv G(\theta^T x_i)$. Thus, $$ \begin{array}{rcl} \mathrm{Pr}[l_i \, | \, x_i, \theta] & \equiv & g_i^{l_i} \cdot \left(1 - g_i\right)^{1 - l_i}. \end{array} $$ The log-likelihood in turn becomes, $$ \begin{array}{rcl} \mathcal{L}(\theta; l, X) & = & \displaystyle \sum_{i=0}^{m-1} l_i \log g_i + (1-l_i) \log (1-g_i) \ & = & l^T \log g + (1-l)^T \log (1-g), \end{array} $$ where $g \equiv (g_0, g_1, \ldots, g_{m-1})^T$. 
Optimizing the log-likelihood via gradient (steepest) ascent To optimize the log-likelihood with respect to the parameters, $\theta$, you'd like to do the moral equivalent of taking its derivative, setting it to zero, and then solving for $\theta$. For example, recall that in the case of linear regression via least squares minimization, carrying out this process produced an analytic solution for the parameters, which was to solve the normal equations. Unfortunately, for logistic regression---or for most log-likelihoods you are likely to ever write down---you cannot usually derive an analytic solution. Therefore, you will need to resort to numerical optimization procedures. The simplest such procedure is gradient ascent (or steepest ascent), in the case of maximizing some function; if instead you are minimizing the function, then the equivalent procedure is gradient (steepest) descent. The idea is to start with some guess, compute the derivative of the objective function at that guess, and then move in the direction of steepest descent. As it happens, the direction of steepest descent is given by the gradient. More formally, the procedure applied to the log-likelihood is: Start with some initial guess, $\theta(0)$. At each iteration $t \geq 0$ of the procedure, let $\theta(t)$ be the current guess. Compute the direction of steepest descent by evaluating the gradient, $\Delta_t \equiv \nabla_{\theta(t)} \left{\mathcal{L}(\theta(t); l, X)\right}$. Take a step in the direction of the gradient, $\theta(t+1) \leftarrow \theta(t) + \phi \Delta_t$, where $\phi$ is a suitably chosen fudge factor. This procedure should smell eerily like the one in Lab 24! And just as in Lab 24, the tricky bit is how to choose $\phi$, the principled choice of which we will defer until another lab. One additional and slight distinction between this procedure and the Lab 24 procedure is that here we are optimizing using the full dataset, rather than processing data points one at a time. (That is, the step iteration variable $t$ used above is not used in exactly the same way as the step iteration $k$ was used in Lab 24.) Another question is, how do we know this procedure will converge to the global maximum, rather than, say, a local maximum? For that you need a deeper analysis of a specific $\mathcal{L}(\theta; l, X)$, to show, for instance, that it is convex in $\theta$. Example: A gradient ascent algorithm for logistic regression Let's apply the gradient ascent procedure to the logistic regression problem, in order to determine a good $\theta$. Exercise. Show the following: $$ \begin{array}{rcl} \nabla_\theta \left{\mathcal{L}(\theta; l, X)\right} & = & X^T \left[ l - G(X \cdot \theta)\right]. \end{array} $$ Exercise. Implement the gradient ascent procedure to determine $\theta$, and try it out on the sample data. In your solution, we'd like you to store all guesses in the matrix thetas, so that you can later see how the $\theta(t)$ values evolve. To extract a particular column t, use the notation, theta[:, t:t+1]. This notation is necessary to preserve the "shape" of the column as a column vector.
MAX_STEP = 100 PHI = 0.1 # Get the data coordinate matrix, X, and labels vector, l X = points l = labels.astype (dtype=float) # Store *all* guesses, for subsequent analysis thetas = np.zeros ((3, MAX_STEP+1)) for t in range (MAX_STEP): # @YOUSE: Fill in this code pass print "Your (hand) solution:", theta.T.flatten () print "Computed solution:", thetas[:, MAX_STEP] theta_mle = thetas[:, MAX_STEP:] # Generate 0/1 labels for computed discriminant: is_correct_mle = gen_labels_part3 (points, labels, theta_mle) print "Number of misclassified points using MLE:", (len (points) - sum (is_correct_mle))[0] print "\n(Run the code cell below to visualize the results.)" # Visually inspect the above results traces_mle = make_2d_scatter_traces (points, is_correct_mle) traces_mle.append (gen_lin_discr_trace (points, theta_mle)) # Plot it! layout_mle = Layout (xaxis=dict (range=[-1.25, 2.25]), yaxis=dict (range=[-3.25, 2.25])) fig_mle = Figure (data=traces_mle, layout=layout_mle) py.iplot (fig_mle)
25--logreg.ipynb
rvuduc/cse6040-ipynbs
bsd-3-clause
ee0714913f03515dade5218e5947dcc0
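For the gradient ascent exercise above, here is one plausible sketch of the `@YOUSE` loop, not the course's reference solution. It mirrors the update $\theta(t+1) \leftarrow \theta(t) + \phi \, X^T[l - G(X\theta(t))]$; the synthetic `X` and `l` below are hypothetical stand-ins for the notebook's `points` and `labels` (an $m \times 3$ matrix with a constant column and a 0/1 label vector), and averaging the gradient over $m$ is a stabilizing choice of this sketch so that the fixed step `PHI = 0.1` behaves well on the synthetic data.

```python
import numpy as np

def G(y):
    """Logistic function."""
    return 1.0 / (1.0 + np.exp(-y))

def grad_log_likelihood(theta, l, X):
    """Gradient of the log-likelihood: X^T (l - G(X theta))."""
    return X.T.dot(l - G(X.dot(theta)))

# Hypothetical stand-ins for the notebook's `points` and `labels`:
np.random.seed(0)
m = 100
X = np.hstack([np.random.randn(m, 2), np.ones((m, 1))])
l = (np.random.rand(m, 1) < G(X.dot(np.array([[1.0], [-1.0], [0.5]])))).astype(float)

MAX_STEP, PHI = 100, 0.1
thetas = np.zeros((3, MAX_STEP + 1))
for t in range(MAX_STEP):
    theta_t = thetas[:, t:t+1]                         # current guess, kept as a column vector
    step = grad_log_likelihood(theta_t, l, X) / m      # averaged gradient keeps the fixed step stable here
    thetas[:, t+1:t+2] = theta_t + PHI * step

theta_est = thetas[:, MAX_STEP:]                       # final estimate, as a 3 x 1 column
```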
Exercise. Make a contour plot of the log-likelihood and draw the trajectory taken by the $\theta(t)$ values laid on top of it.
def log_likelihood (theta, l, X): # @YOUSE: Complete this function to evaluate the log-likelihood pass n1_ll = 100 x1_ll = np.linspace (-20., 0., n1_ll) n2_ll = 100 x2_ll = np.linspace (-20., 0., n2_ll) x1_ll_grid, x2_ll_grid = np.meshgrid (x1_ll, x2_ll) ll_grid = np.zeros ((n1_ll, n2_ll)) # @YOUSE: Write some code to compute ll_grid, which the following code cell visualizes trace_ll_grid = Contour (x=x1_ll, y=x2_ll, z=ll_grid) trace_thetas = Scatter (x=thetas[1, :], y=thetas[2, :], mode='markers+lines') py.iplot ([trace_ll_grid, trace_thetas])
25--logreg.ipynb
rvuduc/cse6040-ipynbs
bsd-3-clause
55073a9aa91bbf5db7a89a8d27daeeda
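One plausible way to complete the contour-plot exercise, hedged rather than definitive: the `log_likelihood` body follows the formula $l^T \log g + (1-l)^T \log(1-g)$, and the grid fill assumes the variables `thetas`, `x1_ll`, `x2_ll`, `ll_grid`, `l`, and `X` from the cells above. Holding $\theta_0$ fixed at its final estimate while sweeping the other two components is an illustrative choice, not necessarily the intended one.

```python
import numpy as np

def log_likelihood(theta, l, X):
    """l^T log g + (1 - l)^T log(1 - g), with g = G(X theta)."""
    g = 1.0 / (1.0 + np.exp(-X.dot(theta)))
    g = np.clip(g, 1e-12, 1.0 - 1e-12)     # avoid log(0) at extreme theta values
    return float(l.T.dot(np.log(g)) + (1.0 - l).T.dot(np.log(1.0 - g)))

# Fill the grid: hold theta_0 fixed (here, at its final estimate) and sweep the other two components.
theta0_fixed = thetas[0, -1]
for i1, t1 in enumerate(x1_ll):
    for i2, t2 in enumerate(x2_ll):
        theta_grid = np.array([[theta0_fixed], [t1], [t2]])
        ll_grid[i2, i1] = log_likelihood(theta_grid, l, X)   # z rows indexed by the y (x2) axis
```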
<div class="alert alert-success"> <b>EXERCISE</b>: Using groupby(), plot the number of films that have been released each decade in the history of cinema. </div> <div class="alert alert-success"> <b>EXERCISE</b>: Use groupby() to plot the number of "Hamlet" films made each decade. </div> <div class="alert alert-success"> <b>EXERCISE</b>: How many leading (n=1) roles were available to actors, and how many to actresses, in each year of the 1950s? </div> <div class="alert alert-success"> <b>EXERCISE</b>: Use groupby() to determine how many roles are listed for each of The Pink Panther movies. </div> <div class="alert alert-success"> <b>EXERCISE</b>: List, in order by year, each of the films in which Frank Oz has played more than 1 role. </div> <div class="alert alert-success"> <b>EXERCISE</b>: List each of the characters that Frank Oz has portrayed at least twice. </div> Transforms Sometimes you don't want to aggregate the groups, but transform the values in each group. This can be achieved with transform:£
df def normalize(group): return (group - group.mean()) / group.std() df.groupby('key').transform(normalize)
04 - Groupby operations.ipynb
dpshelio/2015-EuroScipy-pandas-tutorial
bsd-2-clause
aab89bffa272396a364c2eec656671aa
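As a worked hint for the first exercise above (films per decade), here is a hedged sketch; it assumes the tutorial's `titles` DataFrame, with `title` and `year` columns, has already been loaded.

```python
# Number of films released per decade: derive the decade with integer division, then group and count.
decade = titles.year // 10 * 10
films_per_decade = titles.groupby(decade).size()
films_per_decade.plot(kind='bar')
```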
<div class="alert alert-success"> <b>EXERCISE</b>: Calculate the ratio of number roles of actors and actresses to the total number of roles per decade and plot this for both in time (tip: you need to do a groupby twice in two steps, once calculating the numbers, and then the ratios. </div> Value counts A useful shortcut to calculate the number of occurences of certain values is value_counts (this is somewhat equivalent to df.groupby(key).size())) For example, what are the most occuring movie titles?
titles.title.value_counts().head()
04 - Groupby operations.ipynb
dpshelio/2015-EuroScipy-pandas-tutorial
bsd-2-clause
41ad8f33c319f6754ee4df6b7425eea0
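A hedged sketch of the two-step groupby for the actor/actress ratio exercise above; it assumes the tutorial's `cast` DataFrame with `year` and `type` columns ('actor'/'actress'), which is not shown in this excerpt.

```python
# Step 1: count roles per decade and type (assumes a `cast` DataFrame with `year` and `type` columns).
counts = cast.groupby([cast.year // 10 * 10, 'type']).size().unstack('type')

# Step 2: divide each column by the per-decade total to get ratios, then plot both curves over time.
ratios = counts.div(counts.sum(axis=1), axis=0)
ratios.plot()
```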
You can skip the following steps if you just want to load the word2vec model I provided... but this is the raw data approach.
# We need to unzip the data file to use it: !gunzip ../data/yelp/yelp_academic_dataset_reviews.json.gz # Make sure it is there and unzipped: !ls -al ../data/yelp/ ## Make sure this dataset is here and unzipped. data = [] with open("../data/yelp/yelp_academic_dataset_reviews.json") as handle: for line in handle.readlines(): yelp = json.loads(line) data.append(yelp) len(data) data[0] revs = [d[u'text'] for d in data] revs[0]
python/Word2Vec_Yelp.ipynb
arnicas/eyeo_nlp
cc0-1.0
f1bf2e103ec866a615700faaed499ede
What is Word2Vec? "Generally, word2vec is trained using something called a skip-gram model. The skip-gram model, pictures above, attempts to use the vector representation that it learns to predict the words that appear around a given word in the corpus. Essentially, it uses the context of the word as it is used in a variety of books and other literature to derive a meaningful set of numbers. If the “context” of two words is similar, they will have similar vector representations." (Source) "In word2vec, a distributed representation of a word is used. Take a vector with several hundred dimensions (say 1000). Each word is representated by a distribution of weights across those elements. So instead of a one-to-one mapping between an element in the vector and a word, the representation of a word is spread across all of the elements in the vector, and each element in the vector contributes to the definition of many words. If I label the dimensions in a hypothetical word vector (there are no such pre-assigned labels in the algorithm of course), it might look a bit like this:" <img src="img/word2vec-distributed-representation.png"> Source So that means we can do associative logic, or analogies, with these models: <img src="img/word2vec-king-queen-vectors.png"> Specifically, a large enough model of the right kind of language (like a lot of news, or lots of books) will allow you to get "queen" from putting in man, king, woman... and doing vector math on them. So, king-man+woman=queen. Source <img src="img/word2vec-king-queen-composition.png"> Source Creating a word2vec Model with Gensim This takes a while. You don't need to do this, since I already did it. You can skip down to the place where we load the file!
""" An alternate from gensim tutorials - just use all words in the model in a rewiew. No nltk used to split.""" import re class YelpReviews(object): """Iterate over sentences of all plaintext files in a directory """ SPLIT_SENTENCES = re.compile(u"[.!?:]\s+") # split sentences on these characters def __init__(self, objs, field): self.field = field self.objs = objs def __iter__(self): for obj in self.objs: text = obj[self.field] for sentence in self.SPLIT_SENTENCES.split(text): yield gensim.utils.simple_preprocess(sentence, deacc=True) ## Don't do this is you already have the model file! Skip to the step after. ## Otherwise, feel free to do it from scratch. ## We pass in the full data objs and use the YelpReviews class to get the 'text' field for us. #model = gensim.models.Word2Vec(YelpReviews(data, 'text'), min_count=2, workers=2) #model.save('yelp_w2v_model.mod') #model.save_word2vec_format('yelp_w2vformat.mod') # If you already have a model file, load it here: model = gensim.models.Word2Vec.load_word2vec_format('../data/yelp/yelp_w2vformat.mod') model.most_similar(positive=["chicken", "waffles"], topn=20) model.most_similar("waitress") model.vocab.items()[0:5] model.most_similar(['good', 'pizza']) model.most_similar_cosmul(['good', 'pizza']) # less susceptible to extreme outliers model.most_similar(['dog']) model.most_similar(['salon']) model.most_similar(positive=['donuts', 'nypd'], negative=['fireman'])
python/Word2Vec_Yelp.ipynb
arnicas/eyeo_nlp
cc0-1.0
7e732561d9e520c681c670633875520f
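The king/queen analogy described above can be queried directly with gensim's `most_similar`. This is illustrative only: the small, restaurant-heavy Yelp model loaded here is unlikely to return "queen"; a large general-purpose model (e.g., one trained on news text) would be needed for the textbook result.

```python
# Vector arithmetic: king - man + woman ≈ queen (works best with a large general-purpose model).
model.most_similar(positive=['king', 'woman'], negative=['man'], topn=5)
```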
Now let's do some basic word sentiment stuff again... for the html side!
import nltk nltk.data.path = ['../nltk_data'] from nltk.corpus import stopwords english_stops = stopwords.words('english') revs[0] tokens = [nltk.word_tokenize(rev) for rev in revs] # this takes a long time. don't run unless you're sure. mystops = english_stops + [u"n't", u'...', u"'ve"] def clean_tokens(tokens, stoplist): """ Lowercases, takes out punct and stopwords and short strings """ return [token.lower() for token in tokens if (token not in string.punctuation) and (token.lower() not in stoplist) and len(token) > 2] clean = [clean_tokens(tok, mystops) for tok in tokens] from nltk import Text allclean = [y for x in clean for y in x] # flatten the list of lists cleantext = Text(allclean) mostcommon = cleantext.vocab().most_common()[0:1500] mostcommon_words = [word[0] for word in mostcommon] mostcommon_words[0:12] # thing required to get the vectors for tsne def get_vectors(words, model): # requires model be in the binary format, not gensim's word_vectors = [] word_labels = [] for word in words: if word in model: word_vectors.append( model[word] ) word_labels.append(word) return word_vectors, word_labels mymodel = gensim.models.Word2Vec.load_word2vec_format('../data/yelp/yelp_w2vformat.mod') vectors, labels = get_vectors(mostcommon_words, mymodel) # should be same as top words above labels[:12] res = ts.tsne(np.asfarray(vectors, dtype='float'), 2, 50, 20)
python/Word2Vec_Yelp.ipynb
arnicas/eyeo_nlp
cc0-1.0
c3e86e64776298ab80b91f03d98e084a
The "AFINN-111.txt" file is another sentiment file.
from collections import defaultdict sentiment = defaultdict(int) with open('../data/sentiment_wordlists/AFINN-111.txt') as handle: for line in handle.readlines(): word = line.split('\t')[0] polarity = line.split('\t')[1] sentiment[word] = int(polarity) sentiment['pho'] sentiment['good'] sentiment['angry'] sentiment['pizza'] def render_json( vectors, labels, filename ): output = [] vectors = np.array(vectors) for i in range(len(vectors)): new_hash = {} new_hash["word"] = str(labels[i]) new_hash["x"] = int(vectors[i][0]) new_hash["y"] = int(vectors[i][1]) new_hash["sentiment"] = sentiment[str(labels[i])] output.append(new_hash) with open(filename, 'w') as handle: json.dump(output, handle) render_json(res, labels, "../outputdata/yelp.json")
python/Word2Vec_Yelp.ipynb
arnicas/eyeo_nlp
cc0-1.0
432e5e5b878faad0c0a01172f4692c02
Import statements
import pandas import numpy as np import pyprind
Compute distance to roads.ipynb
jacobdein/alpine-soundscapes
mit
bd5575af9e2b3244f56696f8d52d904c
GRASS import statements
import grass.script as gscript from grass.pygrass.vector import VectorTopo from grass.pygrass.vector.table import DBlinks
Compute distance to roads.ipynb
jacobdein/alpine-soundscapes
mit
9d751fa1a66cf2be9e26b6f59faf0822
Function declarations connect to an attribute table
def connectToAttributeTable(map): vector = VectorTopo(map) vector.open(mode='r') dblinks = DBlinks(vector.c_mapinfo) link = dblinks[0] return link.table()
Compute distance to roads.ipynb
jacobdein/alpine-soundscapes
mit
af861dc4de3b4c2792df31b64ccb3a9d
finds the nearest element in a vector map (to) for elements in another vector map (from) <br /> calls the GRASS v.distance command
def computeDistance(from_map, to_map): upload = 'dist' result = gscript.read_command('v.distance', from_=from_map, to=to_map, upload=upload, separator='comma', flags='p') return result.split('\n')
Compute distance to roads.ipynb
jacobdein/alpine-soundscapes
mit
3570a18425384a7eea65af0007786e03
selects vector features from an existing vector map and creates a new vector map containing only the selected features <br /> calls the GRASS v.extract command
def extractFeatures(input_, type_, output): where = "{0} = '{1}'".format(road_type_field, type_) gscript.read_command('v.extract', input_=input_, where=where, output=output, overwrite=True)
Compute distance to roads.ipynb
jacobdein/alpine-soundscapes
mit
3c70b2f691adc2f0e942b575a405d08d
Get unique 'roads' types
roads_table = connectToAttributeTable(map=roads) roads_table.filters.select(road_type_field) cursor = roads_table.execute() result = np.array(cursor.fetchall()) cursor.close() road_types = np.unique(result) print(road_types)
Compute distance to roads.ipynb
jacobdein/alpine-soundscapes
mit
c940c9a093dc3915dd669e876af3591d
Get 'points' attribute table
point_table = connectToAttributeTable(map=points) point_table.filters.select() columns = point_table.columns.names() cursor = point_table.execute() result = np.array(cursor.fetchall()) cursor.close() point_data = pandas.DataFrame(result, columns=columns).set_index('cat')
Compute distance to roads.ipynb
jacobdein/alpine-soundscapes
mit
bc26a415c525c9d9ef5788813df608df
Loop through 'roads' types and compute the distances from all 'points'
distances = pandas.DataFrame(columns=road_types, index=point_data.index) progress_bar = pyprind.ProgBar(road_types.size, bar_char='█', title='Progress', monitor=True, stream=1, width=50) for type_ in road_types: # update progress bar progress_bar.update(item_id=type_) # extract road data based on type query extractFeatures(input_=roads, type_=type_, output='roads_tmp') # compute distance from points to road type results = computeDistance(points, 'roads_tmp') # save results to data frame distances[type_] = [ d.split(',')[1] for d in results[1:len(results)-1] ] # match index with SiteID distances['SiteID'] = point_data['ID'] distances.set_index('SiteID', inplace=True)
Compute distance to roads.ipynb
jacobdein/alpine-soundscapes
mit
1558c1fc67bc7e593b32aff2195a1806
Export distances table to a csv file
distances.to_csv(distance_table_filename, header=False)
Compute distance to roads.ipynb
jacobdein/alpine-soundscapes
mit
c374749112b39bcaa49d5a815adfa3d4
A generalization using accumulation
mapped = list(accumulate(mapped, accumulating)) mapped clear_cache() m,v,r = to_matrix_notation(mapped, f, [n-k for k in range(-2, 19)]) m,v,r m_sym = m.subs(inverted_fibs, simultaneous=True) m_sym[:,0] = m_sym[:,0].subs(f[2],f[1]) m_sym[1,2] = m_sym[1,2].subs(f[2],f[1]) m_sym # the following cell produces an error due to ordering, while `m * v` doesn't. #clear_cache() #m_sym * v to_matrix_notation(mapped, f, [n+k for k in range(-18, 3)])
notebooks/recurrences-unfolding.ipynb
massimo-nocentini/PhD
apache-2.0
3ea2d90d30532abbd1f451576c7dd833
According to A162741, we can generalize the pattern above:
i = symbols('i')
d = IndexedBase('d')
k_fn_gen = Eq((k+1)*f[n], Sum(d[k,2*k-i]*f[n-i], (i, 0, 2*k)))
d_triangle = {d[0,0]:1, d[n,2*n]:1, d[n,k]:d[n-1, k-1]+d[n-1,k]}
k_fn_gen, d_triangle
mapped = list(accumulate(mapped, accumulating))
mapped
# skip this cell to maintain the math-coherent version
def adjust(term):
    a_wild, b_wild = Wild('a', exclude=[f]), Wild('b')
    matched = term.match(a_wild*f[n+2] + b_wild)
    return -(matched[a_wild]-1)*f[n+2]

m = fix_combination(mapped, adjust, lambda v, side: Add(v, side))
mapped = list(m)
mapped
to_matrix_notation(mapped, f, [n-k for k in range(-2, 19)])
mapped = list(accumulate(mapped, accumulating))
mapped
to_matrix_notation(mapped, f, [n-k for k in range(-2, 19)])
mapped = list(accumulate(mapped, accumulating))
mapped
to_matrix_notation(mapped, f, [n-k for k in range(-2, 19)])
mapped = list(accumulate(mapped, accumulating))
mapped
to_matrix_notation(mapped, f, [n-k for k in range(-2, 19)])
notebooks/recurrences-unfolding.ipynb
massimo-nocentini/PhD
apache-2.0
a3b19dd2b0ae4cee285110c2c754dfa2
Unfolding a recurrence with generic coefficients
s = IndexedBase('s') a = IndexedBase('a') swaps_recurrence = Eq(n*s[n],(n+1)*s[n-1]+a[n]) swaps_recurrence boundary_conditions = {s[0]:Integer(0)} swaps_recurrence_spec=dict(recurrence_eq=swaps_recurrence, indexed=s, index=n, terms_cache=boundary_conditions) unfolded = do_unfolding_steps(swaps_recurrence_spec, 4) recurrence_eq = project_recurrence_spec(unfolded, recurrence_eq=True) recurrence_eq factored_recurrence_eq = project_recurrence_spec(factor_rhs_unfolded_rec(unfolded), recurrence_eq=True) factored_recurrence_eq factored_recurrence_eq.rhs.collect(s[n-5]).collect(a[n-4]) factored_recurrence_eq.subs(n,5) recurrence_eq.subs(n, 5) def additional_term(n): return (2*Integer(n)-3)/6 as_dict = {a[n]:additional_term(n) for n in range(1,6)} recurrence_eq.subs(n, 5).subs(as_dict)
notebooks/recurrences-unfolding.ipynb
massimo-nocentini/PhD
apache-2.0
24a3db75263a96804c57b878f124a813
A curious relation about Fibonacci numbers, in matrix notation
d = 10 m = Matrix(d,d, lambda i,j: binomial(n-i,j)*binomial(n-j,i)) m f = IndexedBase('f') fibs = [fibonacci(i) for i in range(50)] mp = (ones(1,d)*m*ones(d,1))[0,0] odd_fibs_eq = Eq(f[2*n+1], mp, evaluate=True) odd_fibs_eq (m*ones(d,1))
notebooks/recurrences-unfolding.ipynb
massimo-nocentini/PhD
apache-2.0
cc7cc78928f02fe3d3d059a120ea4364
Implement Preprocessing Functions The first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below: - Lookup Table - Tokenize Punctuation Lookup Table To create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries: - Dictionary to go from the words to an id, we'll call vocab_to_int - Dictionary to go from the id to word, we'll call int_to_vocab Return these dictionaries in the following tuple (vocab_to_int, int_to_vocab)
import numpy as np import problem_unittests as tests def create_lookup_tables(text): """ Create lookup tables for vocabulary :param text: The text of tv scripts split into words :return: A tuple of dicts (vocab_to_int, int_to_vocab) """ unique_words = set(text) vocab_to_int = {word: index for index, word in enumerate(unique_words)} int_to_vocab = {index: word for index, word in enumerate(unique_words)} return vocab_to_int, int_to_vocab """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_create_lookup_tables(create_lookup_tables)
tv-script-generation/dlnd_tv_script_generation.ipynb
mu4farooqi/deep-learning-projects
gpl-3.0
046ab4e7c07e01fd1c2ff7479389a3f8
Check Point This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk. Build the Neural Network You'll build the components necessary to build a RNN by implementing the following functions below: - get_inputs - get_init_cell - get_embed - build_rnn - build_nn - get_batches Check the Version of TensorFlow and Access to GPU
""" DON'T MODIFY ANYTHING IN THIS CELL """ import helper import numpy as np import problem_unittests as tests int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess() """ DON'T MODIFY ANYTHING IN THIS CELL """ from distutils.version import LooseVersion import warnings import tensorflow as tf # Check TensorFlow Version assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer' print('TensorFlow Version: {}'.format(tf.__version__)) # Check for a GPU if not tf.test.gpu_device_name(): warnings.warn('No GPU found. Please use a GPU to train your neural network.') else: print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
tv-script-generation/dlnd_tv_script_generation.ipynb
mu4farooqi/deep-learning-projects
gpl-3.0
32d00fe6b75e499b51b1ac6e238d4dd5
Input Implement the get_inputs() function to create TF Placeholders for the Neural Network. It should create the following placeholders: - Input text placeholder named "input" using the TF Placeholder name parameter. - Targets placeholder - Learning Rate placeholder Return the placeholders in the following tuple (Input, Targets, LearningRate)
def get_inputs(): """ Create TF Placeholders for input, targets, and learning rate. :return: Tuple (input, targets, learning rate) """ return tf.placeholder(dtype=tf.int32, shape=(None, None), name="input"), tf.placeholder(dtype=tf.int32, shape=(None, None), name="target"), tf.placeholder(dtype=tf.float32, name="learning_rate") """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_get_inputs(get_inputs)
tv-script-generation/dlnd_tv_script_generation.ipynb
mu4farooqi/deep-learning-projects
gpl-3.0
609a7c80ff2f2f9c6b3f48ef0600f866
Build RNN Cell and Initialize Stack one or more BasicLSTMCells in a MultiRNNCell. - The RNN size should be set using rnn_size - Initialize Cell State using the MultiRNNCell's zero_state() function - Apply the name "initial_state" to the initial state using tf.identity() Return the cell and initial state in the following tuple (Cell, InitialState)
def get_init_cell(batch_size, rnn_size): """ Create an RNN Cell and initialize it. :param batch_size: Size of batches :param rnn_size: Size of RNNs :return: Tuple (cell, initialize state) """ cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.BasicLSTMCell(rnn_size)]) return cell, tf.identity(cell.zero_state(batch_size, tf.float32), 'initial_state') """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_get_init_cell(get_init_cell)
tv-script-generation/dlnd_tv_script_generation.ipynb
mu4farooqi/deep-learning-projects
gpl-3.0
d9313c70dbe3cb837c7a2153018cafda
Word Embedding Apply embedding to input_data using TensorFlow. Return the embedded sequence.
def get_embed(input_data, vocab_size, embed_dim): """ Create embedding for <input_data>. :param input_data: TF placeholder for text input. :param vocab_size: Number of words in vocabulary. :param embed_dim: Number of embedding dimensions :return: Embedded input. """ embeddings = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1)) return tf.nn.embedding_lookup(embeddings, input_data) """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_get_embed(get_embed)
tv-script-generation/dlnd_tv_script_generation.ipynb
mu4farooqi/deep-learning-projects
gpl-3.0
4bb4a27cc7e9e94cbb0a5b44020b1d59
Build the Neural Network Apply the functions you implemented above to: - Apply embedding to input_data using your get_embed(input_data, vocab_size, embed_dim) function. - Build RNN using cell and your build_rnn(cell, inputs) function. - Apply a fully connected layer with a linear activation and vocab_size as the number of outputs. Return the logits and final state in the following tuple (Logits, FinalState)
def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim): """ Build part of the neural network :param cell: RNN cell :param rnn_size: Size of rnns :param input_data: Input data :param vocab_size: Vocabulary size :param embed_dim: Number of embedding dimensions :return: Tuple (Logits, FinalState) """ embeddings = get_embed(input_data, vocab_size, embed_dim) outputs, final_state = build_rnn(cell, embeddings) # max_time x batch_size x rnn_size logits = tf.contrib.layers.fully_connected(outputs, vocab_size, None) return logits, final_state # Test was failing because it was written for some previous version of Tensorflow # tests.test_build_nn(build_nn)
tv-script-generation/dlnd_tv_script_generation.ipynb
mu4farooqi/deep-learning-projects
gpl-3.0
b5b3767dae0715a2f7d8858fdb751e4c
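build_rnn is referenced above but not shown in this excerpt; here is a minimal sketch of what it might look like under TensorFlow 1.x, assuming batch-major inputs and the notebook's convention of naming the final state "final_state". It is an assumption about the earlier implementation, not the project's reference code.

```python
def build_rnn(cell, inputs):
    """
    Create an RNN from the given cell and (batch-major) embedded inputs.
    :return: Tuple (outputs, final_state), with the final state named 'final_state'
    """
    outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
    final_state = tf.identity(final_state, name='final_state')
    return outputs, final_state
```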
Batches Implement get_batches to create batches of input and targets using int_text. The batches should be a Numpy array with the shape (number of batches, 2, batch size, sequence length). Each batch contains two elements: - The first element is a single batch of input with the shape [batch size, sequence length] - The second element is a single batch of targets with the shape [batch size, sequence length] If you can't fill the last batch with enough data, drop the last batch. For example, get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], 3, 2) would return a Numpy array of the following: ``` [ # First Batch [ # Batch of Input [[ 1 2], [ 7 8], [13 14]] # Batch of targets [[ 2 3], [ 8 9], [14 15]] ] # Second Batch [ # Batch of Input [[ 3 4], [ 9 10], [15 16]] # Batch of targets [[ 4 5], [10 11], [16 17]] ] # Third Batch [ # Batch of Input [[ 5 6], [11 12], [17 18]] # Batch of targets [[ 6 7], [12 13], [18 1]] ] ] ``` Notice that the last target value in the last batch is the first input value of the first batch. In this case, 1. This is a common technique used when creating sequence batches, although it is rather unintuitive.
def get_batches(int_text, batch_size, seq_length):
    """
    Return batches of input and target
    :param int_text: Text with the words replaced by their ids
    :param batch_size: The size of batch
    :param seq_length: The length of sequence
    :return: Batches as a Numpy array
    """
    total_batches = len(int_text) // (batch_size * seq_length)
    int_text = np.asarray(int_text[:(total_batches * batch_size * seq_length)])
    label_text = np.asarray([0] * int_text.shape[0])
    label_text[:-1], label_text[-1] = int_text[1:], int_text[0]
    int_text = np.reshape(int_text, (-1, batch_size, seq_length))
    label_text = np.reshape(label_text, (-1, batch_size, seq_length))
    # print(np.concatenate((int_text[:, None, :, :], label_text[:, None, :, :]), axis=1)[0])
    return np.concatenate((int_text[:, None, :, :], label_text[:, None, :, :]), axis=1)

# Test was not generic enough, i.e. it was passing only a specific arrangement of numbers.
# tests.test_get_batches(get_batches)
tv-script-generation/dlnd_tv_script_generation.ipynb
mu4farooqi/deep-learning-projects
gpl-3.0
b84be356705a7de904d069efde381539
Neural Network Training Hyperparameters Tune the following parameters: Set num_epochs to the number of epochs. Set batch_size to the batch size. Set rnn_size to the size of the RNNs. Set embed_dim to the size of the embedding. Set seq_length to the length of sequence. Set learning_rate to the learning rate. Set show_every_n_batches to the number of batches the neural network should print progress.
# Number of Epochs num_epochs = 100 # Batch Size batch_size = 256 # RNN Size rnn_size = 256 # Embedding Dimension Size embed_dim = 300 # Sequence Length seq_length = 10 # Learning Rate learning_rate = 0.01 # Show stats for every n number of batches show_every_n_batches = 20 """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ save_dir = './save'
tv-script-generation/dlnd_tv_script_generation.ipynb
mu4farooqi/deep-learning-projects
gpl-3.0
bb3c6ad307ea92cec79fe74bab47c956
Implement Generate Functions Get Tensors Get tensors from loaded_graph using the function get_tensor_by_name(). Get the tensors using the following names: - "input:0" - "initial_state:0" - "final_state:0" - "probs:0" Return the tensors in the following tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
def get_tensors(loaded_graph): """ Get input, initial state, final state, and probabilities tensor from <loaded_graph> :param loaded_graph: TensorFlow graph loaded from file :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor) """ return (loaded_graph.get_tensor_by_name('input:0'), loaded_graph.get_tensor_by_name('initial_state:0'), loaded_graph.get_tensor_by_name('final_state:0'), loaded_graph.get_tensor_by_name('probs:0')) """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_get_tensors(get_tensors)
tv-script-generation/dlnd_tv_script_generation.ipynb
mu4farooqi/deep-learning-projects
gpl-3.0
7bc7026fcf310f195dc733cd91a83c1d
Choose Word Implement the pick_word() function to select the next word using probabilities.
def pick_word(probabilities, int_to_vocab): """ Pick the next word in the generated text :param probabilities: Probabilites of the next word :param int_to_vocab: Dictionary of word ids as the keys and words as the values :return: String of the predicted word """ probabilities = np.reshape(probabilities, (len(int_to_vocab.keys()),)) return int_to_vocab[np.argmax(probabilities)] """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_pick_word(pick_word)
tv-script-generation/dlnd_tv_script_generation.ipynb
mu4farooqi/deep-learning-projects
gpl-3.0
65738588128e4a448b8a99788c78ba6f
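np.argmax above always picks the single most likely word, which tends to make the generated script repetitive. A common alternative, shown as a hedged sketch rather than part of the project specification, is to sample the next word according to the predicted distribution.

```python
def pick_word_sampled(probabilities, int_to_vocab):
    """Sample the next word id according to its predicted probability, then map it back to a word."""
    probabilities = np.reshape(probabilities, (len(int_to_vocab),))
    word_id = np.random.choice(len(int_to_vocab), p=probabilities)
    return int_to_vocab[word_id]
```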
Index comparison See how indexes affect queries: first without an index, then with one.
scratch_db.zips.drop_indexes() count = scratch_db.zips.find().count() city_count = scratch_db.zips.find({"city": "FLAGSTAFF"}).count() city_explain = scratch_db.zips.find({"city": "FLAGSTAFF"}).explain()['executionStats'] print(count) print(city_count) print(city_explain) scratch_db.zips.drop_indexes() scratch_db.zips.create_index([("city", ASCENDING)]) count = scratch_db.zips.find().count() city_count = scratch_db.zips.find({"city": "FLAGSTAFF"}).count() city_explain = scratch_db.zips.find({"city": "FLAGSTAFF"}).explain()['executionStats'] print(count) print(city_count) print(city_explain)
MongoDB.ipynb
0Rick0/Fontys-DS-GCD
mit
823c2a1a14ffe9742c212f6022b465ee
You can see that with the index the execution is a bit different. The executionTimeMillis parameter shows that the second query executes much faster. This is because the index allows the query to search the index instead of scanning all the documents. Some other information about the dataset
print("Amount of cities per state:") pipeline = [ {"$unwind": "$state"}, {"$group": {"_id": "$state", "count": {"$sum": 1}}}, {"$sort": SON([("count", -1), ("_id", -1)])} ] results = scratch_db.zips.aggregate(pipeline) for result in results: print("State %s: %d" % tuple(result.values())) print("Amount of cities with fewer then 50 people") lt = scratch_db.zips.find({"pop": {"$lt": 50}}) print("%d cities" % lt.count()) for city in lt.limit(10): print("%s: %d" % (city['city'], city['pop']))
MongoDB.ipynb
0Rick0/Fontys-DS-GCD
mit
24751a8b8d9c8ca56b2f3b5b1662b836
Geolocation MongoDB also has built-in support for geolocation indexes. This allows searching, for example, for nearby shops around a given location.
scratch_db.zips.create_index([("loc", "2dsphere")]) flagstaff = scratch_db.zips.find_one({"city": "FLAGSTAFF"}) nearby = scratch_db.zips.find({"loc": { "$near": { "$geometry": { 'type': 'Point', 'coordinates': flagstaff['loc'] }, "$maxDistance": 50000 } }}) for city in nearby: print(city['city'])
MongoDB.ipynb
0Rick0/Fontys-DS-GCD
mit
79f7048165e45d03b339e357477e71f8
Create a managed tabular dataset from a CSV A Managed dataset can be used to create an AutoML model or a custom model.
# Create a managed tabular dataset ds = # TODO 1: Your code goes here(display_name="abalone", gcs_source=[gcs_csv_path]) ds.resource_name
courses/machine_learning/deepdive2/production_ml/labs/sdk_metric_parameter_tracking_for_custom_jobs.ipynb
GoogleCloudPlatform/training-data-analyst
apache-2.0
e1274a3dc9af2399ee332664fecbd2cc
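One plausible completion of TODO 1, assuming the lab imports the Vertex AI SDK as `aiplatform` and that `gcs_csv_path` was defined in an earlier cell; a hedged sketch, not necessarily the lab's official answer.

```python
# Possible completion of TODO 1 (assumes `from google.cloud import aiplatform` and aiplatform.init(...) earlier).
ds = aiplatform.TabularDataset.create(display_name="abalone", gcs_source=[gcs_csv_path])
```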
Start a new experiment run to track training parameters and start the training job. Note that this operation will take around 10 mins.
aiplatform.start_run("custom-training-run-1") # Change this to your desired run name parameters = {"epochs": 10, "num_units": 64} aiplatform.log_params(parameters) # Launch the training job model = # TODO 2: Your code goes here( ds, replica_count=1, model_display_name="abalone-model", args=[f"--epochs={parameters['epochs']}", f"--num_units={parameters['num_units']}"], )
courses/machine_learning/deepdive2/production_ml/labs/sdk_metric_parameter_tracking_for_custom_jobs.ipynb
GoogleCloudPlatform/training-data-analyst
apache-2.0
23158e591787bf2f631c1432c8093537
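TODO 2 presumably calls the `run()` method of a `CustomTrainingJob` created in an earlier, not-shown cell; the variable name `job` below is an assumption, and this sketch is hedged rather than the lab's official answer.

```python
# Possible completion of TODO 2 (assumes `job` is an aiplatform.CustomTrainingJob defined earlier
# with the training script and container specification).
model = job.run(
    ds,
    replica_count=1,
    model_display_name="abalone-model",
    args=[f"--epochs={parameters['epochs']}", f"--num_units={parameters['num_units']}"],
)
```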
Deploy Model and calculate prediction metrics Deploy model to Google Cloud. This operation will take 10-20 mins.
# Deploy the model endpoint = # TODO 3: Your code goes here(machine_type="n1-standard-4")
courses/machine_learning/deepdive2/production_ml/labs/sdk_metric_parameter_tracking_for_custom_jobs.ipynb
GoogleCloudPlatform/training-data-analyst
apache-2.0
fe17029d76867ef02547be6fc5a6a1d8
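A plausible completion of TODO 3, hedged: the SDK's `Model.deploy` accepts a machine type and returns an `Endpoint`.

```python
# Possible completion of TODO 3.
endpoint = model.deploy(machine_type="n1-standard-4")
```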
Perform online prediction.
# Perform online prediction using endpoint prediction = # TODO 4: Your code goes here(test_dataset.tolist()) prediction
courses/machine_learning/deepdive2/production_ml/labs/sdk_metric_parameter_tracking_for_custom_jobs.ipynb
GoogleCloudPlatform/training-data-analyst
apache-2.0
1d13d9099e49adc11d7b36e2e0b3878a
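A plausible completion of TODO 4, hedged; it assumes `test_dataset` is a NumPy array of feature rows prepared earlier in the lab.

```python
# Possible completion of TODO 4.
prediction = endpoint.predict(instances=test_dataset.tolist())
prediction
```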
Extract all parameters and metrics created during this experiment.
# Extract all parameters and metrics of the experiment # TODO 5: Your code goes here
courses/machine_learning/deepdive2/production_ml/labs/sdk_metric_parameter_tracking_for_custom_jobs.ipynb
GoogleCloudPlatform/training-data-analyst
apache-2.0
f04b7a33e97bc4c6800ca1fd8098b820
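A plausible completion of TODO 5, hedged: `get_experiment_df()` returns the logged parameters and metrics of the experiment set via `aiplatform.init`, assuming such an experiment was configured earlier in the lab.

```python
# Possible completion of TODO 5.
experiment_df = aiplatform.get_experiment_df()
experiment_df
```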
XGBoost HP Tuning on AI Platform This notebook trains a model on Ai Platform using Hyperparameter Tuning to predict a car's Miles Per Gallon. It uses Auto MPG Data Set from UCI Machine Learning Repository. Citation: Dua, D. and Karra Taniskidou, E. (2017). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California, School of Information and Computer Science. How to train your model on AI Platform with HP tuning. Using HP Tuning for training can be done in a few steps: 1. Create your python model file 1. Add argument parsing for the hyperparameter values. (These values are chosen for you in this notebook) 1. Add code to download your data from Google Cloud Storage so that AI Platform can use it 1. Add code to track the performance of your hyperparameter values. 1. Add code to export and save the model to Google Cloud Storage once AI Platform finishes training the model 1. Prepare a package 1. Submit the training job Prerequisites Before you jump in, let’s cover some of the different tools you’ll be using to get HP tuning up and running on AI Platform. Google Cloud Platform lets you build and host applications and websites, store data, and analyze data on Google's scalable infrastructure. AI Platform is a managed service that enables you to easily build machine learning models that work on any type of data, of any size. Google Cloud Storage (GCS) is a unified object storage for developers and enterprises, from live data serving to data analytics/ML to data archiving. Cloud SDK is a command line tool which allows you to interact with Google Cloud products. In order to run this notebook, make sure that Cloud SDK is installed in the same environment as your Jupyter kernel. Overview of Hyperparameter Tuning - Hyperparameter tuning takes advantage of the processing infrastructure of Google Cloud Platform to test different hyperparameter configurations when training your model. Part 0: Setup Create a project on GCP Create a Google Cloud Storage Bucket Enable AI Platform Training and Prediction and Compute Engine APIs Install Cloud SDK Install XGBoost [Optional: used if running locally] Install pandas [Optional: used if running locally] Install cloudml-hypertune [Optional: used if running locally] These variables will be needed for the following steps. * TRAINER_PACKAGE_PATH &lt;./auto_mpg_hp_tuning&gt; - A packaged training application that will be staged in a Google Cloud Storage location. The model file created below is placed inside this package path. * MAIN_TRAINER_MODULE &lt;auto_mpg_hp_tuning.train&gt; - Tells AI Platform which file to execute. This is formatted as follows <folder_name.python_file_name> * JOB_DIR &lt;gs://$BUCKET_ID/xgboost_learn_job_dir&gt; - The path to a Google Cloud Storage location to use for job output. * RUNTIME_VERSION &lt;1.9&gt; - The version of AI Platform to use for the job. If you don't specify a runtime version, the training service uses the default AI Platform runtime version 1.0. See the list of runtime versions for more information. * PYTHON_VERSION &lt;3.5&gt; - The Python version to use for the job. Python 3.5 is available with runtime version 1.4 or greater. If you don't specify a Python version, the training service uses Python 2.7. * HPTUNING_CONFIG &lt;hptuning_config.yaml&gt; - Path to the job configuration file. Replace: * PROJECT_ID &lt;YOUR_PROJECT_ID&gt; - with your project's id. Use the PROJECT_ID that matches your Google Cloud Platform project. 
* BUCKET_ID &lt;YOUR_BUCKET_ID&gt; - with the bucket id you created above. * JOB_DIR &lt;gs://YOUR_BUCKET_ID/xgboost_job_dir&gt; - with the bucket id you created above. * REGION &lt;REGION&gt; - select a region from here or use the default 'us-central1'. The region is where the model will be deployed.
# Replace <PROJECT_ID> and <BUCKET_ID> with proper Project and Bucket ID's: %env PROJECT_ID <PROJECT_ID> %env BUCKET_ID <BUCKET_ID> %env JOB_DIR gs://<BUCKET_ID>/xgboost_job_dir %env REGION us-central1 %env TRAINER_PACKAGE_PATH ./auto_mpg_hp_tuning %env MAIN_TRAINER_MODULE auto_mpg_hp_tuning.train %env RUNTIME_VERSION 1.9 %env PYTHON_VERSION 3.5 %env HPTUNING_CONFIG hptuning_config.yaml ! mkdir auto_mpg_hp_tuning
notebooks/xgboost/HyperparameterTuningWithXGBoostInCMLE.ipynb
GoogleCloudPlatform/cloudml-samples
apache-2.0
9d3a3523b29df3326c8f2bb8186e8e94
The data The Auto MPG Data Set that this sample uses for training is provided by the UC Irvine Machine Learning Repository. We have hosted the data on a public GCS bucket gs://cloud-samples-data/ml-engine/auto_mpg/. The data has been pre-processed to remove rows with incomplete data so as not to create additional steps for this notebook. Training file is auto-mpg.data Note: Your typical development process with your own data would require you to upload your data to GCS so that AI Platform can access that data. However, in this case, we have put the data on GCS to avoid the steps of having you download the data from UC Irvine and then upload the data to GCS. Citation: Dua, D. and Karra Taniskidou, E. (2017). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California, School of Information and Computer Science. Disclaimer This dataset is provided by a third party. Google provides no representation, warranty, or other guarantees about the validity or any other aspects of this dataset. Part 1: Create your python model file First, we'll create the python model file (provided below) that we'll upload to AI Platform. This is similar to your normal process for creating an XGBoost model. However, there are a few key differences: 1. Downloading the data from GCS at the start of your file, so that AI Platform can access the data. 1. Exporting/saving the model to GCS at the end of your file, so that you can use it for predictions. 1. Define a command-line argument in your main training module for each tuned hyperparameter. 1. Use the value passed in those arguments to set the corresponding hyperparameter in your application's XGBoost code. 1. Use cloudml-hypertune to track your training job's metrics. The code in this file first handles the hyperparameters passed to the file from AI Platform. Then it loads the data into a pandas DataFrame that can be used by XGBoost. Then the model is fit against the training data and the metrics for that data are shared with AI Platform. Lastly, Python's built-in pickle library is used to save the model to a file that can be uploaded to AI Platform's prediction service. Note: In normal practice you would want to test your model locally on a small dataset to ensure that it works, before using it with your larger dataset on AI Platform. This avoids wasted time and costs. Setup the imports and helper functions
%%writefile ./auto_mpg_hp_tuning/train.py import argparse import datetime import os import pandas as pd import subprocess import pickle from google.cloud import storage import hypertune import xgboost as xgb from random import shuffle def split_dataframe(dataframe, rate=0.8): indices = dataframe.index.values.tolist() length = len(dataframe) shuffle(indices) train_size = int(length * rate) train_indices = indices[:train_size] test_indices = indices[train_size:] return dataframe.iloc[train_indices], dataframe.iloc[test_indices]
notebooks/xgboost/HyperparameterTuningWithXGBoostInCMLE.ipynb
GoogleCloudPlatform/cloudml-samples
apache-2.0
c467534225c77293749c3d273fbad521
Load the hyperparameter values that are passed to the model during training. In this tutorial, the XGBoost regressor (XGBRegressor) is used, because it has several parameters that can be used to help demonstrate how to choose HP tuning values. (The ranges of values are set below in the configuration file for the HP tuning values.)
%%writefile -a ./auto_mpg_hp_tuning/train.py parser = argparse.ArgumentParser() parser.add_argument( '--job-dir', # handled automatically by AI Platform help='GCS location to write checkpoints and export models', required=True ) parser.add_argument( '--max_depth', # Specified in the config file help='Maximum depth of the XGBoost tree. default: 3', default=3, type=int ) parser.add_argument( '--n_estimators', # Specified in the config file help='Number of estimators to be created. default: 100', default=100, type=int ) parser.add_argument( '--booster', # Specified in the config file help='which booster to use: gbtree, gblinear or dart. default: gbtree', default='gbtree', type=str ) args = parser.parse_args()
notebooks/xgboost/HyperparameterTuningWithXGBoostInCMLE.ipynb
GoogleCloudPlatform/cloudml-samples
apache-2.0
238b515a68f7fb99f41d3758051b00cb
Add code to download the data from GCS In this case, since we are using the publicly hosted data, AI Platform will be able to access the data when training your model.
%%writefile -a ./auto_mpg_hp_tuning/train.py # Public bucket holding the auto mpg data bucket = storage.Client().bucket('cloud-samples-data') # Path to the data inside the public bucket blob = bucket.blob('ml-engine/auto_mpg/auto-mpg.data') # Download the data blob.download_to_filename('auto-mpg.data') # --------------------------------------- # This is where your model code would go. Below is an example model using the auto mpg dataset. # --------------------------------------- # Define the format of your input data including unused columns # (These are the columns from the auto-mpg data files) COLUMNS = [ 'mpg', 'cylinders', 'displacement', 'horsepower', 'weight', 'acceleration', 'model-year', 'origin', 'car-name' ] FEATURES = [ 'cylinders', 'displacement', 'horsepower', 'weight', 'acceleration', 'model-year', 'origin' ] TARGET = 'mpg' # Load the training auto mpg dataset with open('./auto-mpg.data', 'r') as train_data: raw_training_data = pd.read_csv(train_data, header=None, names=COLUMNS, delim_whitespace=True) raw_training_data = raw_training_data[FEATURES + [TARGET]] train_df, test_df = split_dataframe(raw_training_data, 0.8)
notebooks/xgboost/HyperparameterTuningWithXGBoostInCMLE.ipynb
GoogleCloudPlatform/cloudml-samples
apache-2.0
60de3ec00d6d59251f39045f68965eaa
Use the Hyperparameters Use the Hyperparameter values passed in those arguments to set the corresponding hyperparameters in your application's XGBoost code.
%%writefile -a ./auto_mpg_hp_tuning/train.py

# Create the regressor. Here we use an XGBoost regressor (XGBRegressor) to demonstrate the use of HP Tuning.
# Here is where we set the variables used during HP Tuning from
# the parameters passed into the python script
regressor = xgb.XGBRegressor(max_depth=args.max_depth,
                             n_estimators=args.n_estimators,
                             booster=args.booster
                             )

# Fit the regressor to the training features and target
regressor.fit(train_df[FEATURES], train_df[TARGET])
notebooks/xgboost/HyperparameterTuningWithXGBoostInCMLE.ipynb
GoogleCloudPlatform/cloudml-samples
apache-2.0
9368bb51ec2216af17d897bf9cba0a0c
Report the model score (R², as returned by the regressor's score method) as the hyperparameter tuning objective metric.
%%writefile -a ./auto_mpg_hp_tuning/train.py

# Calculate the score (R^2 for a regressor) on the given test data and labels.
score = regressor.score(test_df[FEATURES], test_df[TARGET])

# The default name of the metric is training/hptuning/metric.
# We recommend that you assign a custom name. The only functional difference is that
# if you use a custom name, you must set the hyperparameterMetricTag value in the
# HyperparameterSpec object in your job request to match your chosen name.
# https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#HyperparameterSpec
hpt = hypertune.HyperTune()
hpt.report_hyperparameter_tuning_metric(
    hyperparameter_metric_tag='my_metric_tag',
    metric_value=score,
    global_step=1000)
notebooks/xgboost/HyperparameterTuningWithXGBoostInCMLE.ipynb
GoogleCloudPlatform/cloudml-samples
apache-2.0
1bb31cc4202de2a1db2135a49077bd74
Export and save the model to GCS
%%writefile -a ./auto_mpg_hp_tuning/train.py # Export the model to a file model_filename = 'model.pkl' with open(model_filename, "wb") as f: pickle.dump(regressor, f) # Example: job_dir = 'gs://BUCKET_ID/xgboost_job_dir/1' job_dir = args.job_dir.replace('gs://', '') # Remove the 'gs://' # Get the Bucket Id bucket_id = job_dir.split('/')[0] # Get the path bucket_path = job_dir[len('{}/'.format(bucket_id)):] # Example: 'xgboost_job_dir/1' # Upload the model to GCS bucket = storage.Client().bucket(bucket_id) blob = bucket.blob('{}/{}'.format( bucket_path, model_filename)) blob.upload_from_filename(model_filename)
notebooks/xgboost/HyperparameterTuningWithXGBoostInCMLE.ipynb
GoogleCloudPlatform/cloudml-samples
apache-2.0
d69c5cd01d6a5f1ff56a07463737b10f