repo_name (stringlengths 6–77) | path (stringlengths 8–215) | license (stringclasses, 15 values) | content (stringlengths 335–154k) |
---|---|---|---|
infilect/ml-course1 | keras-notebooks/Transfer-Learning/5.3.1 Keras and TF Integration.ipynb | mit | import tensorflow as tf
tf.__version__
from tensorflow.contrib import keras
"""
Explanation: Tight Integration
End of explanation
"""
from keras.datasets import cifar100
(X_train, Y_train), (X_test, Y_test) = cifar100.load_data(label_mode='fine')
from keras import backend as K
img_rows, img_cols = 32, 32
if K.image_data_format() == 'channels_first':
shape_ord = (3, img_rows, img_cols)
else: # channel_last
shape_ord = (img_rows, img_cols, 3)
shape_ord
X_train.shape
import numpy as np
nb_classes = len(np.unique(Y_train))
from keras.applications import vgg16
from keras.layers import Input
vgg16_model = vgg16.VGG16(weights='imagenet', include_top=False,
input_tensor=Input(shape_ord))
vgg16_model.summary()
for layer in vgg16_model.layers:
layer.trainable = False # freeze layer
from keras.layers.core import Dense, Dropout, Flatten
from keras.layers.normalization import BatchNormalization
x = Flatten()(vgg16_model.output)  # the input shape is inferred from the VGG16 output tensor
x = Dense(4096, activation='relu', name='ft_fc1')(x)
x = Dropout(0.5)(x)
x = BatchNormalization()(x)
predictions = Dense(nb_classes, activation = 'softmax')(x)
from keras.models import Model
#create graph of your new model
model = Model(inputs=vgg16_model.input, outputs=predictions)
#compile the model
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
"""
Explanation: Tensorboard Integration
End of explanation
"""
from keras.callbacks import TensorBoard
"""
Explanation: TensorBoard Callback
End of explanation
"""
## One-hot encoding of labels (100 fine-grained classes)
from keras.utils import np_utils
Y_train.shape
Y_train = np_utils.to_categorical(Y_train)
Y_train.shape
def generate_batches(X, Y, batch_size=128):
    """Yield successive (X, Y) mini-batches, wrapping around indefinitely."""
    # Iteration has to go on indefinitely for fit_generator
    start = 0
    while True:
        yield (X[start:start+batch_size], Y[start:start+batch_size])
        start = (start + batch_size) % X.shape[0]
batch_size = 64
steps_per_epoch = X_train.shape[0] // batch_size  # fit_generator expects an integer
model.fit_generator(generate_batches(X_train, Y_train, batch_size=batch_size),
steps_per_epoch=steps_per_epoch, epochs=20, verbose=1,
callbacks=[TensorBoard(log_dir='./tf_logs', histogram_freq=10,
write_graph=True, write_images=True,
embeddings_freq=10,
embeddings_layer_names=['block1_conv2',
'block5_conv1',
'ft_fc1'],
embeddings_metadata=None)])
"""
Explanation: ```python
Arguments
log_dir: the path of the directory where to save the log
files to be parsed by TensorBoard.
histogram_freq: frequency (in epochs) at which to compute activation
and weight histograms for the layers of the model. If set to 0,
histograms won't be computed. Validation data (or split) must be
specified for histogram visualizations.
write_graph: whether to visualize the graph in TensorBoard.
The log file can become quite large when
write_graph is set to True.
write_grads: whether to visualize gradient histograms in TensorBoard.
`histogram_freq` must be greater than 0.
write_images: whether to write model weights to visualize as
image in TensorBoard.
embeddings_freq: frequency (in epochs) at which selected embedding
layers will be saved.
embeddings_layer_names: a list of names of layers to keep an eye on. If
    None or an empty list, all the embedding layers will be watched.
embeddings_metadata: a dictionary which maps layer name to a file name
in which metadata for this embedding layer is saved.
```
See the details
about the metadata file format. If the same metadata file is used for all embedding layers, a single string can be passed.
End of explanation
"""
%%bash
python -m tensorflow.tensorboard --logdir=./tf_logs
"""
Explanation: Running TensorBoard
End of explanation
"""
import operator
import threading
from functools import reduce
import keras
import keras.backend as K
from keras.engine import Model
import numpy as np
import tensorflow as tf
import time
from keras.layers import Conv2D
from tqdm import tqdm
def prod(factors):
return reduce(operator.mul, factors, 1)
TRAINING = True
with K.get_session() as sess:
shp = [10, 200, 200, 3]
shp1 = [10, 7, 7, 80]
inp = K.placeholder(shp)
inp1 = K.placeholder(shp1)
queue = tf.FIFOQueue(20, [tf.float32, tf.float32], [shp, shp1])
x1, y1 = queue.dequeue()
enqueue = queue.enqueue([inp, inp1])
model = keras.applications.ResNet50(False, "imagenet", x1, shp[1:])
for i in range(3):
model.layers.pop()
model.layers[-1].outbound_nodes = []
model.outputs = [model.layers[-1].output]
output = model.outputs[0] # 7x7
# Reduce filter size to avoid OOM
output = Conv2D(32, (1, 1), padding="same", activation='relu')(output)
output3 = Conv2D(5 * (4 + 11 + 1), (1, 1), padding="same", activation='relu')(
output) # YOLO output B (4 + nb_class +1)
cost = tf.reduce_sum(tf.abs(output3 - y1))
optimizer = tf.train.RMSPropOptimizer(0.001).minimize(cost)
sess.run(tf.global_variables_initializer())
def get_input():
# Super long processing I/O bla bla bla
return np.arange(prod(shp)).reshape(shp).astype(np.float32), np.arange(prod(shp1)).reshape(shp1).astype(
np.float32)
def generate(coord, enqueue_op):
while not coord.should_stop():
inp_feed, inp1_feed = get_input()
sess.run(enqueue_op, feed_dict={inp: inp_feed, inp1: inp1_feed})
start = time.time()
for i in tqdm(range(10)): # EPOCH
for j in range(30): # Batch
x,y = get_input()
optimizer_, s = sess.run([optimizer, queue.size()],
feed_dict={x1:x,y1:y, K.learning_phase(): int(TRAINING)})
print("Took : ", time.time() - start)
coordinator = tf.train.Coordinator()
threads = [threading.Thread(target=generate, args=(coordinator, enqueue)) for i in range(10)]
for t in threads:
t.start()
start = time.time()
for i in tqdm(range(10)): # EPOCH
for j in range(30): # Batch
optimizer_, s = sess.run([optimizer, queue.size()],
feed_dict={K.learning_phase(): int(TRAINING)})
print("Took : ", time.time() - start)
def clear_queue(queue, threads):
while any([t.is_alive() for t in threads]):
_, s = sess.run([queue.dequeue(), queue.size()])
print(s)
coordinator.request_stop()
clear_queue(queue, threads)
coordinator.join(threads)
print("DONE Queue")
"""
Explanation: tf.Queue integration with Keras
Source: https://gist.github.com/Dref360/43e20eda5eb5834b61bc06a4c1855b29
End of explanation
"""
|
azhurb/deep-learning | tensorboard/Anna_KaRNNa_Hyperparameters.ipynb | mit | import time
from collections import namedtuple
import numpy as np
import tensorflow as tf
"""
Explanation: Anna KaRNNa
In this notebook, I'll build a character-wise RNN trained on Anna Karenina, one of my all-time favorite books. It'll be able to generate new text based on the text from the book.
This network is based on Andrej Karpathy's post on RNNs and his implementation in Torch. Also, some information comes from r2rt and from Sherjil Ozair on GitHub. Below is the general architecture of the character-wise RNN.
<img src="assets/charseq.jpeg" width="500">
End of explanation
"""
with open('anna.txt', 'r') as f:
text=f.read()
vocab = set(text)
vocab_to_int = {c: i for i, c in enumerate(vocab)}
int_to_vocab = dict(enumerate(vocab))
chars = np.array([vocab_to_int[c] for c in text], dtype=np.int32)
text[:100]
chars[:100]
"""
Explanation: First we'll load the text file and convert it into integers for our network to use.
End of explanation
"""
def split_data(chars, batch_size, num_steps, split_frac=0.9):
"""
Split character data into training and validation sets, inputs and targets for each set.
Arguments
---------
chars: character array
        batch_size: Number of sequences in each batch
num_steps: Number of sequence steps to keep in the input and pass to the network
split_frac: Fraction of batches to keep in the training set
Returns train_x, train_y, val_x, val_y
"""
slice_size = batch_size * num_steps
n_batches = int(len(chars) / slice_size)
# Drop the last few characters to make only full batches
x = chars[: n_batches*slice_size]
y = chars[1: n_batches*slice_size + 1]
# Split the data into batch_size slices, then stack them into a 2D matrix
x = np.stack(np.split(x, batch_size))
y = np.stack(np.split(y, batch_size))
# Now x and y are arrays with dimensions batch_size x n_batches*num_steps
    # Split into training and validation sets, keep the first split_frac batches for training
split_idx = int(n_batches*split_frac)
train_x, train_y= x[:, :split_idx*num_steps], y[:, :split_idx*num_steps]
val_x, val_y = x[:, split_idx*num_steps:], y[:, split_idx*num_steps:]
return train_x, train_y, val_x, val_y
train_x, train_y, val_x, val_y = split_data(chars, 10, 200)
train_x.shape
train_x[:,:10]
"""
Explanation: Now I need to split up the data into batches, and into training and validation sets. I should be making a test set here, but I'm not going to worry about that. My test will be if the network can generate new text.
Here I'll make both input and target arrays. The targets are the same as the inputs, except shifted one character over. I'll also drop the last bit of data so that I'll only have completely full batches.
The idea here is to make a 2D matrix where the number of rows is equal to the number of batches. Each row will be one long concatenated string from the character data. We'll split this data into a training set and validation set using the split_frac keyword. This will keep 90% of the batches in the training set, the other 10% in the validation set.
End of explanation
"""
def get_batch(arrs, num_steps):
batch_size, slice_size = arrs[0].shape
n_batches = int(slice_size/num_steps)
for b in range(n_batches):
yield [x[:, b*num_steps: (b+1)*num_steps] for x in arrs]
def build_rnn(num_classes, batch_size=50, num_steps=50, lstm_size=128, num_layers=2,
learning_rate=0.001, grad_clip=5, sampling=False):
    if sampling:
batch_size, num_steps = 1, 1
tf.reset_default_graph()
# Declare placeholders we'll feed into the graph
with tf.name_scope('inputs'):
inputs = tf.placeholder(tf.int32, [batch_size, num_steps], name='inputs')
x_one_hot = tf.one_hot(inputs, num_classes, name='x_one_hot')
with tf.name_scope('targets'):
targets = tf.placeholder(tf.int32, [batch_size, num_steps], name='targets')
y_one_hot = tf.one_hot(targets, num_classes, name='y_one_hot')
y_reshaped = tf.reshape(y_one_hot, [-1, num_classes])
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
# Build the RNN layers
with tf.name_scope("RNN_cells"):
lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
cell = tf.contrib.rnn.MultiRNNCell([drop] * num_layers)
with tf.name_scope("RNN_init_state"):
initial_state = cell.zero_state(batch_size, tf.float32)
# Run the data through the RNN layers
with tf.name_scope("RNN_forward"):
outputs, state = tf.nn.dynamic_rnn(cell, x_one_hot, initial_state=initial_state)
final_state = state
# Reshape output so it's a bunch of rows, one row for each cell output
with tf.name_scope('sequence_reshape'):
seq_output = tf.concat(outputs, axis=1,name='seq_output')
output = tf.reshape(seq_output, [-1, lstm_size], name='graph_output')
# Now connect the RNN outputs to a softmax layer and calculate the cost
with tf.name_scope('logits'):
softmax_w = tf.Variable(tf.truncated_normal((lstm_size, num_classes), stddev=0.1),
name='softmax_w')
softmax_b = tf.Variable(tf.zeros(num_classes), name='softmax_b')
logits = tf.matmul(output, softmax_w) + softmax_b
tf.summary.histogram('softmax_w', softmax_w)
tf.summary.histogram('softmax_b', softmax_b)
with tf.name_scope('predictions'):
preds = tf.nn.softmax(logits, name='predictions')
tf.summary.histogram('predictions', preds)
with tf.name_scope('cost'):
loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_reshaped, name='loss')
cost = tf.reduce_mean(loss, name='cost')
tf.summary.scalar('cost', cost)
# Optimizer for training, using gradient clipping to control exploding gradients
with tf.name_scope('train'):
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), grad_clip)
train_op = tf.train.AdamOptimizer(learning_rate)
optimizer = train_op.apply_gradients(zip(grads, tvars))
merged = tf.summary.merge_all()
# Export the nodes
export_nodes = ['inputs', 'targets', 'initial_state', 'final_state',
'keep_prob', 'cost', 'preds', 'optimizer', 'merged']
Graph = namedtuple('Graph', export_nodes)
local_dict = locals()
graph = Graph(*[local_dict[each] for each in export_nodes])
return graph
"""
Explanation: I'll write another function to grab batches out of the arrays made by split_data. Here each batch will be a sliding window on these arrays with size batch_size x num_steps. For example, if we want our network to train on a sequence of 100 characters, num_steps = 100. For the next batch, we'll shift this window over by num_steps characters. In this way we can feed batches to the network and the cell states will carry over from batch to batch.
End of explanation
"""
batch_size = 100
num_steps = 100
lstm_size = 512
num_layers = 2
learning_rate = 0.001
"""
Explanation: Hyperparameters
Here I'm defining the hyperparameters for the network. The two you probably haven't seen before are lstm_size and num_layers. These set the number of hidden units in the LSTM layers and the number of LSTM layers, respectively. Of course, making these bigger will improve the network's performance but you'll have to watch out for overfitting. If your validation loss is much larger than the training loss, you're probably overfitting. Decrease the size of the network or decrease the dropout keep probability.
End of explanation
"""
!mkdir -p checkpoints/anna
def train(model, epochs, file_writer):
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# Use the line below to load a checkpoint and resume training
#saver.restore(sess, 'checkpoints/anna20.ckpt')
n_batches = int(train_x.shape[1]/num_steps)
iterations = n_batches * epochs
for e in range(epochs):
# Train network
new_state = sess.run(model.initial_state)
loss = 0
for b, (x, y) in enumerate(get_batch([train_x, train_y], num_steps), 1):
iteration = e*n_batches + b
start = time.time()
feed = {model.inputs: x,
model.targets: y,
model.keep_prob: 0.5,
model.initial_state: new_state}
summary, batch_loss, new_state, _ = sess.run([model.merged, model.cost,
model.final_state, model.optimizer],
feed_dict=feed)
loss += batch_loss
end = time.time()
print('Epoch {}/{} '.format(e+1, epochs),
'Iteration {}/{}'.format(iteration, iterations),
'Training loss: {:.4f}'.format(loss/b),
'{:.4f} sec/batch'.format((end-start)))
file_writer.add_summary(summary, iteration)
epochs = 20
batch_size = 100
num_steps = 100
train_x, train_y, val_x, val_y = split_data(chars, batch_size, num_steps)
for lstm_size in [128,256,512]:
for num_layers in [1, 2]:
for learning_rate in [0.002, 0.001]:
log_string = 'logs/4/lr={},rl={},ru={}'.format(learning_rate, num_layers, lstm_size)
writer = tf.summary.FileWriter(log_string)
model = build_rnn(len(vocab),
batch_size=batch_size,
num_steps=num_steps,
learning_rate=learning_rate,
lstm_size=lstm_size,
num_layers=num_layers)
train(model, epochs, writer)
tf.train.get_checkpoint_state('checkpoints/anna')
"""
Explanation: Training
Time for training, which is pretty straightforward. Here I pass in some data and get an LSTM state back. Then I pass that state back in to the network so the next batch can continue the state from the previous batch. And every so often (set by save_every_n) I calculate the validation loss and save a checkpoint.
End of explanation
"""
def pick_top_n(preds, vocab_size, top_n=5):
p = np.squeeze(preds)
p[np.argsort(p)[:-top_n]] = 0
p = p / np.sum(p)
c = np.random.choice(vocab_size, 1, p=p)[0]
return c
def sample(checkpoint, n_samples, lstm_size, vocab_size, prime="The "):
prime = "Far"
samples = [c for c in prime]
model = build_rnn(vocab_size, lstm_size=lstm_size, sampling=True)
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, checkpoint)
new_state = sess.run(model.initial_state)
for c in prime:
x = np.zeros((1, 1))
x[0,0] = vocab_to_int[c]
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.preds, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
for i in range(n_samples):
x[0,0] = c
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.preds, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
return ''.join(samples)
checkpoint = "checkpoints/anna/i3560_l512_1.122.ckpt"
samp = sample(checkpoint, 2000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = "checkpoints/anna/i200_l512_2.432.ckpt"
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = "checkpoints/anna/i600_l512_1.750.ckpt"
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = "checkpoints/anna/i1000_l512_1.484.ckpt"
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
"""
Explanation: Sampling
Now that the network is trained, we can use it to generate new text. The idea is that we pass in a character, then the network will predict the next character. We can use the new one to predict the next one, and we keep doing this to generate all new text. I also included some functionality to prime the network with some text by passing in a string and building up a state from that.
The network gives us predictions for each character. To reduce noise and make things a little less random, I'm going to only choose a new character from the top N most likely characters.
End of explanation
"""
|
mtasende/Machine-Learning-Nanodegree-Capstone | notebooks/prod/.ipynb_checkpoints/n08_simple_q_learner_1000_states-checkpoint.ipynb | mit | # Basic imports
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import datetime as dt
import scipy.optimize as spo
import sys
from time import time
from sklearn.metrics import r2_score, median_absolute_error
from multiprocessing import Pool
%matplotlib inline
%pylab inline
pylab.rcParams['figure.figsize'] = (20.0, 10.0)
%load_ext autoreload
%autoreload 2
sys.path.append('../../')
import recommender.simulator as sim
from utils.analysis import value_eval
from recommender.agent import Agent
from functools import partial
NUM_THREADS = 1
LOOKBACK = 252*2 + 28
STARTING_DAYS_AHEAD = 20
POSSIBLE_FRACTIONS = [0.0, 1.0]
# Get the data
SYMBOL = 'SPY'
total_data_train_df = pd.read_pickle('../../data/data_train_val_df.pkl').stack(level='feature')
data_train_df = total_data_train_df[SYMBOL].unstack()
total_data_test_df = pd.read_pickle('../../data/data_test_df.pkl').stack(level='feature')
data_test_df = total_data_test_df[SYMBOL].unstack()
if LOOKBACK == -1:
total_data_in_df = total_data_train_df
data_in_df = data_train_df
else:
data_in_df = data_train_df.iloc[-LOOKBACK:]
total_data_in_df = total_data_train_df.loc[data_in_df.index[0]:]
# Create many agents
index = np.arange(NUM_THREADS).tolist()
env, num_states, num_actions = sim.initialize_env(total_data_train_df,
SYMBOL,
starting_days_ahead=STARTING_DAYS_AHEAD,
possible_fractions=POSSIBLE_FRACTIONS)
agents = [Agent(num_states=num_states,
num_actions=num_actions,
random_actions_rate=0.98,
random_actions_decrease=0.999,
dyna_iterations=0,
name='Agent_{}'.format(i)) for i in index]
def show_results(results_list, data_in_df, graph=False):
for values in results_list:
total_value = values.sum(axis=1)
print('Sharpe ratio: {}\nCum. Ret.: {}\nAVG_DRET: {}\nSTD_DRET: {}\nFinal value: {}'.format(*value_eval(pd.DataFrame(total_value))))
print('-'*100)
initial_date = total_value.index[0]
compare_results = data_in_df.loc[initial_date:, 'Close'].copy()
compare_results.name = SYMBOL
compare_results_df = pd.DataFrame(compare_results)
compare_results_df['portfolio'] = total_value
std_comp_df = compare_results_df / compare_results_df.iloc[0]
if graph:
plt.figure()
std_comp_df.plot()
"""
Explanation: In this notebook a simple Q learner will be trained and evaluated. The Q learner recommends when to buy or sell shares of one particular stock, and in which quantity (in fact it determines the desired fraction of shares in the total portfolio value). One initial attempt was made to train the Q-learner with multiple processes, but it was unsuccessful.
End of explanation
"""
print('Sharpe ratio: {}\nCum. Ret.: {}\nAVG_DRET: {}\nSTD_DRET: {}\nFinal value: {}'.format(*value_eval(pd.DataFrame(data_in_df['Close'].iloc[STARTING_DAYS_AHEAD:]))))
# Simulate (with new envs, each time)
n_epochs = 4
for i in range(n_epochs):
tic = time()
results_list = sim.simulate_period(total_data_in_df,
SYMBOL,
agents[0],
starting_days_ahead=STARTING_DAYS_AHEAD,
possible_fractions=POSSIBLE_FRACTIONS,
verbose=False)
toc = time()
print('Epoch: {}'.format(i))
print('Elapsed time: {} seconds.'.format((toc-tic)))
print('Random Actions Rate: {}'.format(agents[0].random_actions_rate))
show_results([results_list], data_in_df)
results_list = sim.simulate_period(total_data_in_df,
SYMBOL,
agents[0],
learn=False,
starting_days_ahead=STARTING_DAYS_AHEAD,
possible_fractions=POSSIBLE_FRACTIONS,)
show_results([results_list], data_in_df, graph=True)
"""
Explanation: Let's show the symbols data, to see how good the recommender has to be.
End of explanation
"""
env, num_states, num_actions = sim.initialize_env(total_data_test_df,
SYMBOL,
starting_days_ahead=STARTING_DAYS_AHEAD,
possible_fractions=POSSIBLE_FRACTIONS)
tic = time()
results_list = sim.simulate_period(total_data_test_df,
SYMBOL,
agents[0],
learn=False,
starting_days_ahead=STARTING_DAYS_AHEAD,
possible_fractions=POSSIBLE_FRACTIONS,
verbose=False)
toc = time()
print('Epoch: {}'.format(i))
print('Elapsed time: {} seconds.'.format((toc-tic)))
print('Random Actions Rate: {}'.format(agents[0].random_actions_rate))
show_results([results_list], data_test_df, graph=True)
"""
Explanation: Let's run the trained agent, with the test set
First, a non-learning test: this scenario is worse than what is achievable (in fact, the Q-learner can keep learning from past samples in the test set without compromising causality).
End of explanation
"""
env, num_states, num_actions = sim.initialize_env(total_data_test_df,
SYMBOL,
starting_days_ahead=STARTING_DAYS_AHEAD,
possible_fractions=POSSIBLE_FRACTIONS)
tic = time()
results_list = sim.simulate_period(total_data_test_df,
SYMBOL,
agents[0],
learn=True,
starting_days_ahead=STARTING_DAYS_AHEAD,
possible_fractions=POSSIBLE_FRACTIONS,
verbose=False)
toc = time()
print('Epoch: {}'.format(i))
print('Elapsed time: {} seconds.'.format((toc-tic)))
print('Random Actions Rate: {}'.format(agents[0].random_actions_rate))
show_results([results_list], data_test_df, graph=True)
import pickle
with open('../../data/best_agent.pkl', 'wb') as best_agent:
pickle.dump(agents[0], best_agent)
"""
Explanation: And now a "realistic" test, in which the learner continues to learn from past samples in the test set (it even makes some random moves, though very few).
End of explanation
"""
|
rueedlinger/machine-learning-snippets | notebooks/automl/classification_with_automl.ipynb | mit | %matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
from sklearn import datasets, metrics, model_selection, preprocessing, pipeline
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import autosklearn.classification
wine = datasets.load_wine()
print(wine.DESCR)
X = pd.DataFrame(wine.data, columns=wine.feature_names)
y = wine.target
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, train_size=0.5, stratify=y)
df_train = pd.DataFrame(y_train, columns=['target'])
df_train['type'] = 'train'
df_test = pd.DataFrame(y_test, columns=['target'])
df_test['type'] = 'test'
df_set = df_train.append(df_test)
_ = sns.countplot(x='target', hue='type', data=df_set)
print('train samples:', len(X_train))
print('test samples', len(X_test))
"""
Explanation: Classification with AutoML (auto-sklearn)
End of explanation
"""
model = autosklearn.classification.AutoSklearnClassifier(time_left_for_this_task=30, ensemble_size=3)
%%capture
# ignore output from the model fit with the %%capture magic command
model.fit(X_train, y_train)
"""
Explanation: Note: We restrict the running time and the ensemble size here, because otherwise the model fitting would take much longer. So this is just an example of how you could run AutoML.
End of explanation
"""
for m in model.get_models_with_weights():
print(m)
predicted = model.predict(X_test)
confusion_matrix = pd.DataFrame(metrics.confusion_matrix(y_test, predicted))
confusion_matrix
_ = sns.heatmap(confusion_matrix, annot=True, cmap="Blues")
print("accuracy: {:.3f}".format(metrics.accuracy_score(y_test, predicted)))
print("precision: {:.3f}".format(metrics.precision_score(y_test, predicted, average='weighted')))
print("recall: {:.3f}".format(metrics.recall_score(y_test, predicted, average='weighted')))
print("f1 score: {:.3f}".format(metrics.f1_score(y_test, predicted, average='weighted')))
"""
Explanation: Print the final ensemble constructed by auto-sklearn
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/ncc/cmip6/models/sandbox-3/toplevel.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'ncc', 'sandbox-3', 'toplevel')
"""
Explanation: ES-DOC CMIP6 Model Properties - Toplevel
MIP Era: CMIP6
Institute: NCC
Source ID: SANDBOX-3
Sub-Topics: Radiative Forcings.
Properties: 85 (42 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:25
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Flux Correction
3. Key Properties --> Genealogy
4. Key Properties --> Software Properties
5. Key Properties --> Coupling
6. Key Properties --> Tuning Applied
7. Key Properties --> Conservation --> Heat
8. Key Properties --> Conservation --> Fresh Water
9. Key Properties --> Conservation --> Salt
10. Key Properties --> Conservation --> Momentum
11. Radiative Forcings
12. Radiative Forcings --> Greenhouse Gases --> CO2
13. Radiative Forcings --> Greenhouse Gases --> CH4
14. Radiative Forcings --> Greenhouse Gases --> N2O
15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3
16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3
17. Radiative Forcings --> Greenhouse Gases --> CFC
18. Radiative Forcings --> Aerosols --> SO4
19. Radiative Forcings --> Aerosols --> Black Carbon
20. Radiative Forcings --> Aerosols --> Organic Carbon
21. Radiative Forcings --> Aerosols --> Nitrate
22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect
23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect
24. Radiative Forcings --> Aerosols --> Dust
25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic
26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic
27. Radiative Forcings --> Aerosols --> Sea Salt
28. Radiative Forcings --> Other --> Land Use
29. Radiative Forcings --> Other --> Solar
1. Key Properties
Key properties of the model
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Top level overview of coupled model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of coupled model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Flux Correction
Flux correction properties of the model
2.1. Details
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how flux corrections are applied in the model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Genealogy
Genealogy and history of the model
3.1. Year Released
Is Required: TRUE Type: STRING Cardinality: 1.1
Year the model was released
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.2. CMIP3 Parent
Is Required: FALSE Type: STRING Cardinality: 0.1
CMIP3 parent if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.3. CMIP5 Parent
Is Required: FALSE Type: STRING Cardinality: 0.1
CMIP5 parent if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.4. Previous Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Previously known as
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Software Properties
Software properties of model
4.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.4. Components Structure
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe how model realms are structured into independent software components (coupled via a coupler) and internal software components.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OASIS"
# "OASIS3-MCT"
# "ESMF"
# "NUOPC"
# "Bespoke"
# "Unknown"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 4.5. Coupler
Is Required: FALSE Type: ENUM Cardinality: 0.1
Overarching coupling framework for model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Key Properties --> Coupling
**
5.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of coupling in the model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 5.2. Atmosphere Double Flux
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the atmosphere passing a double flux to the ocean and sea ice (as opposed to a single one)?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Atmosphere grid"
# "Ocean grid"
# "Specific coupler grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 5.3. Atmosphere Fluxes Calculation Grid
Is Required: FALSE Type: ENUM Cardinality: 0.1
Where are the air-sea fluxes calculated
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 5.4. Atmosphere Relative Winds
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are relative or absolute winds used to compute the flux? I.e. do ocean surface currents enter the wind stress calculation?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Key Properties --> Tuning Applied
Tuning methodology for model
6.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview description of tuning: explain and motivate the main targets and metrics/diagnostics retained. Document the relative weight given to climate performance metrics/diagnostics versus process oriented metrics/diagnostics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.2. Global Mean Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List set of metrics/diagnostics of the global mean state used in tuning model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.3. Regional Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List of regional metrics/diagnostics of mean state (e.g THC, AABW, regional means etc) used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.4. Trend Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List observed trend metrics/diagnostics used in tuning model/component (such as 20th century)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.5. Energy Balance
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how energy balance was obtained in the full system: in the various components independently or at the components coupling stage?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.6. Fresh Water Balance
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how fresh_water balance was obtained in the full system: in the various components independently or at the components coupling stage?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Key Properties --> Conservation --> Heat
Global heat conservation properties of the model
7.1. Global
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how heat is conserved globally
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.2. Atmos Ocean Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how heat is conserved at the atmosphere/ocean coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.3. Atmos Land Interface
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how heat is conserved at the atmosphere/land coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.4. Atmos Sea-ice Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how heat is conserved at the atmosphere/sea-ice coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.5. Ocean Seaice Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how heat is conserved at the ocean/sea-ice coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.6. Land Ocean Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how heat is conserved at the land/ocean coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Key Properties --> Conservation --> Fresh Water
Global fresh water conservation properties of the model
8.1. Global
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how fresh_water is conserved globally
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.2. Atmos Ocean Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how fresh_water is conserved at the atmosphere/ocean coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.3. Atmos Land Interface
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how fresh water is conserved at the atmosphere/land coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.4. Atmos Sea-ice Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how fresh water is conserved at the atmosphere/sea-ice coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.5. Ocean Seaice Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how fresh water is conserved at the ocean/sea-ice coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.6. Runoff
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe how runoff is distributed and conserved
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.7. Iceberg Calving
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how iceberg calving is modeled and conserved
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.8. Endoreic Basins
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how endoreic basins (no ocean access) are treated
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.9. Snow Accumulation
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe how snow accumulation over land and over sea-ice is treated
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Key Properties --> Conservation --> Salt
Global salt conservation properties of the model
9.1. Ocean Seaice Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how salt is conserved at the ocean/sea-ice coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10. Key Properties --> Conservation --> Momentum
Global momentum conservation properties of the model
10.1. Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how momentum is conserved in the model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11. Radiative Forcings
Radiative forcings of the model for historical and scenario (aka Table 12.1 IPCC AR5)
11.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of radiative forcings (GHG and aerosols) implementation in model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12. Radiative Forcings --> Greenhouse Gases --> CO2
Carbon dioxide forcing
12.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13. Radiative Forcings --> Greenhouse Gases --> CH4
Methane forcing
13.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 13.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14. Radiative Forcings --> Greenhouse Gases --> N2O
Nitrous oxide forcing
14.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3
Troposheric ozone forcing
15.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3
Stratospheric ozone forcing
16.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 16.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17. Radiative Forcings --> Greenhouse Gases --> CFC
Ozone-depleting and non-ozone-depleting fluorinated gases forcing
17.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "Option 1"
# "Option 2"
# "Option 3"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.2. Equivalence Concentration
Is Required: TRUE Type: ENUM Cardinality: 1.1
Details of any equivalence concentrations used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.3. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18. Radiative Forcings --> Aerosols --> SO4
SO4 aerosol forcing
18.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 18.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 19. Radiative Forcings --> Aerosols --> Black Carbon
Black carbon aerosol forcing
19.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20. Radiative Forcings --> Aerosols --> Organic Carbon
Organic carbon aerosol forcing
20.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 20.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 21. Radiative Forcings --> Aerosols --> Nitrate
Nitrate forcing
21.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 21.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect
Cloud albedo effect forcing (RFaci)
22.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 22.2. Aerosol Effect On Ice Clouds
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Radiative effects of aerosols on ice clouds are represented?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.3. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect
Cloud lifetime effect forcing (ERFaci)
23.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 23.2. Aerosol Effect On Ice Clouds
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Radiative effects of aerosols on ice clouds are represented?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 23.3. RFaci From Sulfate Only
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Radiative forcing from aerosol cloud interactions from sulfate aerosol only?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 23.4. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 24. Radiative Forcings --> Aerosols --> Dust
Dust forcing
24.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 24.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic
Tropospheric volcanic forcing
25.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.2. Historical Explosive Volcanic Aerosol Implementation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How explosive volcanic aerosol is implemented in historical simulations
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.3. Future Explosive Volcanic Aerosol Implementation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How explosive volcanic aerosol is implemented in future simulations
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 25.4. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic
Stratospheric volcanic forcing
26.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 26.2. Historical Explosive Volcanic Aerosol Implementation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How explosive volcanic aerosol is implemented in historical simulations
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 26.3. Future Explosive Volcanic Aerosol Implementation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How explosive volcanic aerosol is implemented in future simulations
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.4. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27. Radiative Forcings --> Aerosols --> Sea Salt
Sea salt forcing
27.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 28. Radiative Forcings --> Other --> Land Use
Land use forcing
28.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 28.2. Crop Change Only
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Land use change represented via crop change only?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28.3. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "irradiance"
# "proton"
# "electron"
# "cosmic ray"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 29. Radiative Forcings --> Other --> Solar
Solar forcing
29.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How solar forcing is provided
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
|
tensorflow/docs-l10n | site/ja/lite/performance/post_training_quant.ipynb | apache-2.0 | #@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2019 The TensorFlow Authors.
End of explanation
"""
import logging
logging.getLogger("tensorflow").setLevel(logging.DEBUG)
import tensorflow as tf
from tensorflow import keras
import numpy as np
import pathlib
"""
Explanation: Post-training dynamic range quantization
<table class="tfo-notebook-buttons" align="left">
<td> <a target="_blank" href="https://www.tensorflow.org/lite/performance/post_training_quant"><img src="https://www.tensorflow.org/images/tf_logo_32px.png">View on TensorFlow.org</a> </td>
<td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/lite/performance/post_training_quant.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png">Run in Google Colab</a> </td>
<td> <a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ja/lite/performance/post_training_quant.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">View source on GitHub</a> </td>
<td><a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ja/lite/performance/post_training_quant.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png"> Download notebook</a></td>
<td> <a href="https://tfhub.dev/google/imagenet/resnet_v2_101/classification/4"><img src="https://www.tensorflow.org/images/hub_logo_32px.png"> See TF Hub model</a> </td>
</table>
Overview
TensorFlow Lite can convert weights to 8-bit precision as part of the model conversion from a TensorFlow graph definition to TensorFlow Lite's flat buffer format. Dynamic range quantization achieves a 4x reduction in model size. In addition, TFLite supports on-the-fly quantization and dequantization of activations to allow for:
Using quantized kernels for a faster implementation when available.
Mixing floating-point kernels with quantized kernels for different parts of the graph.
Activations are always stored in floating point. For ops that support quantized kernels, the activations are quantized to 8 bits of precision dynamically prior to processing and are dequantized to float precision after processing. Depending on the model being converted, this can give a speedup over pure floating-point computation.
In contrast to quantization-aware training, with this method the weights are quantized after training and the activations are quantized dynamically at inference time. Therefore, the model weights are not retrained to compensate for quantization-induced errors. It is important to check the accuracy of the quantized model to verify that any degradation is acceptable.
This tutorial trains an MNIST model from scratch, checks its accuracy in TensorFlow, and then converts the model into a TensorFlow Lite flatbuffer with dynamic range quantization. Finally, it checks the accuracy of the converted model and compares it to the original float model.
Build an MNIST model
Setup
End of explanation
"""
# Load MNIST dataset
mnist = keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# Normalize the input image so that each pixel value is between 0 to 1.
train_images = train_images / 255.0
test_images = test_images / 255.0
# Define the model architecture
model = keras.Sequential([
keras.layers.InputLayer(input_shape=(28, 28)),
keras.layers.Reshape(target_shape=(28, 28, 1)),
keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation=tf.nn.relu),
keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.Flatten(),
keras.layers.Dense(10)
])
# Train the digit classification model
model.compile(optimizer='adam',
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
model.fit(
train_images,
train_labels,
epochs=1,
validation_data=(test_images, test_labels)
)
"""
Explanation: Train a TensorFlow model
End of explanation
"""
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
"""
Explanation: Since the model was trained for only a single epoch in this example, it only trains to about 96% accuracy.
Convert to a TensorFlow Lite model
Using the Python TFLiteConverter, you can now convert the trained model into a TensorFlow Lite model.
Now load the model using the TFLiteConverter:
End of explanation
"""
tflite_models_dir = pathlib.Path("/tmp/mnist_tflite_models/")
tflite_models_dir.mkdir(exist_ok=True, parents=True)
tflite_model_file = tflite_models_dir/"mnist_model.tflite"
tflite_model_file.write_bytes(tflite_model)
"""
Explanation: Write it out to a tflite file:
End of explanation
"""
converter.optimizations = [tf.lite.Optimize.DEFAULT]
tflite_quant_model = converter.convert()
tflite_model_quant_file = tflite_models_dir/"mnist_model_quant.tflite"
tflite_model_quant_file.write_bytes(tflite_quant_model)
"""
Explanation: To quantize the model on export, set the optimizations flag to optimize for size:
End of explanation
"""
!ls -lh {tflite_models_dir}
"""
Explanation: Note how the resulting file is approximately 1/4 the size.
End of explanation
"""
interpreter = tf.lite.Interpreter(model_path=str(tflite_model_file))
interpreter.allocate_tensors()
interpreter_quant = tf.lite.Interpreter(model_path=str(tflite_model_quant_file))
interpreter_quant.allocate_tensors()
"""
Explanation: Run the TFLite models
Run the TensorFlow Lite model using the Python TensorFlow Lite Interpreter.
Load the model into an interpreter
End of explanation
"""
test_image = np.expand_dims(test_images[0], axis=0).astype(np.float32)
input_index = interpreter.get_input_details()[0]["index"]
output_index = interpreter.get_output_details()[0]["index"]
interpreter.set_tensor(input_index, test_image)
interpreter.invoke()
predictions = interpreter.get_tensor(output_index)
import matplotlib.pylab as plt
plt.imshow(test_images[0])
template = "True:{true}, predicted:{predict}"
_ = plt.title(template.format(true= str(test_labels[0]),
predict=str(np.argmax(predictions[0]))))
plt.grid(False)
"""
Explanation: Test the model on one image
End of explanation
"""
# A helper function to evaluate the TF Lite model using "test" dataset.
def evaluate_model(interpreter):
input_index = interpreter.get_input_details()[0]["index"]
output_index = interpreter.get_output_details()[0]["index"]
# Run predictions on every image in the "test" dataset.
prediction_digits = []
for test_image in test_images:
# Pre-processing: add batch dimension and convert to float32 to match with
# the model's input data format.
test_image = np.expand_dims(test_image, axis=0).astype(np.float32)
interpreter.set_tensor(input_index, test_image)
# Run inference.
interpreter.invoke()
# Post-processing: remove batch dimension and find the digit with highest
# probability.
output = interpreter.tensor(output_index)
digit = np.argmax(output()[0])
prediction_digits.append(digit)
# Compare prediction results with ground truth labels to calculate accuracy.
accurate_count = 0
for index in range(len(prediction_digits)):
if prediction_digits[index] == test_labels[index]:
accurate_count += 1
accuracy = accurate_count * 1.0 / len(prediction_digits)
return accuracy
print(evaluate_model(interpreter))
"""
Explanation: Evaluate the models
End of explanation
"""
print(evaluate_model(interpreter_quant))
"""
Explanation: Repeat the evaluation on the dynamic range quantized model to obtain:
End of explanation
"""
import tensorflow_hub as hub
resnet_v2_101 = tf.keras.Sequential([
keras.layers.InputLayer(input_shape=(224, 224, 3)),
hub.KerasLayer("https://tfhub.dev/google/imagenet/resnet_v2_101/classification/4")
])
converter = tf.lite.TFLiteConverter.from_keras_model(resnet_v2_101)
# Convert to TF Lite without quantization
resnet_tflite_file = tflite_models_dir/"resnet_v2_101.tflite"
resnet_tflite_file.write_bytes(converter.convert())
# Convert to TF Lite with quantization
converter.optimizations = [tf.lite.Optimize.DEFAULT]
resnet_quantized_tflite_file = tflite_models_dir/"resnet_v2_101_quantized.tflite"
resnet_quantized_tflite_file.write_bytes(converter.convert())
!ls -lh {tflite_models_dir}/*.tflite
"""
Explanation: In this example, the compressed model's accuracy is the same as that of the original float model.
Optimizing an existing model
Resnets with pre-activation layers (Resnet-v2) are widely used for vision applications. A pre-trained frozen graph for resnet-v2-101 is available on TensorFlow Hub.
You can convert the frozen graph into a TensorFlow Lite flatbuffer, with quantization, as follows:
End of explanation
"""
|
basp/notes | squares_and_roots.ipynb | mit | def plot_rect(ax, p, fmt='b'):
x, y = p
ax.plot([0, x], [y, y], fmt) # horizontal line
ax.plot([x, x], [0, y], fmt) # vertical line
with plt.xkcd():
fig, axes = plt.subplots(1, figsize=(4, 4))
pu.setup_axes(axes, xlim=(-1, 4), ylim=(-1, 4))
for x in [1,2,3]: plot_rect(axes, (x, x))
"""
Explanation: numbers on a plane
Numbers can be a lot more interesting than just a value if you're willing to shift your perspective a bit.
integers
When we are dealing with integers we are dealing with all the positive whole numbers, zero, and all the negative whole numbers. In math this set of numbers is often denoted by the symbol $\mathbb{Z}$. This is a countably infinite set, and even though these numbers are quite basic, we can try to get some more insight into their structure.
squares
If we take a number and multiply it by itself we get a square number. These are called square numbers because we can easily plot them as squares in a plot.
End of explanation
"""
with plt.xkcd():
fig, axes = plt.subplots(1, figsize=(4, 4))
pu.setup_axes(axes, xlim=(-1, 6), ylim=(-1, 6))
for x, y in [(1, 5), (5, 1)]:
plot_rect(axes, (x, y))
"""
Explanation: However, what happens when we have a non-square number such as $5$? We can't easily plot it as two equal lengths; we'll have to turn it into a rectangle of $1 \times 5$ or $5 \times 1$.
End of explanation
"""
|
fantasycheng/udacity-deep-learning-project | tutorials/intro-to-rnns/Anna_KaRNNa_Exercises.ipynb | mit | import time
from collections import namedtuple
import numpy as np
import tensorflow as tf
"""
Explanation: Anna KaRNNa
In this notebook, we'll build a character-wise RNN trained on Anna Karenina, one of my all-time favorite books. It'll be able to generate new text based on the text from the book.
This network is based off of Andrej Karpathy's post on RNNs and implementation in Torch. Also, some information here at r2rt and from Sherjil Ozair on GitHub. Below is the general architecture of the character-wise RNN.
<img src="assets/charseq.jpeg" width="500">
End of explanation
"""
with open('anna.txt', 'r') as f:
text=f.read()
vocab = set(text)
vocab_to_int = {c: i for i, c in enumerate(vocab)}
int_to_vocab = dict(enumerate(vocab))
encoded = np.array([vocab_to_int[c] for c in text], dtype=np.int32)
"""
Explanation: First we'll load the text file and convert it into integers for our network to use. Here I'm creating a couple dictionaries to convert the characters to and from integers. Encoding the characters as integers makes it easier to use as input in the network.
End of explanation
"""
text[:100]
"""
Explanation: Let's check out the first 100 characters, make sure everything is peachy. According to the American Book Review, this is the 6th best first line of a book ever.
End of explanation
"""
encoded[:100]
"""
Explanation: And we can see the characters encoded as integers.
End of explanation
"""
len(vocab)
"""
Explanation: Since the network is working with individual characters, it's similar to a classification problem in which we are trying to predict the next character from the previous text. Here's how many 'classes' our network has to pick from.
End of explanation
"""
def get_batches(arr, n_seqs, n_steps):
'''Create a generator that returns batches of size
n_seqs x n_steps from arr.
Arguments
---------
arr: Array you want to make batches from
n_seqs: Batch size, the number of sequences per batch
n_steps: Number of sequence steps per batch
'''
# Get the number of characters per batch and number of batches we can make
characters_per_batch =
n_batches =
# Keep only enough characters to make full batches
arr =
# Reshape into n_seqs rows
arr =
for n in range(0, arr.shape[1], n_steps):
# The features
x =
# The targets, shifted by one
y =
yield x, y
"""
Explanation: Making training mini-batches
Here is where we'll make our mini-batches for training. Remember that we want our batches to be multiple sequences of some desired number of sequence steps. Considering a simple example, our batches would look like this:
<img src="assets/sequence_batching@1x.png" width=500px>
<br>
We have our text encoded as integers as one long array in encoded. Let's create a function that will give us an iterator for our batches. I like using generator functions to do this. Then we can pass encoded into this function and get our batch generator.
The first thing we need to do is discard some of the text so we only have completely full batches. Each batch contains $N \times M$ characters, where $N$ is the batch size (the number of sequences) and $M$ is the number of steps. Then, to get the number of batches we can make from some array arr, you divide the length of arr by the batch size. Once you know the number of batches and the batch size, you can get the total number of characters to keep.
After that, we need to split arr into $N$ sequences. You can do this using arr.reshape(size) where size is a tuple containing the dimensions sizes of the reshaped array. We know we want $N$ sequences (n_seqs below), let's make that the size of the first dimension. For the second dimension, you can use -1 as a placeholder in the size, it'll fill up the array with the appropriate data for you. After this, you should have an array that is $N \times (M * K)$ where $K$ is the number of batches.
Now that we have this array, we can iterate through it to get our batches. The idea is each batch is a $N \times M$ window on the array. For each subsequent batch, the window moves over by n_steps. We also want to create both the input and target arrays. Remember that the targets are the inputs shifted over one character. You'll usually see the first input character used as the last target character, so something like this:
python
y[:, :-1], y[:, -1] = x[:, 1:], x[:, 0]
where x is the input batch and y is the target batch.
The way I like to do this window is use range to take steps of size n_steps from $0$ to arr.shape[1], the total number of steps in each sequence. That way, the integers you get from range always point to the start of a batch, and each window is n_steps wide.
Exercise: Write the code for creating batches in the function below. The exercises in this notebook will not be easy. I've provided a notebook with solutions alongside this notebook. If you get stuck, checkout the solutions. The most important thing is that you don't copy and paste the code into here, type out the solution code yourself.
End of explanation
"""
batches = get_batches(encoded, 10, 50)
x, y = next(batches)
print('x\n', x[:10, :10])
print('\ny\n', y[:10, :10])
"""
Explanation: Now I'll make my data sets and we can check out what's going on here. Here I'm going to use a batch size of 10 and 50 sequence steps.
End of explanation
"""
def build_inputs(batch_size, num_steps):
''' Define placeholders for inputs, targets, and dropout
Arguments
---------
batch_size: Batch size, number of sequences per batch
num_steps: Number of sequence steps in a batch
'''
# Declare placeholders we'll feed into the graph
inputs =
targets =
# Keep probability placeholder for drop out layers
keep_prob =
return inputs, targets, keep_prob
"""
Explanation: If you implemented get_batches correctly, the above output should look something like
```
x
[[55 63 69 22 6 76 45 5 16 35]
[ 5 69 1 5 12 52 6 5 56 52]
[48 29 12 61 35 35 8 64 76 78]
[12 5 24 39 45 29 12 56 5 63]
[ 5 29 6 5 29 78 28 5 78 29]
[ 5 13 6 5 36 69 78 35 52 12]
[63 76 12 5 18 52 1 76 5 58]
[34 5 73 39 6 5 12 52 36 5]
[ 6 5 29 78 12 79 6 61 5 59]
[ 5 78 69 29 24 5 6 52 5 63]]
y
[[63 69 22 6 76 45 5 16 35 35]
[69 1 5 12 52 6 5 56 52 29]
[29 12 61 35 35 8 64 76 78 28]
[ 5 24 39 45 29 12 56 5 63 29]
[29 6 5 29 78 28 5 78 29 45]
[13 6 5 36 69 78 35 52 12 43]
[76 12 5 18 52 1 76 5 58 52]
[ 5 73 39 6 5 12 52 36 5 78]
[ 5 29 78 12 79 6 61 5 59 63]
[78 69 29 24 5 6 52 5 63 76]]
```
although the exact numbers will be different. Check to make sure the data is shifted over one step for y.
Building the model
Below is where you'll build the network. We'll break it up into parts so it's easier to reason about each bit. Then we can connect them up into the whole network.
<img src="assets/charRNN.png" width=500px>
Inputs
First off we'll create our input placeholders. As usual we need placeholders for the training data and the targets. We'll also create a placeholder for dropout layers called keep_prob. This will be a scalar, that is a 0-D tensor. To make a scalar, you create a placeholder without giving it a size.
Exercise: Create the input placeholders in the function below.
End of explanation
"""
def build_lstm(lstm_size, num_layers, batch_size, keep_prob):
''' Build LSTM cell.
Arguments
---------
keep_prob: Scalar tensor (tf.placeholder) for the dropout keep probability
lstm_size: Size of the hidden layers in the LSTM cells
num_layers: Number of LSTM layers
batch_size: Batch size
'''
### Build the LSTM Cell
# Use a basic LSTM cell
lstm =
# Add dropout to the cell outputs
drop =
# Stack up multiple LSTM layers, for deep learning
cell =
initial_state =
return cell, initial_state
"""
Explanation: LSTM Cell
Here we will create the LSTM cell we'll use in the hidden layer. We'll use this cell as a building block for the RNN. So we aren't actually defining the RNN here, just the type of cell we'll use in the hidden layer.
We first create a basic LSTM cell with
python
lstm = tf.contrib.rnn.BasicLSTMCell(num_units)
where num_units is the number of units in the hidden layers in the cell. Then we can add dropout by wrapping it with
python
tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
You pass in a cell and it will automatically add dropout to the inputs or outputs. Finally, we can stack up the LSTM cells into layers with tf.contrib.rnn.MultiRNNCell. With this, you pass in a list of cells and it will send the output of one cell into the next cell. For example,
python
tf.contrib.rnn.MultiRNNCell([cell]*num_layers)
This might look a little weird if you know Python well because this will create a list of the same cell object. However, TensorFlow will create different weight matrices for all cell objects. Even though this is actually multiple LSTM cells stacked on each other, you can treat the multiple layers as one cell.
We also need to create an initial cell state of all zeros. This can be done like so
python
initial_state = cell.zero_state(batch_size, tf.float32)
Exercise: Below, implement the build_lstm function to create these LSTM cells and the initial state.
End of explanation
"""
def build_output(lstm_output, in_size, out_size):
''' Build a softmax layer, return the softmax output and logits.
Arguments
---------
lstm_output: List of output tensors from the LSTM layer
in_size: Size of the input tensor, for example, size of the LSTM cells
out_size: Size of this softmax layer
'''
# Reshape output so it's a bunch of rows, one row for each step for each sequence.
# Concatenate lstm_output over axis 1 (the columns)
seq_output =
# Reshape seq_output to a 2D tensor with lstm_size columns
x =
# Connect the RNN outputs to a softmax layer
with tf.variable_scope('softmax'):
# Create the weight and bias variables here
softmax_w =
softmax_b =
# Since output is a bunch of rows of RNN cell outputs, logits will be a bunch
# of rows of logit outputs, one for each step and sequence
logits =
# Use softmax to get the probabilities for predicted characters
out =
return out, logits
"""
Explanation: RNN Output
Here we'll create the output layer. We need to connect the output of the RNN cells to a full connected layer with a softmax output. The softmax output gives us a probability distribution we can use to predict the next character, so we want this layer to have size $C$, the number of classes/characters we have in our text.
If our input has batch size $N$, number of steps $M$, and the hidden layer has $L$ hidden units, then the output is a 3D tensor with size $N \times M \times L$. The output of each LSTM cell has size $L$, we have $M$ of them, one for each sequence step, and we have $N$ sequences. So the total size is $N \times M \times L$.
We are using the same fully connected layer, the same weights, for each of the outputs. Then, to make things easier, we should reshape the outputs into a 2D tensor with shape $(M * N) \times L$. That is, one row for each sequence and step, where the values of each row are the output from the LSTM cells. We get the LSTM output as a list, lstm_output. First we need to concatenate this whole list into one array with tf.concat. Then, reshape it (with tf.reshape) to size $(M * N) \times L$.
Once we have the outputs reshaped, we can do the matrix multiplication with the weights. We need to wrap the weight and bias variables in a variable scope with tf.variable_scope(scope_name) because there are weights being created in the LSTM cells. TensorFlow will throw an error if the weights created here have the same names as the weights created in the LSTM cells, which they will by default. To avoid this, we wrap the variables in a variable scope so we can give them unique names.
Exercise: Implement the output layer in the function below.
End of explanation
"""
def build_loss(logits, targets, lstm_size, num_classes):
''' Calculate the loss from the logits and the targets.
Arguments
---------
logits: Logits from final fully connected layer
targets: Targets for supervised learning
lstm_size: Number of LSTM hidden units
num_classes: Number of classes in targets
'''
# One-hot encode targets and reshape to match logits, one row per sequence per step
y_one_hot =
y_reshaped =
# Softmax cross entropy loss
loss =
return loss
"""
Explanation: Training loss
Next up is the training loss. We get the logits and targets and calculate the softmax cross-entropy loss. First we need to one-hot encode the targets, we're getting them as encoded characters. Then, reshape the one-hot targets so it's a 2D tensor with size $(MN) \times C$ where $C$ is the number of classes/characters we have. Remember that we reshaped the LSTM outputs and ran them through a fully connected layer with $C$ units. So our logits will also have size $(MN) \times C$.
Then we run the logits and targets through tf.nn.softmax_cross_entropy_with_logits and find the mean to get the loss.
Exercise: Implement the loss calculation in the function below.
End of explanation
"""
def build_optimizer(loss, learning_rate, grad_clip):
''' Build optmizer for training, using gradient clipping.
Arguments:
loss: Network loss
learning_rate: Learning rate for optimizer
'''
# Optimizer for training, using gradient clipping to control exploding gradients
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), grad_clip)
train_op = tf.train.AdamOptimizer(learning_rate)
optimizer = train_op.apply_gradients(zip(grads, tvars))
return optimizer
"""
Explanation: Optimizer
Here we build the optimizer. Normal RNNs have issues with gradients exploding and vanishing. LSTMs fix the vanishing problem, but the gradients can still grow without bound. To fix this, we can clip the gradients above some threshold. That is, if a gradient is larger than that threshold, we set it to the threshold. This will ensure the gradients never grow overly large. Then we use an AdamOptimizer for the learning step.
End of explanation
"""
class CharRNN:
def __init__(self, num_classes, batch_size=64, num_steps=50,
lstm_size=128, num_layers=2, learning_rate=0.001,
grad_clip=5, sampling=False):
# When we're using this network for sampling later, we'll be passing in
# one character at a time, so providing an option for that
if sampling == True:
batch_size, num_steps = 1, 1
else:
batch_size, num_steps = batch_size, num_steps
tf.reset_default_graph()
# Build the input placeholder tensors
self.inputs, self.targets, self.keep_prob =
# Build the LSTM cell
cell, self.initial_state =
### Run the data through the RNN layers
# First, one-hot encode the input tokens
x_one_hot =
# Run each sequence step through the RNN with tf.nn.dynamic_rnn
outputs, state =
self.final_state = state
# Get softmax predictions and logits
self.prediction, self.logits =
# Loss and optimizer (with gradient clipping)
self.loss =
self.optimizer =
"""
Explanation: Build the network
Now we can put all the pieces together and build a class for the network. To actually run data through the LSTM cells, we will use tf.nn.dynamic_rnn. This function will pass the hidden and cell states across LSTM cells appropriately for us. It returns the outputs for each LSTM cell at each step for each sequence in the mini-batch. It also gives us the final LSTM state. We want to save this state as final_state so we can pass it to the first LSTM cell in the next mini-batch run. For tf.nn.dynamic_rnn, we pass in the cell and initial state we get from build_lstm, as well as our input sequences. Also, we need to one-hot encode the inputs before going into the RNN.
Exercise: Use the functions you've implemented previously and tf.nn.dynamic_rnn to build the network.
End of explanation
"""
batch_size = 10 # Sequences per batch
num_steps = 50 # Number of sequence steps per batch
lstm_size = 128 # Size of hidden layers in LSTMs
num_layers = 2 # Number of LSTM layers
learning_rate = 0.01 # Learning rate
keep_prob = 0.5 # Dropout keep probability
"""
Explanation: Hyperparameters
Here are the hyperparameters for the network.
batch_size - Number of sequences running through the network in one pass.
num_steps - Number of characters in the sequence the network is trained on. Larger is better typically, the network will learn more long range dependencies. But it takes longer to train. 100 is typically a good number here.
lstm_size - The number of units in the hidden layers.
num_layers - Number of hidden LSTM layers to use
learning_rate - Learning rate for training
keep_prob - The dropout keep probability when training. If your network is overfitting, try decreasing this.
Here's some good advice from Andrej Karpathy on training the network. I'm going to copy it in here for your benefit, but also link to where it originally came from.
Tips and Tricks
Monitoring Validation Loss vs. Training Loss
If you're somewhat new to Machine Learning or Neural Networks it can take a bit of expertise to get good models. The most important quantity to keep track of is the difference between your training loss (printed during training) and the validation loss (printed once in a while when the RNN is run on the validation data (by default every 1000 iterations)). In particular:
If your training loss is much lower than validation loss then this means the network might be overfitting. Solutions to this are to decrease your network size, or to increase dropout. For example you could try dropout of 0.5 and so on.
If your training/validation loss are about equal then your model is underfitting. Increase the size of your model (either number of layers or the raw number of neurons per layer)
Approximate number of parameters
The two most important parameters that control the model are lstm_size and num_layers. I would advise that you always use num_layers of either 2/3. The lstm_size can be adjusted based on how much data you have. The two important quantities to keep track of here are:
The number of parameters in your model. This is printed when you start training.
The size of your dataset. 1MB file is approximately 1 million characters.
These two should be about the same order of magnitude. It's a little tricky to tell. Here are some examples:
I have a 100MB dataset and I'm using the default parameter settings (which currently print 150K parameters). My data size is significantly larger (100 mil >> 0.15 mil), so I expect to heavily underfit. I am thinking I can comfortably afford to make lstm_size larger.
I have a 10MB dataset and running a 10 million parameter model. I'm slightly nervous and I'm carefully monitoring my validation loss. If it's larger than my training loss then I may want to try to increase dropout a bit and see if that helps the validation loss.
Best models strategy
The winning strategy to obtaining very good models (if you have the compute time) is to always err on making the network larger (as large as you're willing to wait for it to compute) and then try different dropout values (between 0,1). Whatever model has the best validation performance (the loss, written in the checkpoint filename, low is good) is the one you should use in the end.
It is very common in deep learning to run many different models with many different hyperparameter settings, and in the end take whatever checkpoint gave the best validation performance.
By the way, the size of your training and validation splits are also parameters. Make sure you have a decent amount of data in your validation set or otherwise the validation performance will be noisy and not very informative.
End of explanation
"""
epochs = 20
# Save every N iterations
save_every_n = 200
model = CharRNN(len(vocab), batch_size=batch_size, num_steps=num_steps,
lstm_size=lstm_size, num_layers=num_layers,
learning_rate=learning_rate)
saver = tf.train.Saver(max_to_keep=100)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# Use the line below to load a checkpoint and resume training
#saver.restore(sess, 'checkpoints/______.ckpt')
counter = 0
for e in range(epochs):
# Train network
new_state = sess.run(model.initial_state)
loss = 0
for x, y in get_batches(encoded, batch_size, num_steps):
counter += 1
start = time.time()
feed = {model.inputs: x,
model.targets: y,
model.keep_prob: keep_prob,
model.initial_state: new_state}
batch_loss, new_state, _ = sess.run([model.loss,
model.final_state,
model.optimizer],
feed_dict=feed)
end = time.time()
print('Epoch: {}/{}... '.format(e+1, epochs),
'Training Step: {}... '.format(counter),
'Training loss: {:.4f}... '.format(batch_loss),
'{:.4f} sec/batch'.format((end-start)))
if (counter % save_every_n == 0):
saver.save(sess, "checkpoints/i{}_l{}.ckpt".format(counter, lstm_size))
saver.save(sess, "checkpoints/i{}_l{}.ckpt".format(counter, lstm_size))
"""
Explanation: Time for training
This is typical training code, passing inputs and targets into the network, then running the optimizer. Here we also get back the final LSTM state for the mini-batch. Then, we pass that state back into the network so the next batch can continue the state from the previous batch. And every so often (set by save_every_n) I save a checkpoint.
Here I'm saving checkpoints with the format
i{iteration number}_l{# hidden layer units}.ckpt
Exercise: Set the hyperparameters above to train the network. Watch the training loss, it should be consistently dropping. Also, I highly advise running this on a GPU.
End of explanation
"""
tf.train.get_checkpoint_state('checkpoints')
"""
Explanation: Saved checkpoints
Read up on saving and loading checkpoints here: https://www.tensorflow.org/programmers_guide/variables
End of explanation
"""
def pick_top_n(preds, vocab_size, top_n=5):
p = np.squeeze(preds)
p[np.argsort(p)[:-top_n]] = 0
p = p / np.sum(p)
c = np.random.choice(vocab_size, 1, p=p)[0]
return c
def sample(checkpoint, n_samples, lstm_size, vocab_size, prime="The "):
samples = [c for c in prime]
model = CharRNN(len(vocab), lstm_size=lstm_size, sampling=True)
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, checkpoint)
new_state = sess.run(model.initial_state)
for c in prime:
x = np.zeros((1, 1))
x[0,0] = vocab_to_int[c]
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
for i in range(n_samples):
x[0,0] = c
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
return ''.join(samples)
"""
Explanation: Sampling
Now that the network is trained, we can use it to generate new text. The idea is that we pass in a character, then the network will predict the next character. We can use the new one to predict the next one. And we keep doing this to generate all new text. I also included some functionality to prime the network with some text by passing in a string and building up a state from that.
The network gives us predictions for each character. To reduce noise and make things a little less random, I'm going to only choose a new character from the top N most likely characters.
End of explanation
"""
tf.train.latest_checkpoint('checkpoints')
checkpoint = tf.train.latest_checkpoint('checkpoints')
samp = sample(checkpoint, 2000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = 'checkpoints/i200_l512.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = 'checkpoints/i600_l512.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = 'checkpoints/i1200_l512.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
"""
Explanation: Here, pass in the path to a checkpoint and sample from the network.
End of explanation
"""
|
aylward/ITKTubeTK | examples/Demo-ConvertTubesToImage.ipynb | apache-2.0 | import os
import sys
import numpy
import itk
from itk import TubeTK as ttk
from itkwidgets import view
"""
Explanation: Convert Tubes To Images
This notebook contains a few examples of how to call wrapped methods in itk and ITKTubeTK.
ITK, ITKTubeTK, and ITKWidgets must be installed on your system for this notebook to work.
End of explanation
"""
PixelType = itk.F
Dimension = 3
ImageType = itk.Image[PixelType, Dimension]
# Read tre file
TubeFileReaderType = itk.SpatialObjectReader[Dimension]
tubeFileReader = TubeFileReaderType.New()
tubeFileReader.SetFileName("Data/MRI-Normals/Normal071-VascularNetwork.tre")
tubeFileReader.Update()
tubes = tubeFileReader.GetGroup()
# Read template image
TemplateImageType = itk.Image[PixelType, Dimension]
TemplateImageReaderType = itk.ImageFileReader[TemplateImageType]
templateImageReader = TemplateImageReaderType.New()
templateImageReader.SetFileName("Data/MRI-Normals/Normal071-MRA.mha")
templateImageReader.Update()
templateImage = templateImageReader.GetOutput()
"""
Explanation: Load the tubes and a reference image which provides the size, spacing, origin, and orientation for the desired output image.
End of explanation
"""
view(templateImage)
"""
Explanation: Visualize the template image, just because it looks cool - the data in the image is actually irrelevant.
End of explanation
"""
TubesToImageFilterType = ttk.ConvertTubesToImage[TemplateImageType]
tubesToImageFilter = TubesToImageFilterType.New()
tubesToImageFilter.SetUseRadius(True)
tubesToImageFilter.SetTemplateImage(templateImageReader.GetOutput())
tubesToImageFilter.SetInput(tubes)
tubesToImageFilter.Update()
outputImage = tubesToImageFilter.GetOutput()
"""
Explanation: Create a binary image that represents the spatial extent of the TubeSpatialObjects in the hierarchy of SpatialObjects in the variable "tubes" that was read-in above. If you only want to visualize centerlines of the tubes, set "UseRadius" to false.
End of explanation
"""
TTKImageMathType = ttk.ImageMath[ImageType,ImageType]
imMath = TTKImageMathType.New(Input = outputImage)
imMath.AddImages(templateImage, 2048, 1)
combinedImage = imMath.GetOutput()
view(combinedImage)
"""
Explanation: Visualize the results by blending the template and output images. Again, the content of the template image
doesn't actually matter, but since these tubes were generated from the content of the template image, blending them illustrates how well the binary tube image corresponds with their source image.
End of explanation
"""
|
bzamecnik/ml | instrument-classification/analyze_instrument_ranges.ipynb | mit | plt.hist(x_rms_instruments_notes[x_rms_instruments_notes <= 1].flatten(), 200);
plt.hist(x_rms_instruments_notes[x_rms_instruments_notes > 1].flatten(), 200);
"""
Explanation: There's a peak at a value around 1.0, which represents quiet.
End of explanation
"""
plt.imshow(x_rms_instruments_notes > 1, interpolation='none', cmap='gray')
plt.grid(True)
plt.suptitle('MIDI instruments range - RMS power')
plt.xlabel('MIDI note')
plt.ylabel('MIDI instrument')
plt.savefig('data/working/instrument_ranges_binary.png');
"""
Explanation: The range of each instrument splits into quiet (black) and sounding (white) regions. We can limit the pitches to the sounding ones.
End of explanation
"""
|
gouthambs/karuth-source | content/extra/notebooks/numba_example.ipynb | artistic-2.0 | import numpy as np
import numba
import cython
%load_ext cython
import pandas as pd
numba.__version__, cython.__version__, np.__version__
"""
Explanation: Optimizing Python Code: Numba vs Cython
Goutham Balaraman
I came across an old post by jakevdp on Numba vs Cython. I thought I would revisit this topic because both Numba and Cython have matured significantly over this time period. In this post I am going to do two examples:
1. The pairwise distance estimation example that Jake discusses. The intention is to see how the maturity of these projects has contributed to improvements.
2. A simple cashflow calculation for an amortizing bond or mortgage payments. This is a calculation that cannot be vectorized in a numpy sense, so the speedups would have to come from optimizing loops using tools like Numba or Cython.
End of explanation
"""
X = np.random.random((1000, 3))
def pairwise_python(X):
M = X.shape[0]
N = X.shape[1]
D = np.empty((M, M), dtype=np.float)
for i in range(M):
for j in range(M):
d = 0.0
for k in range(N):
tmp = X[i, k] - X[j, k]
d += tmp * tmp
D[i, j] = np.sqrt(d)
return D
%timeit -n10 pairwise_python(X)
def pairwise_numpy(X):
return np.sqrt(((X[:, None, :] - X) ** 2).sum(-1))
%timeit -n10 pairwise_numpy(X)
pairwise_numba = numba.jit(pairwise_python)
%timeit -n10 pairwise_numba(X)
%%cython
import numpy as np
cimport cython
from libc.math cimport sqrt
@cython.boundscheck(False)
@cython.wraparound(False)
def pairwise_cython(double[:, ::1] X):
cdef int M = X.shape[0]
cdef int N = X.shape[1]
cdef double tmp, d
cdef double[:, ::1] D = np.empty((M, M), dtype=np.float64)
for i in range(M):
for j in range(M):
d = 0.0
for k in range(N):
tmp = X[i, k] - X[j, k]
d += tmp * tmp
D[i, j] = sqrt(d)
return np.asarray(D)
%timeit -n10 pairwise_cython(X)
"""
Explanation: Pairwise Distance Estimation
End of explanation
"""
df1 = pd.DataFrame({"Time (ms)": [13400,111, 9.12, 9.87], "Speedup": [1, 121, 1469, 1357]},
index=["Python", "Numpy", "Numba", "Cython"])
df2 = pd.DataFrame({"Time (ms)": [2470, 38.3, 4.04, 6.6], "Speedup": [1, 65, 611, 374]},
index=["Python", "Numpy", "Numba", "Cython"])
df = pd.concat([df1, df2], axis = 1, keys=(["2013", "2017"]))
df
"""
Explanation: The timings for the results in Jake's post (2013) and the results from this post (2017) are summarized below.
End of explanation
"""
def amortize_payments_py(B0, R, term, cpr=0.0):
smm = 1. - pow(1 - cpr/100., 1/12.)
r = R/1200.
S = np.zeros(term)
P = np.zeros(term)
I = np.zeros(term)
B = np.zeros(term)
Pr = np.zeros(term)
Bt = B0
pow_term = pow(1+r, term)
A = Bt*r*pow_term/(pow_term - 1)
for i in range(term):
n = term-i
I[i] = Bt * r
Pr[i] = smm*Bt
S[i] = A-I[i] if Bt>1e-2 else 0.
P[i] = S[i] + Pr[i]
Bt = max(Bt - P[i], 0.0)
B[i] = Bt
return S,I, Pr,P, B
"""
Explanation: The timings and speedup numbers for the 2013 and 2017 runs are very different due to differences in library versions and perhaps even the Python version. This post uses Python 3.5 running on Windows. The takeaway here is that NumPy is at least two orders of magnitude faster than pure Python, and the Numba and Cython snippets are about an order of magnitude faster than NumPy in both benchmarks.
I will not rush to make any claims on Numba vs Cython. It is unclear what kinds of optimizations are used in the cython magic. I would expect the Cython code to be as fast as C, and perhaps some tweaking will help us get there. It is really interesting how easy it is to get a performance boost from Numba. From an ease-of-use point of view, Numba is the hands-down winner in this simple example.
Amortizing Payments
Here let's look at one more example: an amortizing payment calculation, such as monthly mortgage payments.
End of explanation
"""
%%cython
cimport cython
import numpy as np
from libc.math cimport pow
@cython.boundscheck(False)
@cython.wraparound(False)
def amortize_payments_cy(double B0,double R,int term,double cpr=0.0):
cdef double smm = 1. - pow(1 - cpr/100., 1/12.)
cdef double r = R/1200.
cdef double[:] D = np.empty(term, dtype=np.float64)
cdef double[:] S = np.empty(term, dtype=np.float64)
cdef double[:] P = np.empty(term, dtype=np.float64)
cdef double[:] I = np.empty(term, dtype=np.float64)
cdef double[:] B = np.empty(term, dtype=np.float64)
cdef double[:] Pr = np.empty(term, dtype=np.float64)
cdef double Bt = B0
cdef double pow_term = pow(1+r, term)
cdef double A = Bt*r*pow_term/(pow_term - 1.)
cdef double n = term
cdef int i=0
for i in range(term):
n = term-i
I[i] = Bt * r
Pr[i] = smm*Bt
S[i] = A-I[i] if Bt>1e-2 else 0.
P[i] = S[i] + Pr[i]
Bt = max(Bt - P[i], 0.0)
B[i] = Bt
return np.asarray(S),np.asarray(I), np.asarray(Pr),np.asarray(P), np.asarray(B)
"""
Explanation: Here is the equivalent Cython function.
End of explanation
"""
amortize_payments_nb = numba.njit(cache=True)(amortize_payments_py)
"""
Explanation: Here is the Numba version
End of explanation
"""
B0 = 500000.
R = 4.0
term = 360
"""
Explanation: Let's compare the performance of the three function types.
End of explanation
"""
%timeit -n1000 S,I, Pr,P, B = amortize_payments_py(B0, R, term, cpr=10)
"""
Explanation: Python
End of explanation
"""
%timeit -n1000 S,I, Pr,P, B = amortize_payments_nb(B0, R, term, cpr=10)
"""
Explanation: Numba
End of explanation
"""
%timeit -n1000 S,I, Pr,P, B = amortize_payments_cy(B0, R, term, cpr=10)
"""
Explanation: Cython
End of explanation
"""
|
CalPolyPat/phys202-project | .ipynb_checkpoints/NeuralNetworks-checkpoint.ipynb | mit | %matplotlib inline
import matplotlib.pyplot as plt
from IPython.html.widgets import interact
from sklearn.datasets import load_digits
digits = load_digits()
print(digits.data.shape)
def show_digit(i):
plt.matshow(digits.images[i]);
interact(show_digit, i=(0,100));
"""
Explanation: Neural Networks
This project was created by Brian Granger. All content is licensed under the MIT License.
Introduction
Neural networks are a class of algorithms that can learn how to compute the value of a function given previous examples of the function's output. Because neural networks are capable of learning how to compute the output of a function based on existing data, they generally fall under the field of Machine Learning.
Let's say that we don't know how to compute some function $f$:
$$ f(x) \rightarrow y $$
But we do have some data about the output that $f$ produces for particular input $x$:
$$ f(x_1) \rightarrow y_1 $$
$$ f(x_2) \rightarrow y_2 $$
$$ \ldots $$
$$ f(x_n) \rightarrow y_n $$
A neural network learns how to use that existing data to compute the value of the function $f$ on yet unseen data. Neural networks get their name from the similarity of their design to how neurons in the brain work.
Work on neural networks began in the 1940s, but significant advancements were made in the 1970s (backpropagation) and more recently, since the late 2000s, with the advent of deep neural networks. These days neural networks are starting to be used extensively in products that you use. A great example of the application of neural networks is the recently released Flickr automated image tagging. With these algorithms, Flickr is able to determine what tags ("kitten", "puppy") should be applied to each photo, without human involvement.
In this case the function takes an image as input and outputs a set of tags for that image:
$$ f(image) \rightarrow {tag_1, \ldots} $$
For the purpose of this project, good introductions to neural networks can be found at:
The Nature of Code, Daniel Shiffman.
Neural Networks and Deep Learning, Michael Nielsen.
Data Science from Scratch, Joel Grus
The Project
Your general goal is to write Python code to predict the number associated with handwritten digits. The dataset for these digits can be found in sklearn:
End of explanation
"""
digits.target
"""
Explanation: The actual, known values (0,1,2,3,4,5,6,7,8,9) associated with each image can be found in the target array:
End of explanation
"""
|
maxis42/ML-DA-Coursera-Yandex-MIPT | 1 Mathematics and Python/Lectures notebooks/10 numpy arrays and operations with them/matrix_operations-.ipynb | mit | import numpy as np
"""
Explanation: NumPy: matrices and operations on them
In this notebook the only third-party library we need is NumPy. For convenience we import it under a shorter name:
End of explanation
"""
a = np.array([[1, 2, 3], [2, 5, 6], [6, 7, 4]])
print "ะะฐััะธัะฐ:\n", a
"""
Explanation: 1. Creating matrices
Here are a few ways to create matrices in NumPy.
The simplest is the function numpy.array(list, dtype=None, ...).
Its first argument must be an iterable object whose elements are themselves iterables of equal length containing data of the same type.
The second argument is optional and sets the dtype of the matrix. It can be omitted, in which case the dtype is inferred from the elements of the first argument. If it is given, NumPy will try to cast the data to that type.
For example, a matrix can be created from a list of lists of integers as follows:
End of explanation
"""
b = np.eye(5)
print "ะะดะธะฝะธัะฝะฐั ะผะฐััะธัะฐ:\n", b
c = np.ones((7, 5))
print "ะะฐััะธัะฐ, ัะพััะพััะฐั ะธะท ะพะดะฝะธั
ะตะดะธะฝะธั:\n", c
"""
Explanation: A second way to create matrices is with the built-in functions numpy.eye(N, M=None, ...), numpy.zeros(shape, ...) and numpy.ones(shape, ...).
The first creates an identity matrix of size $N \times M$; if $M$ is not given, then $M = N$.
The second and third create matrices consisting entirely of zeros or ones, respectively. Their first argument is the shape of the array, a tuple of integers. In the two-dimensional case this is a pair of numbers: the number of rows and the number of columns of the matrix.
Examples:
End of explanation
"""
v = np.arange(0, 24, 2)
print "ะะตะบัะพั-ััะพะปะฑะตั:\n", v
d = v.reshape((3, 4))
print "ะะฐััะธัะฐ:\n", d
"""
Explanation: Note that the array shape is given by a single tuple argument, not by two separate arguments!
A call like np.ones(7, 5) will not create the array, because the shape parameter receives 7 rather than the tuple (7, 5).
Finally, a third way is the function numpy.arange([start, ]stop, [step, ], ...), which creates a one-dimensional array of consecutive numbers from the interval [start, stop) with the given step, combined with the method array.reshape(shape).
The shape parameter, as in the previous example, sets the shape of the matrix (a tuple of numbers). The method's behaviour is clear from the following example:
End of explanation
"""
print "ะะฐััะธัะฐ:\n", d
"""
Explanation: For more details on creating arrays in NumPy, see the documentation.
2. Indexing
There are several ways to access the elements of a matrix. Let us look at the simplest of them.
For convenience, recall what the matrix d looks like:
End of explanation
"""
print "ะัะพัะพะน ัะปะตะผะตะฝั ััะตััะตะน ัััะพะบะธ ะผะฐััะธัั:", d[2, 1]
"""
Explanation: The element at the intersection of row i and column j can be obtained with the expression array[i, j].
Note that rows and columns are numbered starting from zero!
End of explanation
"""
print "ะัะพัะฐั ัััะพะบะฐ ะผะฐััะธัั d:\n", d[1, :]
print "ะงะตัะฒะตัััะน ััะพะปะฑะตั ะผะฐััะธัั d:\n", d[:, 3]
"""
Explanation: Whole rows or columns can be extracted from a matrix with the expressions array[i, :] and array[:, j], respectively:
End of explanation
"""
print "ะญะปะตะผะตะฝัั ะผะฐััะธัั d ั ะบะพะพัะดะธะฝะฐัะฐะผะธ (1, 2) ะธ (0, 3):\n", d[[1, 0], [2, 3]]
"""
Explanation: Another way to access elements is the expression array[list1, list2], where list1 and list2 are lists of integers. With this kind of indexing both lists are traversed simultaneously and the matrix elements with the corresponding coordinates are returned. The following example makes the mechanism of such indexing clearer:
End of explanation
"""
a = np.array([1, 2, 3])
b = np.array([[1], [2], [3]])
"""
Explanation: For more details on the various ways of indexing arrays, see the documentation.
3. Vectors, row vectors and column vectors
The following two ways of defining an array look identical:
End of explanation
"""
print "ะะตะบัะพั:\n", a
print "ะะณะพ ัะฐะทะผะตัะฝะพััั:\n", a.shape
print "ะะฒัะผะตัะฝัะน ะผะฐััะธะฒ:\n", b
print "ะะณะพ ัะฐะทะผะตัะฝะพััั:\n", b.shape
"""
Explanation: However, they actually define a one-dimensional array (that is, a vector) and a two-dimensional array:
End of explanation
"""
a = a.T
b = b.T
print "ะะตะบัะพั ะฝะต ะธะทะผะตะฝะธะปัั:\n", a
print "ะะณะพ ัะฐะทะผะตัะฝะพััั ัะฐะบะถะต ะฝะต ะธะทะผะตะฝะธะปะฐัั:\n", a.shape
print "ะขัะฐะฝัะฟะพะฝะธัะพะฒะฐะฝะฝัะน ะดะฒัะผะตัะฝัะน ะผะฐััะธะฒ:\n", b
print "ะะณะพ ัะฐะทะผะตัะฝะพััั ะธะทะผะตะฝะธะปะฐัั:\n", b.shape
"""
Explanation: Note that a vector (a one-dimensional array) and a column or row vector (two-dimensional arrays) are different objects in NumPy, even though mathematically they describe the same object. For a one-dimensional array the shape tuple consists of a single number and has the form (n,), where n is the length of the vector. For two-dimensional vectors shape contains one more dimension, equal to one.
In most cases it does not matter which representation is used, because type coercion often takes care of things. But some operations do not work for one-dimensional arrays. For example, transposition (discussed below):
End of explanation
"""
a = np.array([[1, 0], [0, 1]])
b = np.array([[4, 1], [2, 2]])
r1 = np.dot(a, b)
r2 = a.dot(b)
print "ะะฐััะธัะฐ A:\n", a
print "ะะฐััะธัะฐ B:\n", b
print "ะ ะตะทัะปััะฐั ัะผะฝะพะถะตะฝะธั ััะฝะบัะธะตะน:\n", r1
print "ะ ะตะทัะปััะฐั ัะผะฝะพะถะตะฝะธั ะผะตัะพะดะพะผ:\n", r2
"""
Explanation: 4. Multiplying matrices and columns
A reminder of the theory. Matrix multiplication is defined for two matrices such that the number of columns of the first equals the number of rows of the second.
Let $A$ and $B$ be matrices with $A \in \mathbb{R}^{n \times k}$ and $B \in \mathbb{R}^{k \times m}$. The product of $A$ and $B$ is the matrix $C$ such that $c_{ij} = \sum_{r=1}^{k} a_{ir}b_{rj}$, where $c_{ij}$ is the element of $C$ at the intersection of row $i$ and column $j$.
In NumPy the matrix product is computed with the function numpy.dot(a, b, ...) or with the method array1.dot(array2), where array1 and array2 are the matrices being multiplied.
End of explanation
"""
c = np.array([1, 2])
r3 = b.dot(c)
print "ะะฐััะธัะฐ:\n", b
print "ะะตะบัะพั:\n", c
print "ะ ะตะทัะปััะฐั ัะผะฝะพะถะตะฝะธั:\n", r3
"""
Explanation: Matrices in NumPy can also be multiplied by vectors:
End of explanation
"""
r = a * b
print "ะะฐััะธัะฐ A:\n", a
print "ะะฐััะธัะฐ B:\n", b
print "ะ ะตะทัะปััะฐั ะฟะพะบะพะพัะดะธะฝะฐัะฝะพะณะพ ัะผะฝะพะถะตะฝะธั ัะตัะตะท ะพะฟะตัะฐัะธั *:\n", r
"""
Explanation: Note that the * operator performs element-wise multiplication of matrices, not matrix multiplication!
End of explanation
"""
a = np.array([[1, 2], [3, 4]])
b = np.transpose(a)
c = a.T
print "ะะฐััะธัะฐ:\n", a
print "ะขัะฐะฝัะฟะพะฝะธัะพะฒะฐะฝะธะต ััะฝะบัะธะตะน:\n", b
print "ะขัะฐะฝัะฟะพะฝะธัะพะฒะฐะฝะธะต ะผะตัะพะดะพะผ:\n", c
"""
Explanation: For more details on matrix multiplication in NumPy, see the documentation.
5. Transposing matrices
A reminder of the theory. The transpose $A^{T}$ of a matrix $A$ is the matrix obtained from $A$ by swapping rows and columns. Formally, the elements of $A^{T}$ are defined as $a^{T}_{ij} = a_{ji}$, where $a^{T}_{ij}$ is the element of $A^{T}$ at the intersection of row $i$ and column $j$.
In NumPy the transpose is computed with the function numpy.transpose() or with the attribute array.T, where array is the two-dimensional array in question.
End of explanation
"""
a = np.array([[1, 2, 1], [1, 1, 4], [2, 3, 6]], dtype=np.float32)
det = np.linalg.det(a)
print "ะะฐััะธัะฐ:\n", a
print "ะะฟัะตะดะตะปะธัะตะปั:\n", det
"""
Explanation: See the documentation on numpy.transpose() and array.T in NumPy for more details.
The following sections make heavy use of the numpy.linalg module, which implements a number of linear-algebra routines. More details on the functions described below, and on the other functions of this module, can be found in its documentation.
6. The determinant of a matrix
A reminder of the theory. For square matrices there is the notion of a determinant.
Let $A$ be a square matrix. The determinant of a matrix $A \in \mathbb{R}^{n \times n}$ is the number
$$\det A = \sum_{\alpha_{1}, \alpha_{2}, \dots, \alpha_{n}} (-1)^{N(\alpha_{1}, \alpha_{2}, \dots, \alpha_{n})} \cdot a_{\alpha_{1} 1} \cdots a_{\alpha_{n} n},
$$
where $\alpha_{1}, \alpha_{2}, \dots, \alpha_{n}$ is a permutation of the numbers from $1$ to $n$, $N(\alpha_{1}, \alpha_{2}, \dots, \alpha_{n})$ is the number of inversions in the permutation, and the sum runs over all possible permutations of length $n$.
Do not worry if this definition is not entirely clear; it will not be needed in this form later on.
For example, for a $2 \times 2$ matrix we get:
$$\det \left( \begin{array}{cc} a_{11} & a_{12} \\ a_{21} & a_{22} \end{array} \right) = a_{11} a_{22} - a_{12} a_{21}
$$
Computing the determinant directly from the definition requires on the order of $n!$ operations, so methods have been developed to compute it quickly and efficiently.
In NumPy the determinant of a matrix is computed with the function numpy.linalg.det(a), where a is the input matrix.
End of explanation
"""
a = np.array([[1, 2, 3], [1, 1, 1], [2, 2, 2]])
r = np.linalg.matrix_rank(a)
print "ะะฐััะธัะฐ:\n", a
print "ะ ะฐะฝะณ ะผะฐััะธัั:", r
"""
Explanation: Let us look at one interesting property of the determinant. Suppose we have a parallelogram with corners at the points $(0, 0), (c,d), (a+c, b+d), (a, b)$ (the corners are listed in clockwise order). Then the area of this parallelogram can be computed as the absolute value of the determinant of the matrix $\left( \begin{array}{cc} a & c \\ b & d \end{array} \right)$. In a similar way the volume of a parallelepiped can be expressed through the determinant of a $3 \times 3$ matrix.
7. The rank of a matrix
A reminder of the theory. The rank of a matrix $A$ is the maximum number of linearly independent rows (or columns) of that matrix.
In NumPy the rank of a matrix is computed with the function numpy.linalg.matrix_rank(M, tol=None), where M is the matrix and tol is a parameter controlling the numerical tolerance of the computation. In simple cases it can be left unset and the function will pick a suitable value itself.
End of explanation
"""
a = np.array([1, 2, 3])
b = np.array([1, 1, 1])
c = np.array([2, 3, 5])
m = np.array([a, b, c])
print np.linalg.matrix_rank(m) == m.shape[0]
"""
Explanation: Computing the rank of a matrix can be used to check whether a system of vectors is linearly independent.
Suppose we have several vectors. Stack them into a matrix whose rows are these vectors. Clearly, the vectors are linearly independent if and only if the rank of the resulting matrix equals the number of vectors. An example:
End of explanation
"""
a = np.array([[3, 1], [1, 2]])
b = np.array([9, 8])
x = np.linalg.solve(a, b)
print "ะะฐััะธัะฐ A:\n", a
print "ะะตะบัะพั b:\n", b
print "ะ ะตัะตะฝะธะต ัะธััะตะผั:\n", x
"""
Explanation: 8. Systems of linear equations
A reminder of the theory. A system of linear algebraic equations is a system of the form $Ax = b$, where $A \in \mathbb{R}^{n \times m}, x \in \mathbb{R}^{m \times 1}, b \in \mathbb{R}^{n \times 1}$. When $A$ is a square non-singular matrix the solution of the system is unique.
In NumPy the solution of such a system can be found with the function numpy.linalg.solve(a, b), where the first argument is the matrix $A$ and the second is the column $b$.
End of explanation
"""
print a.dot(x)
"""
Explanation: Let us check that the vector x really is a solution of the system:
End of explanation
"""
a = np.array([[0, 1], [1, 1], [2, 1], [3, 1]])
b = np.array([-1, 0.2, 0.9, 2.1])
x, res, r, s = np.linalg.lstsq(a, b)
print "ะะฐััะธัะฐ A:\n", a
print "ะะตะบัะพั b:\n", b
print "ะัะตะฒะดะพัะตัะตะฝะธะต ัะธััะตะผั:\n", x
"""
Explanation: There are cases where the system has no solution, but we would still like to "solve" it in some sense. A natural idea is to look for a vector $x$ that minimizes the expression $\left\Vert Ax - b\right\Vert^{2}$, thereby bringing $Ax$ as close as possible to $b$.
In NumPy such a pseudo-solution can be found with the function numpy.linalg.lstsq(a, b, ...), whose first two arguments are the same as for numpy.linalg.solve().
Besides the solution, the function returns three more values, which we will not need here.
End of explanation
"""
a = np.array([[1, 2, 1], [1, 1, 4], [2, 3, 6]], dtype=np.float32)
b = np.linalg.inv(a)
print "ะะฐััะธัะฐ A:\n", a
print "ะะฑัะฐัะฝะฐั ะผะฐััะธัะฐ ะบ A:\n", b
print "ะัะพะธะทะฒะตะดะตะฝะธะต A ะฝะฐ ะพะฑัะฐัะฝัั ะดะพะปะถะฝะฐ ะฑััั ะตะดะธะฝะธัะฝะพะน:\n", a.dot(b)
"""
Explanation: 9. Matrix inversion
A reminder of the theory. For square non-singular matrices the notion of an inverse matrix is defined.
Let $A$ be a square non-singular matrix. The matrix $A^{-1}$ is called the inverse of $A$ if
$$AA^{-1} = A^{-1}A = I,
$$
where $I$ is the identity matrix.
In NumPy inverse matrices are computed with the function numpy.linalg.inv(a), where a is the input matrix.
End of explanation
"""
a = np.array([[-1, -6], [2, 6]])
w, v = np.linalg.eig(a)
print "ะะฐััะธัะฐ A:\n", a
print "ะกะพะฑััะฒะตะฝะฝัะต ัะธัะปะฐ:\n", w
print "ะกะพะฑััะฒะตะฝะฝัะต ะฒะตะบัะพัั:\n", v
"""
Explanation: 10. Eigenvalues and eigenvectors of a matrix
A reminder of the theory. For square matrices the notions of eigenvector and eigenvalue are defined.
Let $A$ be a square matrix with $A \in \mathbb{R}^{n \times n}$. An eigenvector of $A$ is a nonzero vector $x \in \mathbb{R}^{n}$ such that $Ax = \lambda x$ for some $\lambda \in \mathbb{R}$. The number $\lambda$ is called an eigenvalue of $A$. Eigenvalues and eigenvectors play an important role in linear algebra and its practical applications.
In NumPy the eigenvalues and eigenvectors of a matrix are computed with the function numpy.linalg.eig(a), where a is the input matrix. The function returns a one-dimensional array w of eigenvalues and a two-dimensional array v whose columns are the eigenvectors, so that the eigenvector v[:, i] corresponds to the eigenvalue w[i].
End of explanation
"""
a = 3 + 2j
b = 1j
print "ะะพะผะฟะปะตะบัะฝะพะต ัะธัะปะพ a:\n", a
print "ะะพะผะฟะปะตะบัะฝะพะต ัะธัะปะพ b:\n", b
"""
Explanation: Note that a real matrix can have complex eigenvalues and eigenvectors.
11. Complex numbers in Python
Attention: this material is supplementary; studying it is not required for completing the tests.
A reminder of the theory. Complex numbers are numbers of the form $x + iy$, where $x$ and $y$ are real numbers and $i$ is the imaginary unit (a quantity for which $i^{2} = -1$ holds). The set of all complex numbers is denoted $\mathbb{C}$ (see Wikipedia for more on complex numbers).
In Python complex numbers can be written as follows (j denotes the imaginary unit):
End of explanation
"""
c = a * a
d = a / (4 - 5j)
print "ะะพะผะฟะปะตะบัะฝะพะต ัะธัะปะพ c:\n", c
print "ะะพะผะฟะปะตะบัะฝะพะต ัะธัะปะพ d:\n", d
"""
Explanation: Complex numbers in Python support the basic arithmetic operations just like real numbers:
End of explanation
"""
|
sadahanu/Capstone | SCRAPE/review_gather.ipynb | mit | # create the category data frame
cat_id = [1,2,3,4,5]
category = ['Balls and Fetch Toys','Chew Toys','Plush Toys','Interactive Toys','Rope and Tug']
link = ['https://www.chewy.com/s?rh=c%3A288%2Cc%3A315%2Cc%3A317','https://www.chewy.com/s?rh=c%3A288%2Cc%3A315%2Cc%3A316',
'https://www.chewy.com/s?rh=c%3A288%2Cc%3A315%2Cc%3A320','https://www.chewy.com/s?rh=c%3A288%2Cc%3A315%2Cc%3A319',
'https://www.chewy.com/s?rh=c%3A288%2Cc%3A315%2Cc%3A321']
pagerange = [19,15,17,8,9]
df_cat = pd.DataFrame({'cat_id':cat_id,'category':category,'link':link,'page range':pagerange})
df_data = df_cat.to_csv(index=False)
s3_res = boto3.resource('s3')
s3_res.Bucket('dogfaces').put_object(Key='reviews/category.csv', Body=df_data)
df_cat = pd.read_csv("s3://dogfaces/reviews/category.csv")
df_cat.head()
"""
Explanation: Data base structures at chewy.com
categories:
|cat id|category | link| page range|
|:-|:--------|:---:|:----------|
|1|Balls and Fetch Toys| https://www.chewy.com/s?rh=c%3A288%2Cc%3A315%2Cc%3A317 | 19|
|2|Chew Toys|https://www.chewy.com/s?rh=c%3A288%2Cc%3A315%2Cc%3A316 | 15|
|3|Plush Toys|https://www.chewy.com/s?rh=c%3A288%2Cc%3A315%2Cc%3A320 | 17|
|4|Interactive Toys|https://www.chewy.com/s?rh=c%3A288%2Cc%3A315%2Cc%3A319 |8|
|5|Rope and Tug | https://www.chewy.com/s?rh=c%3A288%2Cc%3A315%2Cc%3A321 |9|
for chewy.com the toy listing page is: category page link + '&page=?'
for each toy, build a toy table:
toys:
|toyid |name | link| catid | picture_link|price |reviews|review_link|review_page_num|
|:-----|:----|:----|:------|:------------|:-----|:------|:----------|:--------------|
|114660|usa-bones-chews-cotton-rope-bones|https://www.chewy.com/usa-bones-chews-cotton-rope-bones/dp/114660 |5| https://img.chewy.com/is/catalog/86885_MAIN.AC_SL400_V1477926797.jpg |5.69|568|/usa-bones-chews-cotton-rope-bones/product-reviews/114660?reviewSort=NEWEST&reviewFilter=ALL_STARS&pageNumber=1|58|
also build a review table:
|reviewid|toyid|toy_name | user_name| stars| review_text| time| image|image_id| useful|
|:-------|:----|:--------|:---------|:------|:-----------|:----|:---------|:------|:---|
|9809823|114660|usa-bones-chews-cotton-rope-bones|Steffy|5|We have...|Sep 2, 2017|http://chewy.ugc.bazaarvoice.com/0090-en_us/2540529/photo.jpg |0090-en_us_2540529| 0 |
also build an image table:
|image_id|image_link|image_name|
|:-------|:---------|:---------|
|0090-en_us_2540529|http://chewy.ugc.bazaarvoice.com/0090-en_us/2540529/photo.jpg | 0090-en_us_2540529.jpg|
End of explanation
"""
def get_cat_link(cat_id):
try:
df = DF_CAT
except NameError:
df = pd.read_csv("s3://dogfaces/reviews/category.csv")
link = df[df['cat_id']==cat_id]['link'].values[0]
page_range = df[df['cat_id']==cat_id]['page range'].values[0]
return link, page_range
link, page_range = get_cat_link(1)
def get_toys(cat_id):
link, page_range = get_cat_link(cat_id)
res = []
for i in xrange(page_range):
toys_url = link+'&page={}'.format(i+1)
r = requests.get(toys_url)
if r.status_code == 200:
soup = BeautifulSoup(r.content)
for item in soup.select("article.product-holder.cw-card.cw-card-hover"):
num_reviews = int(item.select('p.rating.item-rating')[0].find('span').get_text())
if num_reviews > 10:
toy = {}
toy['num_reviews'] = num_reviews
raw_id = item.select('a')[0]['href']
toy['toy_link'] = "https://www.chewy.com"+item.select('a')[0]['href']
toy['toy_id'] = raw_id.split('/')[-1]
toy['toy_name'] = raw_id.split('/')[1]
toy['picture_link'] = "https:" + item.select('img')[0]['src']
toy['price'] = item.select('p.price')[0].get_text().split()[0]
res.append(toy)
return res
temp = get_toys(3)
df_test = pd.DataFrame.from_dict(temp)
print df_test['toy_link'][10]
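# Illustrative addition (the bucket and key below follow the category example above and
# are assumptions, not part of the original notebook): persist the scraped toys to S3
# so later steps can reuse them without re-scraping.
toys_csv = df_test.to_csv(index=False)
s3_res.Bucket('dogfaces').put_object(Key='reviews/toys_cat3.csv', Body=toys_csv)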
from list_toys import *
get_toys(4)
print url
"""
Explanation: Start with rope and tug
End of explanation
"""
|
RJTK/dwglasso_cweeds | notebooks/clean_data.ipynb | mit | print('Original bounds: ', t[0], t[-1])
t_obs = t[D['T_flag'] != -1]
D = D[t_obs[0]:t_obs[-1]] # Truncate dataframe so it is sandwiched between observed values
t = D.index
T = D['T']
print('New bounds: ', t[0], t[-1])
t_obs = D.index[D['T_flag'] != -1]
t_interp = D.index[D['T_flag'] == -1]
T_obs = D.loc[t_obs, 'T']
T_interp = D.loc[t_interp, 'T']
c = ['b' if flag != -1 else 'orange' for flag in D['T_flag']]
plt.scatter(t, T, c = c, alpha = 0.5, s = 0.5)
plt.title('T')
#obs = plt.scatter(t_obs, T_obs, marker = '.', alpha = 0.5, s = 0.5, color = 'blue');
#interp = plt.scatter(t_interp, T_interp, marker = '.', alpha = 0.5, s = 0.5, color = 'red');
# If I plot one after the other, the red is much more prominant... Very annoying
#plt.legend((obs, interp), ('Observed', 'Interpolated'), markerscale = 15);
"""
Explanation: Step 1: Truncate the series to the interval that has observations. Outside this interval the interpolation blows up.
End of explanation
"""
# Centre the data
mu = D['T'].mean()
D.loc[:, 'T'] = D.loc[:, 'T'] - mu
T = D['T']
print('E[T] = ', mu)
"""
Explanation: Orange dots are interpolated values; blue dots are observed values.
End of explanation
"""
T0 = T[0]
dT = T.diff()
dT = dT - dT.mean() # Center the differences
dT_obs = dT[t_obs]
dT_interp = dT[t_interp]
plt.scatter(t, dT, marker = '.', alpha = 0.5, s = 0.5, c = c)
#obs = plt.scatter(t_obs, dT_obs, marker = '.', alpha = 0.5, s = 0.5, color = 'blue');
#interp = plt.scatter(t_interp, dT_interp, marker = '.', alpha = 0.5, s = 0.5, color = 'red');
#plt.legend((obs, interp), ('Observed', 'Interpolated'), markerscale = 15);
plt.title('dT')
"""
Explanation: We want to obtain a stationary "feature" from the data; first differences are an easy place to start.
End of explanation
"""
rolling1w_dT = dT.rolling(window = 7*24) # 1 week rolling window of dT
rolling1m_dT = dT.rolling(window = 30*24) # 1 month rolling window of dT
rolling1y_dT = dT.rolling(window = 365*24) # 1 year rolling dindow of dT
fig, axes = plt.subplots(3, 1)
axes[0].plot(rolling1w_dT.var())
axes[1].plot(rolling1m_dT.var())
axes[2].plot(rolling1y_dT.var())
"""
Explanation: It appears that early temperature sensors had rather imprecise readings.
It also appears as though the interpolation introduces some systematic errors. I used pchip interpolation, which tries to avoid overshoot, so we may be seeing the effects of clipping. This would particularly make sense if missing data was from regular periods, e.g. at night when the temperature was reaching a minimum.
End of explanation
"""
from itertools import product
t_days = [t[np.logical_and(t.month == m, t.day == d)] for m, d in product(range(1,13), range(1, 32))]
day_vars = pd.Series(dT[ti].var() for ti in t_days)
day_vars = day_vars.dropna()
plt.scatter(day_vars.index, day_vars)
r = day_vars.rolling(window = 20, center = True)
plt.plot(day_vars.index, r.mean(), color = 'red', linewidth = 2)
plt.title('Variance of dT, folded by days')
"""
Explanation: It looks like there is still some nonstationarity in the first differences.
End of explanation
"""
|
kit-cel/wt | sigNT/systems/frequency_response.ipynb | gpl-2.0 | # importing
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
# showing figures inline
%matplotlib inline
# plotting options
font = {'size' : 20}
plt.rc('font', **font)
plt.rc('text', usetex=True)
matplotlib.rc('figure', figsize=(18, 10) )
"""
Explanation: Content and Objective
Show that frequency response can be generated by stimulating an LTI system with harmonics.
It is shown that Fourier transform of the impulse response yields identical results.
Importing and Plotting Options
End of explanation
"""
# length of impulse response
N = 10
# switch for choosing different impulse responses --> you may add more options if you like to
switch = 2
if switch == 1:
h = np.ones(N)
elif switch == 2:
a = 0.5
h = a**( - np.arange( 0, N ) )
# padding zeros
h = np.hstack( [h, np.zeros_like( h ) ] )
"""
Explanation: Parameters
End of explanation
"""
# frequency response by FFT
H_fft = np.fft.fft( np.hstack( [ h, np.zeros( 9 * len( h ) ) ] ) )
# frequency domain out of FFT parameters
delta_Omega = 2 * np.pi / len(H_fft )
Omega = np.arange( -np.pi, np.pi, delta_Omega )
"""
Explanation: Getting Frequency Response by Applying FFT
End of explanation
"""
# coarse quantiziation of frequency regime for the filterung in order to reduce computational load
N_coarse = 100
delta_Omega_coarse = 2 * np.pi / N_coarse
Omega_coarse = np.arange( -np.pi, np.pi, delta_Omega_coarse )
# getting values of frequency response by filtering
H_response = np.zeros_like( Omega_coarse, dtype = 'complex' )
for ind_Omega, val_Omega in enumerate( Omega_coarse ):
# length of signal, time vector and IN signal
N_sig = 500
n = np.arange( 0, N_sig + 1 )
x = np.exp( 1j * val_Omega * n )
# OUT signal by convolution
y = np.convolve( x, h )
# frequency response as factor
# NOTE: since the factor is the same for all times, an arbitrary sample may be chosen
H_response[ ind_Omega ] = y[ N_sig // 4 ] * x[ N_sig // 4 ].conjugate()
"""
Explanation: Getting Frequency Response as Response to Harmonics
End of explanation
"""
plt.figure()
plt.plot( Omega, np.abs( np.fft.fftshift( H_fft ) ), label= '$|H_{FFT}(\\Omega)|$' )
plt.plot( Omega_coarse, np.abs( H_response ), label= '$|H_{resp.}(\\Omega)|$')
plt.grid( True )
plt.xlabel('$\\Omega$')
plt.legend( loc='upper right')
plt.figure()
plt.plot( Omega, np.angle( np.fft.fftshift( H_fft ) ), label = '$\\angle H_{FFT}(\\Omega)$' )
plt.plot( Omega_coarse, np.angle( H_response ), label = '$\\angle H_{resp.}(\\Omega)$' )
plt.grid( True )
plt.xlabel('$\\Omega$')
plt.legend( loc='upper right')
plt.figure()
plt.plot( Omega[:-1], - np.diff( np.angle( np.fft.fftshift( H_fft ) ) ), label = '$\\tau_{g}(\\Omega)$' )
plt.grid( True )
plt.xlabel('$\\Omega$')
plt.legend( loc='upper right')
"""
Explanation: Plotting
End of explanation
"""
|
navaro1/deep-learning | intro-to-tflearn/TFLearn_Sentiment_Analysis.ipynb | mit | import pandas as pd
import numpy as np
import tensorflow as tf
import tflearn
from tflearn.data_utils import to_categorical
"""
Explanation: Sentiment analysis with TFLearn
In this notebook, we'll continue Andrew Trask's work by building a network for sentiment analysis on the movie review data. Instead of a network written with Numpy, we'll be using TFLearn, a high-level library built on top of TensorFlow. TFLearn makes it simpler to build networks just by defining the layers. It takes care of most of the details for you.
We'll start off by importing all the modules we'll need, then load and prepare the data.
End of explanation
"""
reviews = pd.read_csv('reviews.txt', header=None)
labels = pd.read_csv('labels.txt', header=None)
"""
Explanation: Preparing the data
Following along with Andrew, our goal here is to convert our reviews into word vectors. The word vectors will have elements representing words in the total vocabulary. If the second position represents the word 'the', for each review we'll count up the number of times 'the' appears in the text and set the second position to that count. I'll show you examples as we build the input data from the reviews data. Check out Andrew's notebook and video for more about this.
Read the data
Use the pandas library to read the reviews and postive/negative labels from comma-separated files. The data we're using has already been preprocessed a bit and we know it uses only lower case characters. If we were working from raw data, where we didn't know it was all lower case, we would want to add a step here to convert it. That's so we treat different variations of the same word, like The, the, and THE, all the same way.
End of explanation
"""
from collections import Counter
total_counts = Counter()
for review in reviews.values:
for word in review[0].split(" "):
total_counts[word] += 1
print("Total words in data set: ", len(total_counts))
"""
Explanation: Counting word frequency
To start off we'll need to count how often each word appears in the data. We'll use this count to create a vocabulary we'll use to encode the review data. This resulting count is known as a bag of words. We'll use it to select our vocabulary and build the word vectors. You should have seen how to do this in Andrew's lesson. Try to implement it here using the Counter class.
Exercise: Create the bag of words from the reviews data and assign it to total_counts. The reviews are stores in the reviews Pandas DataFrame. If you want the reviews as a Numpy array, use reviews.values. You can iterate through the rows in the DataFrame with for idx, row in reviews.iterrows(): (documentation). When you break up the reviews into words, use .split(' ') instead of .split() so your results match ours.
End of explanation
"""
vocab = sorted(total_counts, key=total_counts.get, reverse=True)[:10000]
print(vocab[:60])
"""
Explanation: Let's keep the first 10000 most frequent words. As Andrew noted, most of the words in the vocabulary are rarely used so they will have little effect on our predictions. Below, we'll sort vocab by the count value and keep the 10000 most frequent words.
End of explanation
"""
print(vocab[-1], ': ', total_counts[vocab[-1]])
"""
Explanation: What's the last word in our vocabulary? We can use this to judge if 10000 is too few. If the last word is pretty common, we probably need to keep more words.
End of explanation
"""
word2idx = {word: idx for idx, word in enumerate(vocab)}
"""
Explanation: The last word in our vocabulary shows up in 30 reviews out of 25000. I think it's fair to say this is a tiny proportion of reviews. We are probably fine with this number of words.
Note: When you run, you may see a different word from the one shown above, but it will also have the value 30. That's because there are many words tied for that number of counts, and the Counter class does not guarantee which one will be returned in the case of a tie.
Now for each review in the data, we'll make a word vector. First we need to make a mapping of word to index, pretty easy to do with a dictionary comprehension.
Exercise: Create a dictionary called word2idx that maps each word in the vocabulary to an index. The first word in vocab has index 0, the second word has index 1, and so on.
End of explanation
"""
def text_to_vector(text):
result = np.zeros([1, len(vocab)])
for word in text.split(" "):
idx = word2idx.get(word, None)
if idx is not None:
result[0][idx] += 1
return result
"""
Explanation: Text to vector function
Now we can write a function that converts a some text to a word vector. The function will take a string of words as input and return a vector with the words counted up. Here's the general algorithm to do this:
Initialize the word vector with np.zeros, it should be the length of the vocabulary.
Split the input string of text into a list of words with .split(' '). Again, if you call .split() instead, you'll get slightly different results than what we show here.
For each word in that list, increment the element in the index associated with that word, which you get from word2idx.
Note: Since all words aren't in the vocab dictionary, you'll get a key error if you run into one of those words. You can use the .get method of the word2idx dictionary to specify a default returned value when you make a key error. For example, word2idx.get(word, None) returns None if word doesn't exist in the dictionary.
End of explanation
"""
text_to_vector('The tea is for a party to celebrate '
'the movie so she has no time for a cake')[:65]
"""
Explanation: If you do this right, the following code should return
```
text_to_vector('The tea is for a party to celebrate '
'the movie so she has no time for a cake')[:65]
array([0, 1, 0, 0, 2, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0])
```
End of explanation
"""
word_vectors = np.zeros((len(reviews), len(vocab)), dtype=np.int_)
for ii, (_, text) in enumerate(reviews.iterrows()):
word_vectors[ii] = text_to_vector(text[0])
# Printing out the first 5 word vectors
word_vectors[:5, :23]
"""
Explanation: Now, run through our entire review data set and convert each review to a word vector.
End of explanation
"""
Y = (labels=='positive').astype(np.int_)
records = len(labels)
shuffle = np.arange(records)
np.random.shuffle(shuffle)
test_fraction = 0.9
train_split, test_split = shuffle[:int(records*test_fraction)], shuffle[int(records*test_fraction):]
trainX, trainY = word_vectors[train_split,:], to_categorical(Y.values[train_split], 2)
testX, testY = word_vectors[test_split,:], to_categorical(Y.values[test_split], 2)
trainY
"""
Explanation: Train, Validation, Test sets
Now that we have the word_vectors, we're ready to split our data into train, validation, and test sets. Remember that we train on the train data, use the validation data to set the hyperparameters, and at the very end measure the network performance on the test data. Here we're using the function to_categorical from TFLearn to reshape the target data so that we'll have two output units and can classify with a softmax activation function. We actually won't be creating the validation set here, TFLearn will do that for us later.
End of explanation
"""
# Network building
def build_model():
# This resets all parameters and variables, leave this here
tf.reset_default_graph()
#### Your code ####
net = tflearn.input_data([None, len(vocab)])
net = tflearn.fully_connected(net, 1024, activation='ReLU')
net = tflearn.fully_connected(net, 2, activation='softmax')
net = tflearn.regression(net, optimizer='sgd', learning_rate=0.025, loss='categorical_crossentropy')
    model = tflearn.DNN(net)
    return model
"""
Explanation: Building the network
TFLearn lets you build the network by defining the layers.
Input layer
For the input layer, you just need to tell it how many units you have. For example,
net = tflearn.input_data([None, 100])
would create a network with 100 input units. The first element in the list, None in this case, sets the batch size. Setting it to None here leaves it at the default batch size.
The number of inputs to your network needs to match the size of your data. For this example, we're using 10000 element long vectors to encode our input data, so we need 10000 input units.
Adding layers
To add new hidden layers, you use
net = tflearn.fully_connected(net, n_units, activation='ReLU')
This adds a fully connected layer where every unit in the previous layer is connected to every unit in this layer. The first argument net is the network you created in the tflearn.input_data call. It's telling the network to use the output of the previous layer as the input to this layer. You can set the number of units in the layer with n_units, and set the activation function with the activation keyword. You can keep adding layers to your network by repeated calling net = tflearn.fully_connected(net, n_units).
Output layer
The last layer you add is used as the output layer. Therefore, you need to set the number of units to match the target data. In this case we are predicting two classes, positive or negative sentiment. You also need to set the activation function so it's appropriate for your model. Again, we're trying to predict if some input data belongs to one of two classes, so we should use softmax.
net = tflearn.fully_connected(net, 2, activation='softmax')
Training
To set how you train the network, use
net = tflearn.regression(net, optimizer='sgd', learning_rate=0.1, loss='categorical_crossentropy')
Again, this is passing in the network you've been building. The keywords:
optimizer sets the training method, here stochastic gradient descent
learning_rate is the learning rate
loss determines how the network error is calculated. In this example, with the categorical cross-entropy.
Finally you put all this together to create the model with tflearn.DNN(net). So it ends up looking something like
net = tflearn.input_data([None, 10]) # Input
net = tflearn.fully_connected(net, 5, activation='ReLU') # Hidden
net = tflearn.fully_connected(net, 2, activation='softmax') # Output
net = tflearn.regression(net, optimizer='sgd', learning_rate=0.1, loss='categorical_crossentropy')
model = tflearn.DNN(net)
Exercise: Below in the build_model() function, you'll put together the network using TFLearn. You get to choose how many layers to use, how many hidden units, etc.
End of explanation
"""
model = build_model()
"""
Explanation: Intializing the model
Next we need to call the build_model() function to actually build the model. In my solution I haven't included any arguments to the function, but you can add arguments so you can change parameters in the model if you want.
Note: You might get a bunch of warnings here. TFLearn uses a lot of deprecated code in TensorFlow. Hopefully it gets updated to the new TensorFlow version soon.
End of explanation
"""
# Training
model.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=512, n_epoch=150)
"""
Explanation: Training the network
Now that we've constructed the network, saved as the variable model, we can fit it to the data. Here we use the model.fit method. You pass in the training features trainX and the training targets trainY. Below I set validation_set=0.1 which reserves 10% of the data set as the validation set. You can also set the batch size and number of epochs with the batch_size and n_epoch keywords, respectively. Below is the code to fit our the network to our word vectors.
You can rerun model.fit to train the network further if you think you can increase the validation accuracy. Remember, all hyperparameter adjustments must be done using the validation set. Only use the test set after you're completely done training the network.
End of explanation
"""
predictions = (np.array(model.predict(testX))[:,0] >= 0.5).astype(np.int_)
test_accuracy = np.mean(predictions == testY[:,0], axis=0)
print("Test accuracy: ", test_accuracy)
"""
Explanation: Testing
After you're satisified with your hyperparameters, you can run the network on the test set to measure its performance. Remember, only do this after finalizing the hyperparameters.
End of explanation
"""
# Helper function that uses your model to predict sentiment
def test_sentence(sentence):
positive_prob = model.predict(text_to_vector(sentence.lower()))[0][1]
print(model.predict(text_to_vector(sentence.lower())))
print('Sentence: {}'.format(sentence))
print('P(positive) = {:.3f} :'.format(positive_prob),
'Positive' if positive_prob > 0.5 else 'Negative')
sentence = "Moonlight is by far the best movie of 2016."
test_sentence(sentence)
sentence = "It's amazing anyone could be talented enough to make something this spectacularly awful"
test_sentence(sentence)
"""
Explanation: Try out your own text!
End of explanation
"""
|
dlsun/symbulate | docs/conditioning.ipynb | mit | from symbulate import *
%matplotlib inline
"""
Explanation: Symbulate Documentation
Conditioning
<a id='contents'></a>
Conditional distributions
Conditioning with |
Conditioning events
Conditioning on multiple events
Conditioning on events in a probability space
Conditioning on the value of a continuous RV
Specifying a joint distribution via conditional and marginal distributions
< Multiple random variables and joint distributions | Contents | Random processes >
Be sure to import Symbulate using the following commands.
End of explanation
"""
X, Y = RV(Binomial(2, 0.5) ** 2)
"""
Explanation: <a id='cond_dens_def'></a>
Conditional distributions
We are often interested in the conditional distribution of some random variables given the values of other random variables. For example, if $X$ represents height (inches) and $Y$ represents weight (pounds) for some population, then the conditional distribution of $X$ given $Y=150$ would represent the distribution of heights only for those in the population who weigh 150 pounds.
In principle, the conditional distribution of $X$ given $Y=y^$ (where $y^$ is a particular value, like 150 in the above example) can be simulated by (1) generating many $(x,y)$ pairs according to the joint distribution of $X$ and $Y$, (2) discarding all pairs for which $y\neq y^*$, and (3) summarizing the distribution of $x$ values for the pairs that remain.
<a id='pipe'></a>
Conditioning with |
In Symbulate, the process of conditioning can be accomplished directly using the vertical "pipe" symbol | (read "given")
(X | (Y == 150)).sim(10000)
We illustrate conditioning first with a simple detailed example.
Example. A penny and a nickle are each flipped twice. Let $X$ be the number of flips of the penny which land on heads, and $Y$ the number of flips the nickle lands on heads. What is the probability that the penny lands on heads exactly once, given that 3 heads are flipped in total?
First, $X$ and $Y$ will be independent, each with a Binomial(2, 0.5) distribution.
End of explanation
"""
(X & Y).sim(10000).tabulate()
"""
Explanation: The following simulates many $X, Y$ pairs. Note that unconditionally there are 9 possible values.
End of explanation
"""
((X & Y) | (X + Y == 3)).sim(10000).tabulate()
"""
Explanation: However, we only want to consider pairs for which $X+Y$ is 3. We accomplish this through conditioning as in the following code. Note
There are only two possible outcomes for which $X+Y=3$, (2 heads for penny, 1 for nickle; 1 head for penny, 2 for nickle), and so conditioning on $X+Y=3$ should return only these outcomes.
Simulating with .sim() when conditioning generates the specified number of values for which the conditioning event is true (i.e. 10000 $(X, Y)$ pairs for which the $X+Y = 3$.)
The conditioning event, (X + Y == 3), is placed in parentheses
The double equal sign == is used to check for equality
End of explanation
"""
x_given_y3 = (X | (X + Y == 3)).sim(10000)
x_given_y3.tabulate(normalize=True)
x_given_y3.plot()
"""
Explanation: The previous code illustrates the basic functionality of conditioning. RVs and simulations with conditioning can be manipulated using the simulation tools just as those without conditioning. For example, to find and plot the conditional distribution of $X$ given $X+Y=3$:
End of explanation
"""
X = RV(Exponential(1))
(X - 5 | (X > 5)).sim(10000).plot()
RV(Exponential(1)).sim(10000).plot()
"""
Explanation: <a id='event'></a>
Conditioning events
Conditioning events must satisfy the following.
The conditioning event must be placed in parentheses.
The condition must involve at least one RV. Remember that a function or transformation of RVs is also an RV.
The conditioning event must involve a comparison operator: ==, >, <, >=, <=
Example. Memoryless property of Exponential distributions.
End of explanation
"""
X, Y = RV(Poisson(1) * Poisson(2))
(X | (X >= Y)).sim(10000).mean()
"""
Explanation: Example. Soccer teams X and Y score goals independently of each other, and the number of goals each team scores has a Poisson distribution with mean 1 for X and mean 2 for Y. Find the expected number of goals that team X scores given that they score at least as many goals as team Y.
End of explanation
"""
X, Y = RV(Exponential(scale=0.5) ** 2)
(X | ((X < 2) & (X + Y > 2)) ).sim(10000).plot()
"""
Explanation: <a id='combine'></a>
Conditioning on multiple events
Multiple events can be conditioned on using the logical operators
~ for not: (~A)
& for and: (A & B)
| for or: (A | B). Caution: be sure to put parentheses around the conditioning events to avoid confusion between | for conditioning and | for or.
Example. Times between successive earthquakes are independent each having an Exponential distribution with mean 0.5 hour. Find the conditional distribution of the time (starting now) until the next earthquake occurs, given that exactly 1 earthquake occurs in the next 2 hours.
Let $X, Y$ be the times between the first two quakes, so that $X$ is the time of the first quake and $X+Y$ is the time of the second. The event "exactly 1 quake in the next 2 hours" is equivalent to "the first quake occurs within 2 hours", i.e. $(X < 2)$, AND "the second quake occurs after 2 hours", i.e. $(X + Y > 2)$.
End of explanation
"""
cards = ['clubs', 'diamonds', 'hearts', 'spades'] * 13 # 13 cards of each suit
FirstCard, SecondCard, ThirdCard = RV(BoxModel(cards, size=3, replace=False))
"""
Explanation: <a id='outcome'></a>
Conditioning on events in a probability space
Symbulate allows for defining both ProbabilitySpaces and RVs. Conditioning is only available for RV. However, conditioning on events in a probability space can be accomplished by defining appropriate RVs and conditioning accordingly. Note that while technically a random variable maps outcomes to real numbers, a Symbulate RV is not required to take numerical values.
Example. Three cards are dealt without replacement from a standard deck of 52 cards. What is the conditional probability that the third card is a heart given that the first two cards are hearts? (Note that the true conditional probability is $11/50 = 0.22$.)
End of explanation
"""
(FirstCard & SecondCard & ThirdCard | ((FirstCard == 'hearts') & (SecondCard == 'hearts')) ).sim(100000).tabulate()
"""
Explanation: Note that FirstCard is an RV, but it takes non-numerical values ('clubs', etc). The following conditions on the FirstCard and SecondCard RVs taking the value 'hearts'.
End of explanation
"""
mu = [1, 2, 3]
Sigma = [[1, 1, -2],
[1, 4, 0],
[-2, 0, 9]]
X, Y, Z = RV(MultivariateNormal(mean=mu, cov=Sigma))
(X | (abs(Y - 1) < 0.01)).sim(1000).plot()
"""
Explanation: <a id='continuous'></a>
Conditioning on the value of a continuous RV
The probability that a continuous random variable equals any particular value is 0, so care must be taken when conditioning on the value of a continuous RV. If $X$ is continuous, conditioning on the event $X = x$, which has probability 0, can be interpreted in various ways. The simplest approach is to translate conditioning on $X=x$ as conditioning on values for which $X$ is "close to" $x$.
Example. $X, Y, Z$ have a multivariate normal distribution with mean vector and covariance matrix below. Find the conditional distribution of $X$ given $Y=1$.
We condition on $Y$ being within 0.01 of 1, using (abs(Y - 1) < 0.01). Note that this conditioning event has probability 0.005, so even though the probability is non-zero, it will still take some time to generate enough repetitions satisfying the condition.
End of explanation
"""
(X & Z | (abs(Y - 1) < 0.01)).sim(1000).plot()
"""
Explanation: We can also find the conditional joint distribution of $X$ and $Y$ given $Y=1$.
End of explanation
"""
yz = (Y & Z).sim(10000)
yz.plot()
print('The correlation coefficient of Y and Z is approximately {:.3f}'.format(yz.corr()))
"""
Explanation: In this example, $Y$ and $Z$ are independent.
End of explanation
"""
yz_given_x = (Y & Z | (abs(X - 2) < 0.01)).sim(1000)
yz_given_x.plot()
print('The correlation coefficient of Y and Z given X equals 2 is approximately {:.3f}'.format(yz_given_x.corr()))
"""
Explanation: But $Y$ and $Z$ are not conditionally independent given $X = 2$.
End of explanation
"""
def binomial_given_uniform():
x = Uniform(0,1).draw()
y = Binomial(10, x).draw()
return x, y
X, Y = RV(ProbabilitySpace(binomial_given_uniform))
(X & Y).sim(10000).plot(jitter=True, alpha=0.1)
"""
Explanation: <a id='conditional'></a>
Specifying a joint distribution via conditional distribution and marginal distributions
The joint distribution fully specifies the conditional and marginal distributions, and so conditioning can be used when the joint distribution is specified. However, in many situations we specify a joint distribution indirectly by specifying appropriate conditional and marginal distributions. Such a specification can be made in Symbulate using a custom ProbabilitySpace.
Example. Suppose that $X$ has a Uniform(0,1) distribution and that, given $X=x$, $Y$ has a Binomial(10, $x$) distribution.
First define the joint distribution of $X$ and $Y$ using a custom probability space. We first draw a value x from a Uniform(0,1) distribution. Then we draw a value y from a Binomial(10, x) distributions.
End of explanation
"""
(Y | (abs(X - 0.3) < 0.01) ).sim(10000).plot()
RV(Binomial(10, 0.3)).sim(10000).plot(jitter=True)
"""
Explanation: Use the joint distribution to simulate the conditional distribution of $Y$ given $X=0.3$ (and compare to Binomial(10, 0.3)).
End of explanation
"""
(X | (Y == 3) ).sim(10000).plot()
"""
Explanation: Simulate the conditional distribution of $X$ given $Y=3$.
End of explanation
"""
|
zlxs23/Python-Cookbook | data_structure_and_algorithm_py3_5.ipynb | apache-2.0 | rows = [
{'fname': 'Brian', 'lname': 'Jones', 'uid': 1003},
{'fname': 'David', 'lname': 'Beazley', 'uid': 1002},
{'fname': 'John', 'lname': 'Cleese', 'uid': 1001},
{'fname': 'Big', 'lname': 'Jones', 'uid': 1004}
]
# Sort the input result rows by any dict field
from operator import itemgetter
rows_by_fname = sorted(rows,key=itemgetter('fname'))
rows_by_uid = sorted(rows,key=itemgetter('uid'))
print(rows_by_fname,'\n')
print(rows_by_uid)
"""
Explanation: 1.13 Sorting a list of dicts by a common key
You have a list of dicts and want to sort it according to one or more of the dict fields.
Sorting this kind of data structure is very easy using the itemgetter function from the operator module. Suppose you have queried a database for a list of site members and it is returned as the following data structure:
End of explanation
"""
rows_by_lfname = sorted(rows,key=itemgetter('lname','fname'))
print(rows_by_lfname)
"""
Explanation: The itemgetter function also supports multiple keys:
End of explanation
"""
rows_by_fname = sorted(rows,key=lambda r:r['fname'])
rows_by_lfname = sorted(rows,key=lambda r:(r['fname'],r['lname']))
# The approach above works like itemgetter; the point is the key argument of sorted
"""
Explanation: rows is passed to the built-in sorted function, which accepts a key argument. This argument must be a callable that takes a single element from rows and returns a value to sort by; itemgetter is responsible for creating such a callable object.<br>operator.itemgetter takes as arguments the indexes used to look up values in the records of rows: a dict key name, an integer, or any value that can be passed to an object's __getitem__ method. If several indexes are passed to itemgetter, the callable it produces returns a tuple containing all the corresponding elements, and sorted orders the results according to that tuple.
itemgetter can also be replaced with a lambda:
End of explanation
"""
mi = min(rows,key=itemgetter('uid'))
ma = max(rows,key=itemgetter('uid'))
print(mi,'||\n',ma)
"""
Explanation: The itemgetter approach runs faster, so prefer it when performance matters. Besides its use inside sorted, the same technique also extends to min and max.
End of explanation
"""
class User:
def __init__(self,user_id):
self.user_id = user_id
def __repr__(self):
return 'User({})'.format(self.user_id)
# The __repr__ here is important!!!!!
users = [User(23), User(3), User(88)]
print(users)
print(sorted(users, key=lambda u: u.user_id))
"""
Explanation: 1.14 Sorting objects that do not support comparison
You want to sort objects of the same class, but they do not support the standard comparison operations.
The built-in sorted function takes a key argument that can be given a callable object; this callable returns a value for each object passed in, and sorted uses that value to order the objects. For example, if your program has a sequence of User instances and you want to sort them by their user_id attribute, you can supply a callable that takes a User instance as input and returns the corresponding user_id value:
End of explanation
"""
from operator import attrgetter
sorted(users, key=attrgetter('user_id'))
"""
Explanation: An alternative is to use operator.attrgetter() in place of the lambda function:
End of explanation
"""
by_name = sorted(users,key=attrgetter('last_name','first_name'))
"""
Explanation: itemgetter (which works on dict types) and attrgetter both come from the operator module and are very similar.<br>Both can be replaced with a lambda, but the operator versions run a bit faster and can compare several fields at once.
If the User instances also had first_name and last_name attributes, they could be sorted like this:
End of explanation
"""
a = min(users,key=attrgetter('user_id'))
b = max(users,key=attrgetter('user_id'))
print(a,'\n',b)
"""
Explanation: Likewise, attrgetter works with functions such as min and max:
End of explanation
"""
rows = [
{'address': '5412 N CLARK', 'date': '07/01/2012'},
{'address': '5148 N CLARK', 'date': '07/04/2012'},
{'address': '5800 E 58TH', 'date': '07/02/2012'},
{'address': '2122 N CLARK', 'date': '07/03/2012'},
{'address': '5645 N RAVENSWOOD', 'date': '07/02/2012'},
{'address': '1060 W ADDISON', 'date': '07/02/2012'},
{'address': '4801 N BROADWAY', 'date': '07/01/2012'},
{'address': '1039 W GRANVILLE', 'date': '07/04/2012'},
]
# ๅ่ฎพๆณๅจdate ๅ็ปๅ็data ๅไธ่ฟ่ก่ฟญไปฃ ้ฆๅ
้่ฆๆ็
งๆๅฎๅญๆฎตdate ่ฟ่กๆๅบ ๅ่ฐ็จitertools.groupby()่ฟ่กๅ็ป
from operator import itemgetter
from itertools import groupby
# Sort by the desited field first
rows.sort(key=itemgetter('date'))
# Iterate in groups
for date, item in groupby(rows,key=itemgetter('date')):
print(date)
for i in item:
print(' ',i)
"""
Explanation: 1.15 Grouping records by a field
You have a sequence of dicts or instances and want to iterate over the data in groups based on a particular field, such as date.
Use the itertools.groupby() function.
End of explanation
"""
from collections import defaultdict
rows_by_date = defaultdict(list)
for row in rows:
rows_by_date[row['date']].append(row)
# Records for each specified date can now be accessed easily
for r in rows_by_date['07/01/2012']:
print(r)
"""
Explanation: The groupby function scans the sequence looking for runs of consecutive elements with the same value (or the same value returned by the key function). On each iteration it returns the value together with an iterator object, and that iterator produces all the objects in the group whose value equals the one returned.<br><br>A crucial preliminary step is to sort the data on the chosen field. Because groupby only examines consecutive elements, skipping the sort will not give the grouping you expect.<br><br>If you merely want to group the data by the date field into one large data structure that allows random access, it is better to use defaultdict to build a multi-valued dict:
End of explanation
"""
# way 1
mlist = [1,4,-5,10,2,-7,10,3,2,-1]
[n for n in mlist if n > 0]
[n for n in mlist if n < 0]
"""
Explanation: Note that this last example does not require the records to be sorted first, and it
also runs faster than sorting the records and then iterating with groupby().
1.16 Filtering Sequence Elements
You have data inside a sequence and want to extract the values you need, or shorten the sequence,
according to some criteria.
The simplest way to filter sequence elements is with a list comprehension.
If the filtering logic is more involved, put the filtering code into a function and use the
built-in filter() function.
Another filtering tool, itertools.compress(), takes an iterable and an accompanying Boolean
selector sequence as input, and outputs the elements of the iterable whose corresponding selector
value is True.
End of explanation
"""
pos = (n for n in mlist if n > 0)
pos
for x in pos:
print(x)
"""
Explanation: A potential downside of a list comprehension is that, if the input is very large, it
produces a large result list that takes up memory. A generator expression instead yields the
filtered elements iteratively, as shown above (see also the size comparison sketch below).
End of explanation
"""
values = ['1','2','-4','-',4,'N/A',5]
def is_int(val):
try:
x = int(val)
return True
except ValueError:
return False
ivals = list(filter(is_int,values))
print(ivals)
# filter() creates an iterator, so to get a list you have to convert the result with list() as shown here
"""
Explanation: Sometimes the filtering criteria cannot be expressed simply in a list comprehension
or generator expression, for example when the filtering has to deal with exceptions or some other
complicated detail. In that case, put the filtering code into its own function and then use the
built-in filter() function, as above.
End of explanation
"""
mylist = [1,4,2,-5,4,7,9,2,3,-1]
import math
[math.sqrt(n) for n in mylist if n > 0]
"""
Explanation: List comprehensions and generator expressions are usually the simplest filtering
tools, and they can also transform the data at the same time as filtering it.
End of explanation
"""
clip = [n if n > 0 else 0 for n in mylist]
clip
slip = [n if n < 0 else 0 for n in mylist]
slip
"""
Explanation: One variation on filtering is to replace the values that don't meet the criteria with
a new value instead of discarding them. For example, in a column of data you might want to find the
positive values and also clip the values that are not positive to a given number; this is easy to
do by moving the filter criterion into a conditional expression, as above.
End of explanation
"""
'''
When you need to filter one sequence using another, related sequence
'''
addresses = [
'5412 N CLARK',
'5148 N CLARK',
'5800 E 58TH',
'2122 N CLARK',
'5645 N RAVENSWOOD',
'1060 W ADDISON',
'4801 N BROADWAY',
'1039 W GRANVILLE',
]
counts = [ 0, 3, 10, 4, 1, 7, 6, 1]
'''
Output all of the addresses whose corresponding count value is greater than 5
'''
if len(addresses) == len(counts):
print('addresses == counts')
from itertools import compress
from collections import defaultdict
more5 = [n > 5 for n in counts]
cm = dict()
for i in range(len(counts)):
cm[counts[i]] = more5[i]
cm
list(compress(addresses,more5))
"""
Explanation: Another filtering tool is itertools.compress(), demonstrated above.
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/noaa-gfdl/cmip6/models/sandbox-2/atmoschem.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'noaa-gfdl', 'sandbox-2', 'atmoschem')
"""
Explanation: ES-DOC CMIP6 Model Properties - Atmoschem
MIP Era: CMIP6
Institute: NOAA-GFDL
Source ID: SANDBOX-2
Topic: Atmoschem
Sub-Topics: Transport, Emissions Concentrations, Gas Phase Chemistry, Stratospheric Heterogeneous Chemistry, Tropospheric Heterogeneous Chemistry, Photo Chemistry.
Properties: 84 (39 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-20 15:02:35
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Software Properties
3. Key Properties --> Timestep Framework
4. Key Properties --> Timestep Framework --> Split Operator Order
5. Key Properties --> Tuning Applied
6. Grid
7. Grid --> Resolution
8. Transport
9. Emissions Concentrations
10. Emissions Concentrations --> Surface Emissions
11. Emissions Concentrations --> Atmospheric Emissions
12. Emissions Concentrations --> Concentrations
13. Gas Phase Chemistry
14. Stratospheric Heterogeneous Chemistry
15. Tropospheric Heterogeneous Chemistry
16. Photo Chemistry
17. Photo Chemistry --> Photolysis
1. Key Properties
Key properties of the atmospheric chemistry
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of atmospheric chemistry model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of atmospheric chemistry model code.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.chemistry_scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.3. Chemistry Scheme Scope
Is Required: TRUE Type: ENUM Cardinality: 1.N
Atmospheric domains covered by the atmospheric chemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.4. Basic Approximations
Is Required: TRUE Type: STRING Cardinality: 1.1
Basic approximations made in the atmospheric chemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/mixing ratio for gas"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.5. Prognostic Variables Form
Is Required: TRUE Type: ENUM Cardinality: 1.N
Form of prognostic variables in the atmospheric chemistry component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 1.6. Number Of Tracers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of advected tracers in the atmospheric chemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 1.7. Family Approach
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Atmospheric chemistry calculations (not advection) generalized into families of species?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.coupling_with_chemical_reactivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 1.8. Coupling With Chemical Reactivity
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is atmospheric chemistry transport scheme turbulence coupled with chemical reactivity?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Software Properties
Software properties of aerosol code
2.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Operator splitting"
# "Integrated"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Timestep Framework
Timestepping in the atmospheric chemistry model
3.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Mathematical method deployed to solve the evolution of a given variable
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.2. Split Operator Advection Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for chemical species advection (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.3. Split Operator Physical Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for physics (in seconds).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_chemistry_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.4. Split Operator Chemistry Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for chemistry (in seconds).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_alternate_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 3.5. Split Operator Alternate Order
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.6. Integrated Timestep
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Timestep for the atmospheric chemistry model (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3.7. Integrated Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify the type of timestep scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.turbulence')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Timestep Framework --> Split Operator Order
**
4.1. Turbulence
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for turbulence scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.convection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.2. Convection
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for convection scheme This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.3. Precipitation
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for precipitation scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.emissions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.4. Emissions
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for emissions scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.5. Deposition
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for deposition scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.gas_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.6. Gas Phase Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for gas phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.tropospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.7. Tropospheric Heterogeneous Phase Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for tropospheric heterogeneous phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.stratospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.8. Stratospheric Heterogeneous Phase Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for stratospheric heterogeneous phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.photo_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.9. Photo Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for photo chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.aerosols')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.10. Aerosols
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for aerosols scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Key Properties --> Tuning Applied
Tuning methodology for atmospheric chemistry component
5.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process oriented metrics, and the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.2. Global Mean Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List set of metrics of the global mean state used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.3. Regional Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List of regional metrics of mean state used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.4. Trend Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List observed trend metrics used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Grid
Atmospheric chemistry grid
6.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general structure of the atmospheric chemistry grid
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.2. Matches Atmosphere Grid
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
*Does the atmospheric chemistry grid match the atmosphere grid?*
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Grid --> Resolution
Resolution in the atmospheric chemistry grid
7.1. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.2. Canonical Horizontal Resolution
Is Required: FALSE Type: STRING Cardinality: 0.1
Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 7.3. Number Of Horizontal Gridpoints
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Total number of horizontal (XY) points (or degrees of freedom) on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 7.4. Number Of Vertical Levels
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Number of vertical levels resolved on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 7.5. Is Adaptive Grid
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Default is False. Set true if grid resolution changes during execution.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Transport
Atmospheric chemistry transport
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview of transport implementation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.use_atmospheric_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 8.2. Use Atmospheric Transport
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is transport handled by the atmosphere, rather than within atmospheric chemistry?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.transport_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.3. Transport Details
Is Required: FALSE Type: STRING Cardinality: 0.1
If transport is handled within the atmospheric chemistry scheme, describe it.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Emissions Concentrations
Atmospheric chemistry emissions
9.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview atmospheric chemistry emissions
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Soil"
# "Sea surface"
# "Anthropogenic"
# "Biomass burning"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10. Emissions Concentrations --> Surface Emissions
**
10.1. Sources
Is Required: FALSE Type: ENUM Cardinality: 0.N
Sources of the chemical species emitted at the surface that are taken into account in the emissions scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.2. Method
Is Required: FALSE Type: ENUM Cardinality: 0.N
Methods used to define chemical species emitted directly into model layers above the surface (several methods allowed because the different species may not use the same method).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.3. Prescribed Climatology Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and prescribed via a climatology, and the nature of the climatology (E.g. CO (monthly), C2H6 (constant))
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.4. Prescribed Spatially Uniform Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and prescribed as spatially uniform
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.5. Interactive Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and specified via an interactive method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.6. Other Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and specified via any other method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Aircraft"
# "Biomass burning"
# "Lightning"
# "Volcanos"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11. Emissions Concentrations --> Atmospheric Emissions
TO DO
11.1. Sources
Is Required: FALSE Type: ENUM Cardinality: 0.N
Sources of chemical species emitted in the atmosphere that are taken into account in the emissions scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.2. Method
Is Required: FALSE Type: ENUM Cardinality: 0.N
Methods used to define the chemical species emitted in the atmosphere (several methods allowed because the different species may not use the same method).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.3. Prescribed Climatology Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and prescribed via a climatology (E.g. CO (monthly), C2H6 (constant))
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.4. Prescribed Spatially Uniform Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and prescribed as spatially uniform
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.5. Interactive Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and specified via an interactive method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.6. Other Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and specified via an "other method"
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12. Emissions Concentrations --> Concentrations
TO DO
12.1. Prescribed Lower Boundary
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed at the lower boundary.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.2. Prescribed Upper Boundary
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed at the upper boundary.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 13. Gas Phase Chemistry
Atmospheric chemistry transport
13.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview gas phase atmospheric chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HOx"
# "NOy"
# "Ox"
# "Cly"
# "HSOx"
# "Bry"
# "VOCs"
# "isoprene"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Species included in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_bimolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.3. Number Of Bimolecular Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of bi-molecular reactions in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_termolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.4. Number Of Termolecular Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of ter-molecular reactions in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_tropospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.5. Number Of Tropospheric Heterogenous Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of reactions in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_stratospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.6. Number Of Stratospheric Heterogenous Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of reactions in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_advected_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.7. Number Of Advected Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of advected species in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.8. Number Of Steady State Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of gas phase species for which the concentration is updated in the chemical solver assuming photochemical steady state
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13.9. Interactive Dry Deposition
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is dry deposition interactive (as opposed to prescribed)? Dry deposition describes the dry processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13.10. Wet Deposition
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is wet deposition included? Wet deposition describes the moist processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_oxidation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13.11. Wet Oxidation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is wet oxidation included? Oxidation describes the loss of electrons or an increase in oxidation state by a molecule
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14. Stratospheric Heterogeneous Chemistry
Atmospheric chemistry stratospheric heterogeneous chemistry
14.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview stratospheric heterogeneous atmospheric chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Cly"
# "Bry"
# "NOy"
# TODO - please enter value(s)
"""
Explanation: 14.2. Gas Phase Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Gas phase species included in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule))"
# TODO - please enter value(s)
"""
Explanation: 14.3. Aerosol Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Aerosol species included in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.4. Number Of Steady State Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of steady state species in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.sedimentation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 14.5. Sedimentation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is sedimentation included in the stratospheric heterogeneous chemistry scheme or not?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 14.6. Coagulation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is coagulation included in the stratospheric heterogeneous chemistry scheme or not?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Tropospheric Heterogeneous Chemistry
Atmospheric chemistry tropospheric heterogeneous chemistry
15.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview tropospheric heterogeneous atmospheric chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Gas Phase Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of gas phase species included in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon/soot"
# "Polar stratospheric ice"
# "Secondary organic aerosols"
# "Particulate organic matter"
# TODO - please enter value(s)
"""
Explanation: 15.3. Aerosol Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Aerosol species included in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.4. Number Of Steady State Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of steady state species in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 15.5. Interactive Dry Deposition
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is dry deposition interactive (as opposed to prescribed)? Dry deposition describes the dry processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 15.6. Coagulation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is coagulation included in the tropospheric heterogeneous chemistry scheme or not?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 16. Photo Chemistry
Atmospheric chemistry photo chemistry
16.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview atmospheric photo chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.number_of_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 16.2. Number Of Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of reactions in the photo-chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline (clear sky)"
# "Offline (with clouds)"
# "Online"
# TODO - please enter value(s)
"""
Explanation: 17. Photo Chemistry --> Photolysis
Photolysis scheme
17.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Photolysis scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.environmental_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.2. Environmental Conditions
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe any environmental conditions taken into account by the photolysis scheme (e.g. whether pressure- and temperature-sensitive cross-sections and quantum yields in the photolysis calculations are modified to reflect the modelled conditions.)
End of explanation
"""
|
ud3sh/coursework | deeplearning.ai/coursera-improving-neural-networks/week1/assignment1/Initialization.ipynb | unlicense | import numpy as np
import matplotlib.pyplot as plt
import sklearn
import sklearn.datasets
from init_utils import sigmoid, relu, compute_loss, forward_propagation, backward_propagation
from init_utils import update_parameters, predict, load_dataset, plot_decision_boundary, predict_dec
%matplotlib inline
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# load image dataset: blue/red dots in circles
train_X, train_Y, test_X, test_Y = load_dataset()
"""
Explanation: Initialization
Welcome to the first assignment of "Improving Deep Neural Networks".
Training your neural network requires specifying an initial value of the weights. A well chosen initialization method will help learning.
If you completed the previous course of this specialization, you probably followed our instructions for weight initialization, and it has worked out so far. But how do you choose the initialization for a new neural network? In this notebook, you will see how different initializations lead to different results.
A well chosen initialization can:
- Speed up the convergence of gradient descent
- Increase the odds of gradient descent converging to a lower training (and generalization) error
To get started, run the following cell to load the packages and the planar dataset you will try to classify.
End of explanation
"""
def model(X, Y, learning_rate = 0.01, num_iterations = 15000, print_cost = True, initialization = "he"):
"""
Implements a three-layer neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID.
Arguments:
X -- input data, of shape (2, number of examples)
Y -- true "label" vector (containing 0 for red dots; 1 for blue dots), of shape (1, number of examples)
learning_rate -- learning rate for gradient descent
num_iterations -- number of iterations to run gradient descent
print_cost -- if True, print the cost every 1000 iterations
initialization -- flag to choose which initialization to use ("zeros","random" or "he")
Returns:
parameters -- parameters learnt by the model
"""
grads = {}
costs = [] # to keep track of the loss
m = X.shape[1] # number of examples
layers_dims = [X.shape[0], 10, 5, 1]
# Initialize parameters dictionary.
if initialization == "zeros":
parameters = initialize_parameters_zeros(layers_dims)
elif initialization == "random":
parameters = initialize_parameters_random(layers_dims)
elif initialization == "he":
parameters = initialize_parameters_he(layers_dims)
# Loop (gradient descent)
for i in range(0, num_iterations):
# Forward propagation: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.
a3, cache = forward_propagation(X, parameters)
# Loss
cost = compute_loss(a3, Y)
# Backward propagation.
grads = backward_propagation(X, Y, cache)
# Update parameters.
parameters = update_parameters(parameters, grads, learning_rate)
# Print the loss every 1000 iterations
if print_cost and i % 1000 == 0:
print("Cost after iteration {}: {}".format(i, cost))
costs.append(cost)
# plot the loss
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
return parameters
"""
Explanation: You would like a classifier to separate the blue dots from the red dots.
1 - Neural Network model
You will use a 3-layer neural network (already implemented for you). Here are the initialization methods you will experiment with:
- Zeros initialization -- setting initialization = "zeros" in the input argument.
- Random initialization -- setting initialization = "random" in the input argument. This initializes the weights to large random values.
- He initialization -- setting initialization = "he" in the input argument. This initializes the weights to random values scaled according to a paper by He et al., 2015.
Instructions: Please quickly read over the code below, and run it. In the next part you will implement the three initialization methods that this model() calls.
End of explanation
"""
# GRADED FUNCTION: initialize_parameters_zeros
def initialize_parameters_zeros(layers_dims):
"""
Arguments:
layer_dims -- python array (list) containing the size of each layer.
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
W1 -- weight matrix of shape (layers_dims[1], layers_dims[0])
b1 -- bias vector of shape (layers_dims[1], 1)
...
WL -- weight matrix of shape (layers_dims[L], layers_dims[L-1])
bL -- bias vector of shape (layers_dims[L], 1)
"""
parameters = {}
L = len(layers_dims) # number of layers in the network
for l in range(1, L):
### START CODE HERE ### (โ 2 lines of code)
parameters['W' + str(l)] = np.zeros(shape = (layers_dims[l], layers_dims[l-1]))
parameters['b' + str(l)] = np.zeros(shape = (layers_dims[l], 1))
### END CODE HERE ###
return parameters
parameters = initialize_parameters_zeros([3,2,1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
"""
Explanation: 2 - Zero initialization
There are two types of parameters to initialize in a neural network:
- the weight matrices $(W^{[1]}, W^{[2]}, W^{[3]}, ..., W^{[L-1]}, W^{[L]})$
- the bias vectors $(b^{[1]}, b^{[2]}, b^{[3]}, ..., b^{[L-1]}, b^{[L]})$
Exercise: Implement the following function to initialize all parameters to zeros. You'll see later that this does not work well since it fails to "break symmetry", but lets try it anyway and see what happens. Use np.zeros((..,..)) with the correct shapes.
End of explanation
"""
parameters = model(train_X, train_Y, initialization = "zeros")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
"""
Explanation: Expected Output:
<table>
<tr>
<td>
**W1**
</td>
<td>
[[ 0. 0. 0.]
[ 0. 0. 0.]]
</td>
</tr>
<tr>
<td>
**b1**
</td>
<td>
[[ 0.]
[ 0.]]
</td>
</tr>
<tr>
<td>
**W2**
</td>
<td>
[[ 0. 0.]]
</td>
</tr>
<tr>
<td>
**b2**
</td>
<td>
[[ 0.]]
</td>
</tr>
</table>
Run the following code to train your model on 15,000 iterations using zeros initialization.
End of explanation
"""
print ("predictions_train = " + str(predictions_train))
print ("predictions_test = " + str(predictions_test))
plt.title("Model with Zeros initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
"""
Explanation: The performance is really bad, and the cost does not really decrease, and the algorithm performs no better than random guessing. Why? Lets look at the details of the predictions and the decision boundary:
End of explanation
"""
# GRADED FUNCTION: initialize_parameters_random
def initialize_parameters_random(layers_dims):
"""
Arguments:
layer_dims -- python array (list) containing the size of each layer.
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
W1 -- weight matrix of shape (layers_dims[1], layers_dims[0])
b1 -- bias vector of shape (layers_dims[1], 1)
...
WL -- weight matrix of shape (layers_dims[L], layers_dims[L-1])
bL -- bias vector of shape (layers_dims[L], 1)
"""
np.random.seed(3)               # This seed makes sure your "random" numbers will be the same as ours
parameters = {}
L = len(layers_dims) # integer representing the number of layers
for l in range(1, L):
### START CODE HERE ### (โ 2 lines of code)
parameters['W' + str(l)] = np.random.randn( layers_dims[l], layers_dims[l-1]) * 10
parameters['b' + str(l)] = np.zeros(shape = (layers_dims[l], 1))
### END CODE HERE ###
return parameters
parameters = initialize_parameters_random([3, 2, 1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
"""
Explanation: The model is predicting 0 for every example.
In general, initializing all the weights to zero results in the network failing to break symmetry. This means that every neuron in each layer will learn the same thing, and you might as well be training a neural network with $n^{[l]}=1$ for every layer, and the network is no more powerful than a linear classifier such as logistic regression.
<font color='blue'>
What you should remember:
- The weights $W^{[l]}$ should be initialized randomly to break symmetry.
- It is however okay to initialize the biases $b^{[l]}$ to zeros. Symmetry is still broken so long as $W^{[l]}$ is initialized randomly.
3 - Random initialization
To break symmetry, let's initialize the weights randomly. Following random initialization, each neuron can then proceed to learn a different function of its inputs. In this exercise, you will see what happens if the weights are initialized randomly, but to very large values.
Exercise: Implement the following function to initialize your weights to large random values (scaled by *10) and your biases to zeros. Use np.random.randn(..,..) * 10 for weights and np.zeros((.., ..)) for biases. We are using a fixed np.random.seed(..) to make sure your "random" weights match ours, so don't worry if running your code several times always gives you the same initial values for the parameters.
End of explanation
"""
parameters = model(train_X, train_Y, initialization = "random")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
"""
Explanation: Expected Output:
<table>
<tr>
<td>
**W1**
</td>
<td>
[[ 17.88628473 4.36509851 0.96497468]
[-18.63492703 -2.77388203 -3.54758979]]
</td>
</tr>
<tr>
<td>
**b1**
</td>
<td>
[[ 0.]
[ 0.]]
</td>
</tr>
<tr>
<td>
**W2**
</td>
<td>
[[-0.82741481 -6.27000677]]
</td>
</tr>
<tr>
<td>
**b2**
</td>
<td>
[[ 0.]]
</td>
</tr>
</table>
Run the following code to train your model on 15,000 iterations using random initialization.
End of explanation
"""
print (predictions_train)
print (predictions_test)
plt.title("Model with large random initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
"""
Explanation: If you see "inf" as the cost after iteration 0, this is because of numerical roundoff; a more numerically sophisticated implementation would fix this. But this isn't worth worrying about for our purposes.
Anyway, it looks like you have broken symmetry, and this gives better results than before. The model is no longer outputting all 0s.
End of explanation
"""
# GRADED FUNCTION: initialize_parameters_he
def initialize_parameters_he(layers_dims):
"""
Arguments:
layer_dims -- python array (list) containing the size of each layer.
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
W1 -- weight matrix of shape (layers_dims[1], layers_dims[0])
b1 -- bias vector of shape (layers_dims[1], 1)
...
WL -- weight matrix of shape (layers_dims[L], layers_dims[L-1])
bL -- bias vector of shape (layers_dims[L], 1)
"""
np.random.seed(3)
parameters = {}
L = len(layers_dims) - 1 # integer representing the number of layers
for l in range(1, L + 1):
### START CODE HERE ### (โ 2 lines of code)
parameters['W' + str(l)] = np.random.randn( layers_dims[l], layers_dims[l-1]) * np.sqrt(2/layers_dims[l-1])
parameters['b' + str(l)] = np.zeros(shape = (layers_dims[l], 1))
### END CODE HERE ###
return parameters
parameters = initialize_parameters_he([2, 4, 1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
"""
Explanation: Observations:
- The cost starts very high. This is because with large random-valued weights, the last activation (sigmoid) outputs results that are very close to 0 or 1 for some examples, and when it gets that example wrong it incurs a very high loss for that example. Indeed, when $\log(a^{[3]}) = \log(0)$, the loss goes to infinity.
- Poor initialization can lead to vanishing/exploding gradients, which also slows down the optimization algorithm.
- If you train this network longer you will see better results, but initializing with overly large random numbers slows down the optimization.
<font color='blue'>
In summary:
- Initializing weights to very large random values does not work well.
- Hopefully initializing with small random values does better. The important question is: how small should these random values be? Let's find out in the next part!
4 - He initialization
Finally, try "He Initialization"; this is named for the first author of He et al., 2015. (If you have heard of "Xavier initialization", this is similar except Xavier initialization uses a scaling factor for the weights $W^{[l]}$ of sqrt(1./layers_dims[l-1]) where He initialization would use sqrt(2./layers_dims[l-1]).)
Exercise: Implement the following function to initialize your parameters with He initialization.
Hint: This function is similar to the previous initialize_parameters_random(...). The only difference is that instead of multiplying np.random.randn(..,..) by 10, you will multiply it by $\sqrt{\frac{2}{\text{dimension of the previous layer}}}$, which is what He initialization recommends for layers with a ReLU activation.
End of explanation
"""
parameters = model(train_X, train_Y, initialization = "he")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
plt.title("Model with He initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
"""
Explanation: Expected Output:
<table>
<tr>
<td>
**W1**
</td>
<td>
[[ 1.78862847 0.43650985]
[ 0.09649747 -1.8634927 ]
[-0.2773882 -0.35475898]
[-0.08274148 -0.62700068]]
</td>
</tr>
<tr>
<td>
**b1**
</td>
<td>
[[ 0.]
[ 0.]
[ 0.]
[ 0.]]
</td>
</tr>
<tr>
<td>
**W2**
</td>
<td>
[[-0.03098412 -0.33744411 -0.92904268 0.62552248]]
</td>
</tr>
<tr>
<td>
**b2**
</td>
<td>
[[ 0.]]
</td>
</tr>
</table>
Run the following code to train your model on 15,000 iterations using He initialization.
End of explanation
"""
|
sarahmid/programming-bootcamp-v2 | lab6_exercises_ANSWERS.ipynb | mit | def fancy_calc(a, b, c):
x1 = basic_calc(a,b)
x2 = basic_calc(b,c)
x3 = basic_calc(c,a)
z = x1 * x2 * x3
return z
def basic_calc(x, y):
result = x + y
return result
x = 1
y = 2
z = 3
result = fancy_calc(x, y, z)
"""
Explanation: Programming Bootcamp 2016
Lesson 6 Exercises -- ANSWERS
Earning points (optional)
Enter your name below.
Email your .ipynb file to me (sarahmid@mail.med.upenn.edu) before 9:00 am on 9/27.
You do not need to complete all the problems to get points.
I will give partial credit for effort when possible.
At the end of the course, everyone who gets at least 90% of the total points will get a prize (bootcamp mug!).
Name:
1. Guess the output: scope practice (2pts)
Refer to the code below to answer the following questions:
End of explanation
"""
print x
print z
print x1
print result
"""
Explanation: (A) List the line numbers of the code above in the order that they will be executed. If a line will be executed more than once, list it each time.
NOTE: Select the cell above and hit "L" to activate line numbering!
Answer:
12
13
14
15
1
2
8
9
10
2
3
8
9
10
3
4
8
9
10
4
5
6
15
(B) Guess the output if you were to run each of the following pieces of code immediately after running the code above. Then run the code to see if you're right. (Remember to run the code above first)
End of explanation
"""
# run this first!
def getMax(someList):
someList.sort()
x = someList[-1]
return x
scores = [9, 5, 7, 1, 8]
maxScore = getMax(scores)
print maxScore
print someList
print scores
"""
Explanation: 2. Data structure woes (2pt)
(A) Passing a data structure to a function. Guess the output of the following lines of code if you were to run them immediately following the code block below. Then run the code yourself to see if you're right.
End of explanation
"""
# run this first!
list1 = [1, 2, 3, 4]
list2 = list1
list2[0] = "HELLO"
print list2
print list1
"""
Explanation: Why does scores get sorted?
When you pass a data structure as a parameter to a function, it's not a copy of the data structure that gets passed (as happens with regular variables). What gets passed is a direct reference to the data structure itself.
This is done because data structures are typically expected to be fairly large, and copying/re-assigning the whole thing can be both time- and memory-consuming. So doing things this way is more efficient. It can also surprise you, though, if you're not aware it's happening. If you would like to learn more about this, look up "pass by reference vs. pass by value".
(B) Copying data structures. Guess the output of the following code if you were to run them immediately following the code block below. Then run the code yourself to see if you're right.
End of explanation
"""
# for lists
list1 = [1, 2, 3, 4]
list2 = list(list1) #make a true copy of the list
list2[0] = "HELLO"
print list2
print list1
"""
Explanation: Yes, that's right--even when you try to make a new copy of a list, it's actually just a reference to the same list! This is called aliasing. The same thing will happen with a dictionary. This can really trip you up if you don't know it's happening.
So what if we want to make a truly separate copy? Here's a way for lists:
End of explanation
"""
# for dictionaries
dict1 = {'A':1, 'B':2, 'C':3}
dict2 = dict1.copy() #make a true copy of the dict
dict2['A'] = 99
print dict2
print dict1
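# One extra caveat (added note, not part of the original lab): list(list1) and
# dict1.copy() are *shallow* copies. If the values are themselves lists or
# dicts, the inner objects are still shared. copy.deepcopy makes a fully
# independent copy.
import copy
nested1 = {'A': [1, 2, 3]}
nested2 = copy.deepcopy(nested1)
nested2['A'][0] = 99
print nested2
print nested1   # unchanged, because deepcopy also copied the inner list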
"""
Explanation: And here's a way for dictionaries:
End of explanation
"""
def gc(seq):
gcCount = seq.count("C") + seq.count("G")
gcFrac = float(gcCount) / len(seq)
return round(gcFrac,2)
"""
Explanation: 3. Writing custom functions (8pts)
Complete the following. For some of these problems, you can use your code from previous labs as a starting point.
(If you didn't finish those problems, feel free to use the code from the answer sheet, just make sure you understand how they work! Optionally, for extra practice you can try re-writing them using some of the new things we've learned since then.)
(A) (1pt) Create a function called "gc" that takes a single sequence as a parameter and returns the GC content of the sequence (as a 2 decimal place float).
End of explanation
"""
def reverse_compl(seq):
complements = {'A':'T', 'C':'G', 'G':'C', 'T':'A'}
compl = ""
for char in seq:
compl = complements[char] + compl
return compl
"""
Explanation: (B) (1pt) Create a function called "reverse_compl" that takes a single sequence as a parameter and returns the reverse complement.
End of explanation
"""
def read_fasta(fileName):
ins = open(fileName, 'r')
seqDict = {}
activeID = ""
for line in ins:
line = line.rstrip('\r\n')
if line[0] == ">":
activeID = line[1:]
if activeID in seqDict:
print ">>> Warning: repeat id:", activeID, "-- overwriting previous ID."
seqDict[activeID] = ""
else:
seqDict[activeID] += line
ins.close()
return seqDict
"""
Explanation: (C) (1pt) Create a function called "read_fasta" that takes a file name as a parameter (which is assumed to be in fasta format), puts each fasta entry into a dictionary (using the header line as a key and the sequence as a value), and then returns the dictionary.
End of explanation
"""
def rand_seq(length):
import random
nts = ['A','C','G','T']
seq = ""
for i in range(length):
seq += random.choice(nts)
return seq
"""
Explanation: (D) (2pts) Create a function called "rand_seq" that takes an integer length as a parameter, and then returns a random DNA sequence of that length.
Hint: make a list of the possible nucleotides
End of explanation
"""
def shuffle_nt(seq):
import random
strList = list(seq)
random.shuffle(strList)
shuffSeq = "".join(strList)
return shuffSeq
"""
Explanation: (E) (2pts) Create a function called "shuffle_nt" that takes a single sequence as a parameter and returns a string that is a shuffled version of the sequence (i.e. the same nucleotides, but in a random order).
Hint: Look for Python functions that will make this easier. For example, the random module has some functions for shuffling. There may also be some built-in string functions that are useful. However, you can also do this just using things we've learned.
End of explanation
"""
##### testing gc
gcCont = gc("ATGGGCCCAATGG")
if type(gcCont) != float:
print ">> Problem with gc: answer is not a float, it is a %s." % type(gcCont)
elif gcCont != 0.62:
print ">> Problem with gc: incorrect answer (should be 0.62; your code gave", gcCont, ")"
else:
print "gc: Passed."
##### testing reverse_compl
revCompl = reverse_compl("GGGGTCGATGCAAATTCAAA")
if type(revCompl) != str:
print ">> Problem with reverse_compl: answer is not a string, it is a %s." % type(revCompl)
elif revCompl != "TTTGAATTTGCATCGACCCC":
print ">> Problem with reverse_compl: answer (%s) does not match expected (%s)" % (revCompl, "TTTGAATTTGCATCGACCCC")
else:
print "reverse_compl: Passed."
##### testing read_fasta
try:
ins = open("horrible.fasta", 'r')
except IOError:
print ">> Can not test read_fasta because horrible.fasta is missing. Please add it to the directory with this notebook."
else:
seqDict = read_fasta("horrible.fasta")
if type(seqDict) != dict:
print ">> Problem with read_fasta: answer is not a dictionary, it is a %s." % type(seqDict)
elif len(seqDict) != 22:
print ">> Problem with read_fasta: # of keys in dictionary (%s) does not match expected (%s)" % (len(seqDict), 22)
else:
print "read_fasta: Passed."
##### testing rand_seq
randSeq1 = rand_seq(23)
randSeq2 = rand_seq(23)
if type(randSeq1) != str:
print ">> Problem with rand_seq: answer is not a string, it is a %s." % type(randSeq1)
elif len(randSeq1) != 23:
print ">> Problem with rand_seq: answer length (%s) does not match expected (%s)." % (len(randSeq1), 23)
elif randSeq1 == randSeq2:
print ">> Problem with rand_seq: generated the same sequence twice (%s) -- are you sure this is random?" % randSeq1
else:
print "rand_seq: Passed."
##### testing shuffle_nt
shuffSeq = shuffle_nt("AAAAAAGTTTCCC")
if type(shuffSeq) != str:
print ">> Problem with shuffle_nt: answer is not a string, it is a %s." % type(shuffSeq)
elif len(shuffSeq) != 13:
print ">> Problem with shuffle_nt: answer length (%s) does not match expected (%s)." % (len(shuffSeq), 12)
elif shuffSeq == "AAAAAAGTTTCCC":
print ">> Problem with shuffle_nt: answer is exactly the same as the input. Are you sure this is shuffling?"
elif shuffSeq.count('A') != 6:
print ">> Problem with shuffle_nt: answer doesn't contain the same # of each nt as the input."
else:
print "shuff_seq: Passed."
"""
Explanation: (F) (1pt) Run the code below to show that all of your functions work. Try to fix any that have problems.
End of explanation
"""
for i in range(20):
print rand_seq(50)
"""
Explanation: 4. Using your functions (5pts)
Use the functions you created above to complete the following.
(A) (1pt) Create 20 random nucleotide sequences of length 50 and print them to the screen.
End of explanation
"""
seqDict = read_fasta("horrible.fasta")
for seqID in seqDict:
print reverse_compl(seqDict[seqID])
"""
Explanation: (B) (1pt) Read in horrible.fasta into a dictionary. For each sequence, print its reverse complement to the screen.
End of explanation
"""
seqDict = read_fasta("horrible.fasta")
print "SeqID\tLen\tGC"
for seqID in seqDict:
seq = seqDict[seqID]
seqLen = len(seq)
seqGC = gc(seq)
print seqID + "\t" + str(seqLen) + "\t" + str(seqGC)
"""
Explanation: (C) (3pts) Read in horrible.fasta into a dictionary. For each sequence, find the length and the gc content. Print the results to the screen in the following format:
SeqID Len GC
... ... ...
That is, print the header shown above (separating each column's title by a tab (\t)), followed by the corresponding info about each sequence on a separate line. The "columns" should be separated by tabs. Remember that you can do this printing as you loop through the dictionary... that way you don't have to store the length and gc content.
(In general, this is the sort of formatting you should use when printing data files!)
End of explanation
"""
# Method 1
# Generic kmer generation for any k and any alphabet (default is DNA nt)
# Pretty fast
def get_kmers1(k, letters=['A','C','G','T']):
kmers = []
choices = len(letters)
finalNum = choices ** k
# initialize to blank strings
for i in range(finalNum):
kmers.append("")
# imagining the kmers lined up vertically, generate one "column" at a time
for i in range(k):
consecReps = choices ** (k - (i + 1)) #number of times to consecutively repeat each letter
patternReps = choices ** i #number of times to repeat pattern of letters
# create the current column of letters
index = 0
for j in range(patternReps):
for m in range(choices):
for n in range(consecReps):
kmers[index] += letters[m]
index += 1
return kmers
get_kmers1(3)
# Method 2
# Generate numbers, discard any that aren't 1/2/3/4's, convert to letters.
# Super slow~
def get_kmers2(k):
discard = ["0", "5", "6", "7", "8", "9"]
convert = {"1": "A", "2": "T", "3": "G", "4": "C"}
min = int("1" * k)
max = int("4" * k)
kmers = []
tmp = []
for num in range(min, (max + 1)): # generate numerical kmers
good = True
for digit in str(num):
if digit in discard:
good = False
break
if good == True:
tmp.append(num)
for num in tmp: # convert numerical kmers to ATGC
result = ""
for digit in str(num):
result += convert[digit]
kmers.append(result)
return kmers
# Method 3 (by Nate)
# A recursive solution. Fast!
# (A recursive function is a function that calls itself)
def get_kmers3(k):
nt = ['A', 'T', 'G', 'C']
k_mers = []
if k == 1:
return nt
else:
for i in get_kmers3(k - 1):
for j in nt:
k_mers.append(i + j)
return k_mers
# Method 4 (by Nate)
# Fast
def get_kmers4(k):
nt = ['A', 'T', 'G', 'C']
k_mers = []
total_kmers = len(nt)**k
# make a list of size k with all zeroes.
# this keeps track of which base we need at each position
pointers = []
for p in range(k):
pointers.append(0)
for k in range(total_kmers):
# use the pointers to generate the next k-mer
k_mer = ""
for p in pointers:
k_mer += nt[p]
k_mers.append(k_mer)
# get the pointers ready for the next k-mer by updating them left to right
pointersUpdated = False
i = 0
while not pointersUpdated and i < len(pointers):
if pointers[i] < len(nt) - 1:
pointers[i] += 1
pointersUpdated = True
else:
pointers[i] = 0
i += 1
return k_mers
# Method 5 (by Justin Becker, bootcamp 2013)
# Fast!
def get_kmers5(k): #function requires int as an argument
kmers = [""]
for i in range(k): #after each loop, kmers will store the complete set of i-mers
currentNumSeqs = len(kmers)
for j in range(currentNumSeqs): #each loop takes one i-mer and converts it to 4 (i+1)=mers
currentSeq = kmers[j]
kmers.append(currentSeq + 'C')
kmers.append(currentSeq + 'T')
kmers.append(currentSeq + 'G')
kmers[j] += 'A'
return kmers
# Method 6 (by Nick)
# Convert to base-4
def get_kmers6(k):
bases = ['a', 'g', 'c', 't']
kmers = []
for i in range(4**k):
digits = to_base4(i, k)
mystr = ""
for baseidx in digits:
mystr += bases[baseidx]
kmers.append(mystr)
return kmers
# convert num to a k-digit base-4 int
def to_base4(num, k):
digits = []
while k > 0:
digits.append(num/4**(k-1))
num %= 4**(k-1)
k -= 1
return digits
# Below: more from Nate
import random
import time
alphabet = ['A', 'C', 'G', 'T']
## Modulus based
def k_mer_mod(k):
k_mers = []
for i in range(4**k):
k_mer = ''
for j in range(k):
k_mer = alphabet[(i/4**j) % 4]+ k_mer
k_mers.append(k_mer)
return k_mers
## maybe the range operator slows things down by making a big tuple
def k_mer_mod_1(k):
k_mers = []
total = 4**k
i = 0
while i < total:
k_mer = ''
for j in range(k):
k_mer = alphabet[(i/4**j) % 4]+ k_mer
k_mers.append(k_mer)
i += 1
return k_mers
## Does initializing the list of k_mers help?
def k_mer_mod_2(k):
k_mers = [''] * 4**k
for i in range(4**k):
k_mer = ''
for j in range(k):
k_mer = alphabet[(i/4**j) % 4] + k_mer
k_mers[i] = k_mer
return k_mers
## What's faster? element assignment or hashing?
def k_mer_mod_set(k):
k_mers = set()
for i in range(4**k):
k_mer = ''
for j in range(k):
k_mer = alphabet[(i/4**j) % 4] + k_mer
k_mers.add(k_mer)
return list(k_mers)
## does creating the string up front help?
#def k_mer_mod_3(k):
#n k_mers = []
# k_mer = "N" * k
# for i in range(4**k):
# for j in range(k):
# k_mer[j] = alphabet[(i/4**j) % 4]
# k_mers.append(k_mer)
# return k_mers
# Nope! String are immutable, dummy!
# maybe we can do something tricky with string substitution
def k_mer_mod_ssub(k):
template = "\%s" * k
k_mers = []
for i in range(4**k):
k_mer = []
for j in range(k):
k_mer.append(alphabet[(i/4**j) % 4])
        k_mers.append(template % tuple(k_mer))  # string formatting needs a tuple, not a list
return k_mers
# what about using a list?
def k_mer_mod_4(k):
k_mers = [''] * 4**k
k_mer = [''] * k
for i in range(4**k):
for j in range(k):
k_mer[j] = alphabet[(i/4**j) % 4]
k_mers[i] = "".join(k_mer)
return k_mers
## recursive version
def k_mer_recursive(k):
if k == 0:
return ['']
else:
k_mers = []
for k_mer in k_mer_recursive(k-1):
for n in alphabet:
k_mers.append("%s%s" % (k_mer, n))
return k_mers
## That works, but what I wanted to be like, really obnoxious about it
def k_mer_recursive_2(k):
if k == 0:
return ['']
else:
k_mers = []
[[k_mers.append("%s%s" % (k_mer, n)) for n in alphabet] for k_mer in k_mer_recursive_2(k-1)]
return k_mers
# using list instead of strings to store the k_mers
def k_mer_recursive_3(k, j = False):
if k == 0:
return [[]]
else:
k_mers = []
[[k_mers.append((k_mer + [n])) if j else k_mers.append("".join(k_mer + [n])) for n in alphabet] for k_mer in k_mer_recursive_3(k-1, True)]
return k_mers
## stochastic (I have a good feeling about this one!)
def k_mer_s(k):
s = set()
i = 0
while i < 4**k:
k_mer = ''
for j in range(k):
k_mer = k_mer + random.choice(alphabet)
if k_mer not in s:
s.add(k_mer)
i += 1
return list(s)
## I sure hope this works because now we're pretty much cheating
import array
def k_mer_mod_array(k):
k_mers = []
k_mer = array.array('c', ['N'] * k)
for i in range(4**k):
for j in range(k):
k_mer[j] = alphabet[(i/4**j) % 4]
k_mers.append("".join(k_mer))
return k_mers
## That could have gone better.
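# Yet another approach (added for reference, not one of the original answers):
# the standard library's itertools.product builds the Cartesian product
# directly, which is both short and fast.
import itertools
def get_kmers_itertools(k):
    return ["".join(p) for p in itertools.product("ACGT", repeat=k)]
print get_kmers_itertools(2)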
"""
Explanation: Bonus question: K-mer generation (+2 bonus points)
This question is optional, but if you complete it, I'll give you two bonus points. You won't lose points if you skip it.
Create a function called get_kmers that takes a single integer parameter, k, and returns a list of all possible k-mers of A/T/G/C. For example, if the supplied k was 2, you would generate all possible 2-mers, i.e. [AA, AT, AG, AC, TA, TT, TG, TC, GA, GT, GG, GC, CA, CT, CG, CC].
Notes:
- This function must be generic, in the sense that it can take any integer value of k and produce the corresponding set of k-mers.
- As there are $4^k$ possible k-mers for a given k, stick to smaller values of k for testing!!
- I have not really taught you any particularly obvious way to solve this problem, so feel free to get creative in your solution!
There are many ways to do this, and plenty of examples online. Since the purpose of this question is to practice problem solving, don't directly look up "k-mer generation"... try to figure it out yourself. You're free to look up more generic things, though.
End of explanation
"""
def nt_counts(seq):
counts = {}
for nt in seq:
if nt not in counts:
counts[nt] = 1
else:
counts[nt] += 1
return counts
nt_counts("AAAAATTTTTTTGGGGC")
"""
Explanation: Extra problems (0pts)
(A) Create a function that counts the number of occurrences of each nt in a specified string. Your function should accept a nucleotide string as a parameter, and should return a dictionary with the counts of each nucleotide (where the nt is the key and the count is the value).
End of explanation
"""
def generate_nucleotide(length, freqs):
import random
seq = ""
samplingStr = ""
# maybe not the best way to do this, but fun:
# create a list with the indicated freq of nt
for nt in freqs:
occurPer1000 = int(1000*freqs[nt])
samplingStr += nt*occurPer1000
samplingList = list(samplingStr)
# sample from the list
for i in range(length):
newChar = random.choice(samplingList)
seq += newChar
return seq
generate_nucleotide(100, {'A':0.60, 'G':0.10, 'C':0.25, 'T':0.05})
# let's check if it's really working
n = 10000
testSeq = generate_nucleotide(n, {'A':0.60, 'G':0.10, 'C':0.25, 'T':0.05})
obsCounts = nt_counts(testSeq)
for nt in obsCounts:
print nt, float(obsCounts[nt]) / n
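# An alternative sketch (added note, not one of the original answers): numpy's
# random.choice accepts a probability vector directly, so we can sample
# without building the 1000-character sampling string above.
def generate_nucleotide_np(length, freqs):
    import numpy as np
    nts = list(freqs.keys())
    probs = [freqs[nt] for nt in nts]
    return "".join(np.random.choice(nts, size=length, p=probs))
print generate_nucleotide_np(50, {'A':0.60, 'G':0.10, 'C':0.25, 'T':0.05})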
"""
Explanation: (B) Create a function that generates a random nt sequence of a specified length with specified nt frequencies. Your function should accept as parameters:
- a length
- a dictionary of nt frequencies.
and should return the generated string. You'll need to figure out a way to use the supplied frequencies to generate the sequence.
An example of the nt freq dictionary could be: {'A':0.60, 'G':0.10, 'C':0.25, 'T':0.05}
End of explanation
"""
|
p-chambers/occ_airconics | examples/notebooks/notebook_examples.ipynb | bsd-3-clause | from airconics import LiftingSurface, Engine, Fuselage
import airconics.AirCONICStools as act
from airconics.Addons.WebServer.TornadoWeb import TornadoWebRenderer
from IPython.display import display
"""
Explanation: Notebook for Airconics examples
This IPython notebook contains examples for generating and rendering the AirCONICS parametric transonic airliner example using the interactive WebServer from PythonOCC-contrib. Parts are generated under their respective headings and rendered collectively in the final cells.
For examples using the pythonocc-core Qt viewer, refer to the airconics examples/core directory
End of explanation
"""
Propulsion = 1
EngineDia = 2.9
FuselageScaling = [55.902, 55.902, 55.902]
WingScaleFactor = 44.56
WingChordFactor = 1.0
Topology = 1
EngineSpanStation = 0.31
EngineCtrBelowLE = 0.3558
EngineCtrFwdOfLE = 0.9837
Scarf_deg = 3
# Derived Parameters
FuselageHeight = FuselageScaling[2]*0.105
FuselageLength = FuselageScaling[0]
FuselageWidth = FuselageScaling[1]*0.106
WingApex = [0.1748*FuselageLength,0,-0.0523*FuselageHeight]
# Fin:
FinChordFact = 1.01
FinScaleFact = WingScaleFactor/2.032
# TailPlane
TPChordFact = 1.01
TPScaleFact = WingScaleFactor * 0.388
# Engine:
NacelleLength = 1.95*EngineDia
"""
Explanation: Parameter Definitions
Parameters used here correspond to a geometry similar to that of the Boeing 787-8
End of explanation
"""
# Import all example functional definitions for the Common Research Model (CRM) Wing:
from airconics.examples.wing_example_transonic_airliner import *
# Position of the apex of the wing
P = WingApex
# Class definition
NSeg = 11
ChordFactor = 1
ScaleFactor = 50
# Generate (surface building is done during construction of the class)
Wing = LiftingSurface(P, mySweepAngleFunctionAirliner,
myDihedralFunctionAirliner,
myTwistFunctionAirliner,
myChordFunctionAirliner,
myAirfoilFunctionAirliner,
SegmentNo=NSeg,
ScaleFactor=WingScaleFactor,
ChordFactor=WingChordFactor)
RootChord = Wing.RootChord
# Display
renderer = TornadoWebRenderer()
Wing.Display(renderer)
display(renderer)
"""
Explanation: Wing, Transonic Airliner
Formulation of lifting surfaces in occ_airconics (and AirCONICS) follows the suggestions in Sobester [1] in which geometry--attached curvilinear functionals are used instead of parameters for shape definition. That is, $G(\textbf{f}, \textbf{X})$, where
$$\qquad \textbf{f} = \left[ f_1(\textbf{X}_1), f_2(\textbf{X}_2), ... f_m(\textbf{X}_m)\right],$$
and
$$\textbf{X}_i = \left[x_1^i, x_2^i,...\right], \forall i = 1,...m$$
as opposed to the conventional $G(\bf{X})$ formulation where the shape $G$ changes in response to changes in design parameters $\textbf{X}$. The functions $f_i$ are defined by:
$Sweep (\epsilon)$
$Chord (\epsilon)$
$Rotation (\epsilon)$
$Twist (\epsilon)$
$Airfoil (\epsilon)$
where $\epsilon$ represents the spanwise coordinate ranging from 0 at the root of the wing to 1 at the tip. Output of the airfoil function uses the airconics.primitives.Airfoil class here, which fits a NURBS curve to airfoil coordinates.
The following code demonstrates construction of a wing using built in examples for a transonic airliner wing and tailplane (below).
End of explanation
"""
from OCC.gp import gp_Ax1, gp_Pnt, gp_Dir
from airconics.examples.tailplane_example_transonic_airliner import *
# Position of the apex of the fin
P = [36.98-0.49-0.02, 0.0, 2.395-0.141]
SegmentNo = 10
Fin = liftingsurface.LiftingSurface(P, mySweepAngleFunctionFin,
myDihedralFunctionFin,
myTwistFunctionFin,
myChordFunctionFin,
myAirfoilFunctionFin,
SegmentNo=SegmentNo,
ChordFactor=FinChordFact,
ScaleFactor=FinScaleFact)
# Create the rotation axis centered at the apex point in the x direction
RotAxis = gp_Ax1(gp_Pnt(*P), gp_Dir(1, 0, 0))
Fin.RotateComponents(RotAxis, 90)
# Position of the apex of the tailplane
P = [43, 0.000, 1.633+0.02]
SegmentNo = 100
ChordFactor = 1.01
ScaleFactor = 17.3
TP = liftingsurface.LiftingSurface(P, mySweepAngleFunctionTP,
myDihedralFunctionTP,
myTwistFunctionTP,
myChordFunctionTP,
myAirfoilFunctionTP,
SegmentNo=SegmentNo,
ChordFactor=TPChordFact,
ScaleFactor=TPScaleFact)
# Display
renderer = TornadoWebRenderer()
Fin.Display(renderer)
TP.Display(renderer)
display(renderer)
"""
Explanation: Tailplane, Transonic Airliner
The same Lifting Surface class is used here to generate the fin and tailplane of the aircraft, using a different set of input functionals (also defined in airconics.examples).
End of explanation
"""
NoseLengthRatio=0.182
TailLengthRatio=0.293
Fus = Fuselage(NoseLengthRatio, TailLengthRatio, Scaling=FuselageScaling,
NoseCoordinates=[0., 0., 0],
CylindricalMidSection=False,
Maxi_attempt=5)
# Display
renderer = TornadoWebRenderer()
Fus.Display(renderer)
display(renderer)
# Export (can be commented out)
# act.export_STEPFile([Fus['OML']], 'fuselage.stp')
"""
Explanation: Fuselage Transonic Airliner
Fuselage shapes are created following the parameterisation used in Sobester [2]. That is, the outer mould line (OML) is split into a Nose, Central and Tail section, the length of which is described on input to Fuselage class as a percentage of the total length. Rib curves are then formed by fitting a NURBS curve to the intersection points of sectional planar cuts and the guide curves of the extremeties of the OML e.g. Port, top and bottom curves. The OML is fitted in occ_airconics using the Open CASCADE ThruSections loft.
End of explanation
"""
# WingBodyFairing - A simple ellipsoid:
from airconics.base import AirconicsShape
WTBFZ = RootChord*0.009 #787: 0.2
WTBFheight = 1.8*0.1212*RootChord #787:2.7
WTBFwidth = 1.08*FuselageWidth
WTBFXCentre = WingApex[0] + RootChord/2.0 + RootChord*0.1297 # 787: 23.8
WTBFlength = 1.167*RootChord #787:26
WBF_shape = act.make_ellipsoid([WTBFXCentre, 0, WTBFZ], WTBFlength, WTBFwidth, WTBFheight)
WBF = AirconicsShape(components={'WBF': WBF_shape})
"""
Explanation: Wing-Body Fairing:
The wing-body fairing is here created as a simple ellipsoid shape around the root section of the wing.
Note that this component will be displayed only in the final model.
End of explanation
"""
EngineSection, HChord = act.CutSect(Wing['Surface'], EngineSpanStation)
Chord = HChord.GetObject()
CEP = Chord.EndPoint()
Centreloc = [CEP.X()-EngineCtrFwdOfLE*NacelleLength,
CEP.Y(),
CEP.Z()-EngineCtrBelowLE*NacelleLength]
eng = Engine(HChord,
CentreLocation=Centreloc,
ScarfAngle=Scarf_deg,
HighlightRadius=EngineDia/2.0,
MeanNacelleLength=NacelleLength)
# Display
renderer = TornadoWebRenderer()
eng.Display(renderer)
display(renderer)
"""
Explanation: Engine + Pylon
First, obtain the wing section and chord at which the engine will be fitted, then fit then engine. The default inputs to the Engine class produce a turbofan engine with Nacelle similar to that of the RR Trent 1000 / GEnx and its pylon (Currently a flat plate only).
End of explanation
"""
# Trim the inboard section of the main wing:
CutCirc = act.make_circle3pt([0,WTBFwidth/4.,-45], [0,WTBFwidth/4.,45], [90,WTBFwidth/4.,0])
CutCircDisk = act.PlanarSurf(CutCirc)
Wing['Surface'] = act.TrimShapebyPlane(Wing['Surface'], CutCircDisk)
#Mirror the main wing and tailplane using class methods:
Wing2 = Wing.MirrorComponents(plane='xz')
TP2 = TP.MirrorComponents(plane='xz')
eng2 = eng.MirrorComponents(plane='xz')
"""
Explanation: Miscellaneous operations
End of explanation
"""
renderer = TornadoWebRenderer()
# display all entities:
# Fuselage and wing-body fairing
Fus.Display(renderer)
WBF.Display(renderer)
# #The Wings:
Wing.Display(renderer)
Wing2.Display(renderer)
#The Tailplane:
TP.Display(renderer)
TP2.Display(renderer)
#The Fin:
Fin.Display(renderer)
#The Engines:
eng.Display(renderer)
eng2.Display(renderer)
# Finally show the renderer
display(renderer)
"""
Explanation: IPython Cell Renderer:
End of explanation
"""
from airconics import Topology
from IPython.display import Image
import pydot
topo_renderer = TornadoWebRenderer()
topo = Topology()
# Note: no checks are done on the validity of the tree yet,
topo.AddPart(Fus, 'Fuselage', 3)
topo.AddPart(Fin, 'Fin', 0)
# Need to add a mirror plane here, arity zero
from OCC.gp import gp_Ax2, gp_Dir, gp_Pnt
xz_pln = gp_Ax2(gp_Pnt(0, 0, 0), gp_Dir(0, 1, 0))
topo.AddPart(xz_pln, 'Mirror', 0)
# These are the mirrored entities, with their arities
topo.AddPart(TP, 'Tail Plane', 0)
topo.AddPart(Wing, 'Wing', 1)
topo.AddPart(eng, 'Engine', 0)
# print the Topology (resembles a LISP tree)
print(topo)
# Create the graph with pydot
graph = pydot.graph_from_dot_data(topo.export_graphviz())
Image(graph.create_png())
# This line will mirror geometry 'under' (added after) the mirror plane
topo.Build()
topo.Display(topo_renderer)
display(topo_renderer)
"""
Explanation: Development
Topology model
This is a work in progress towards a topologically flexible model based on the tree-type definition described in Sobester [1]. Note, however, that the geometry is not currently defined by the tree; the tree is simply stored as a result of adding components - this is for demonstration only, and the process is yet to be automated.
The $xz$ mirror plane is included in this representation, between central objects (Fuselage, Fin) and the mirrored objects (Tail Plane, Wing, Engine).
End of explanation
"""
# Setup
# Create mock components, without generating any geometry
fus = Fuselage(construct_geometry=False)
engine = Engine(construct_geometry=False)
fin = LiftingSurface(construct_geometry=False)
mirror_pln = gp_Ax2()
wing = LiftingSurface(construct_geometry=False)
Vfin = LiftingSurface(construct_geometry=False)
# For now we must manually add parts and affinities
topo = Topology()
topo.AddPart(fus, 'Fuselage', 4)
topo.AddPart(engine, 'engine', 0)
topo.AddPart(fin, 'fin', 0)
topo.AddPart(mirror_pln, 'mirror_pln', 0)
topo.AddPart(wing, 'wing', 0)
topo.AddPart(Vfin, 'V-Fin', 0)
print(topo)
graph = pydot.graph_from_dot_data(topo.export_graphviz())
Image(graph.create_png())
"""
Explanation: Let's try some further tests of the Topology class representation using some other examples. For now, these are empty geometries, and inputs to the Fuselage, LiftingSurface and Engine classes are not yet included in the Topology tree.
Predator UAV
Photo source: US Air Force
End of explanation
"""
# Setup
# Create mock components, without generating any geometry
fus = Fuselage(construct_geometry=False)
mirror_pln = gp_Ax2()
engine = Engine(construct_geometry=False)
wing = LiftingSurface(construct_geometry=False)
tailplane = LiftingSurface(construct_geometry=False)
tail_fin = LiftingSurface(construct_geometry=False)
topo = Topology()
topo.AddPart(fus, 'Fuselage', 3)
topo.AddPart(mirror_pln, 'mirror', 0)
topo.AddPart(engine, 'powerplant', 0)
topo.AddPart(tailplane, 'Tailplane', 1)
topo.AddPart(tail_fin, "Tail fin", 0)
topo.AddPart(wing, "wing", 0)
print(topo)
graph = pydot.graph_from_dot_data(topo.export_graphviz())
Image(graph.create_png())
"""
Explanation: Fairchild Republic A-10 Thunderbolt
Photo source: Airman Magazine 1999
End of explanation
"""
# Setup
# Create mock components, without generating any geometry
fus = Fuselage(construct_geometry=False)
mirror_pln = gp_Ax2()
engine = Engine(construct_geometry=False)
wing_in = LiftingSurface(construct_geometry=False)
tailplane = LiftingSurface(construct_geometry=False)
pod = Fuselage(construct_geometry=False)
finup = LiftingSurface(construct_geometry=False)
findown = LiftingSurface(construct_geometry=False)
wing_out = LiftingSurface(construct_geometry=False)
topo = Topology()
topo.AddPart(fus, 'Fuselage', 3)
topo.AddPart(mirror_pln, 'mirror', 0)
topo.AddPart(engine, 'powerplant', 0)
topo.AddPart(wing, "wing", 0)
topo.AddPart(wing_in, "TP/inbbd wing", 1)
topo.AddPart(pod, 'Pod/tail boom', 3)
topo.AddPart(wing_out, "outbd wing", 0)
topo.AddPart(finup, "Fin (up)", 0)
topo.AddPart(findown, "Fin (down)", 0)
for node in topo._Tree:
print(node)
graph = pydot.graph_from_dot_data(topo.export_graphviz())
Image(graph.create_png())
"""
Explanation: Scaled Composites Proteus
Photo source: NASA
End of explanation
"""
|
Pybonacci/notebooks | Explorando el Planeta Nueve con Python usando poliastro.ipynb | bsd-2-clause | !conda install -qy poliastro --channel poliastro # Install the dependencies with conda
!pip uninstall poliastro -y
#!pip install -e /home/juanlu/Development/Python/poliastro.org/poliastro
!pip install https://github.com/poliastro/poliastro/archive/planet9-fixes.zip # Install the development version
%load_ext version_information
%version_information numpy, astropy, scipy, matplotlib, numba, poliastro
"""
Explanation: Introduction
"Sometimes I think, how lucky we are to live in this time, the first moment in human history when we are, in fact visiting other worlds and engaging in a deep reconnaissance of the cosmos" - Carl Sagan
While the echoes of Pluto's "expulsion" from our planetary system (or rather, of its relegation to the dwarf-planet division) were still ringing, two scientists from the California Institute of Technology (Caltech to its friends) suddenly published a paper hypothesizing the existence of a planet more massive than the Earth far beyond the orbit of Neptune. Batygin and Brown, the researchers behind the work, have christened their as-yet-unobserved discovery Planet Nine. I could not help remembering Carl Sagan's quote while writing this article :)
For the details of this fantastic advance and an analysis of its implications I refer you to Daniel Marín's excellent articles about "Planet Nine" on the Eureka blog. At Pybonacci we are going to add our grain of sand and, as the astronomy and free-software enthusiasts that we are, go over Batygin and Brown's original paper (freely available as a PDF) and play with the data it provides the way we like best: using Python ;)
Note: The analysis presented below lacks proper scientific rigor and should in no case be taken as a starting point for a serious search for Planet Nine. That said, if anyone finds it thanks to this article, the least they can do is buy me a coffee :D
Python + Orbits = poliastro
For this analysis we will use poliastro, a Python library for interplanetary astrodynamics that I am developing myself. poliastro will let us work with the orbital elements of the bodies we analyze, and its dependencies will help with the task:
Thanks to astropy we will be able to convert between reference systems, use physical units and handle times with astronomical rigor (not an easy thing).
The jplephem library will let us use the ephemerides from NASA's Jet Propulsion Laboratory to locate the positions of the planets.
In addition, naturally, we will use NumPy, SciPy and matplotlib. To install everything you need, you only have to run one command:
$ conda install poliastro --channel poliastro
You can also use pip (because deep down I like you all):
$ pip install numpy scipy matplotlib astropy jplephem poliastro
(Note that numba is optional)
However, while writing this article I ran into things that did not work and had to be fixed on the fly, so I will use a development branch with some temporary fixes:
End of explanation
"""
%matplotlib inline
import matplotlib
matplotlib.style.use('pybonacci') # https://gist.github.com/Juanlu001/edb2bf7b583e7d56468a
import matplotlib.pyplot as plt
import numpy as np
from astropy import time
from astropy import units as u
from poliastro.bodies import Sun
from poliastro.twobody import angles, State
from poliastro import ephem
from poliastro.plotting import plot, OrbitPlotter
epoch = time.Time("2015-01-24 12:00", scale="utc").tdb
"""
Explanation: The orbit of Planet Nine
It bears repeating: the orbit of Planet Nine is not known. What Batygin and Brown have done in their analysis is, paraphrasing Daniel Marín, to tighten the net around it. However, in the paper they work with characteristic values, and that we can take advantage of, up to a point, as we will see shortly.
In the abstract of the paper we find this sentence:
We find that the observed orbital alignment can be maintained by a distant eccentric planet with mass $\gtrsim 10 m_{\bigoplus}$ whose orbit lies in approximately the same plane as those of the distant KBOs, but whose perihelion
is $180^{\circ}$ away from the perihelia of the minor bodies. [Emphasis mine]
At the end of the paper, in figure 9, we have the data for the tentative orbit used in the simulations:
Right ascension of the ascending node $\Omega = 100^{\circ}$
Argument of perigee $\omega = 150^{\circ}$
Inclination $i = 30^{\circ}$
Semi-major axis $a = 700~\text{AU}$
Eccentricity $e = 0.6$
(Whatever mass the planet has does not affect us)
With this we could already draw the shape of the orbit, but we are missing a crucial parameter... where is Planet Nine within this orbit? If we knew that, we would not be talking about speculation. Finding out is not easy, because with this semi-major axis we are talking about extremely long periods. Let's get to work!
End of explanation
"""
a = 700 * u.AU
ecc=0.6 * u.one
inc=30 * u.deg
raan=100 * u.deg
argp=150 * u.deg
nu=180 * u.deg # just a trial value!
planet9 = State.from_classical(Sun, a, ecc, inc, raan, argp, nu, # just a trial value
epoch)
period = planet9.period.to(u.year)
period
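# Quick comparison (added note): Halley's comet takes about 75 years and Pluto
# about 250, so this is a different scale entirely.
print("Planet Nine period: {:.0f} years".format(period.value))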
"""
Explanation: We are going to create a State object to represent Planet Nine, adding to the parameters estimated in the paper a true anomaly value of $180^{\circ}$, that is: at the farthest point (worst case). The period of the orbit will be:
End of explanation
"""
plot(planet9)
"""
Explanation: You read that right: this planet would take almost 20,000 years to complete one orbit around the Sun. For comparison, the period of Halley's comet is 75 years, and Pluto's is about 250 years.
To visualize the shape of the orbit we only need to use the plot function:
End of explanation
"""
from matplotlib.patches import Wedge, PathPatch
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
# Convert the mean anomalies of 90 and 270 degrees into
# true anomalies
nu_lower = angles.M_to_nu(1 * np.pi * u.rad / 2, planet9.ecc)
nu_upper = angles.M_to_nu(3 * np.pi * u.rad / 2, planet9.ecc)
# Equiprobable regions
fifty_far = Wedge(
(0, 0), planet9.r_a.to(u.km).value,
nu_lower.to(u.deg).value, nu_upper.to(u.deg).value,
color='#cccccc',
zorder=0
)
fifty_close = Wedge(
(0, 0), planet9.r_a.to(u.km).value,
nu_upper.to(u.deg).value, nu_lower.to(u.deg).value,
color='#999999',
zorder=0
)
"""
Explanation: (Further down we will plot it together with the orbits of the known planets, but the scale of the figure already gives an idea of the distances involved)
But there is more: the orbit of Planet Nine would be quite eccentric, and by Kepler's second law it spends most of the time in the farthest part. Let's try to visualize the consequences of this law by shading two "equiprobable" regions of the orbit, that is: Planet Nine spends 50% of the time in each of them.
End of explanation
"""
# Retrieve the Earth's orbit for comparison
r_earth, v_earth = ephem.planet_ephem(ephem.EARTH, epoch)
earth = State.from_vectors(Sun, r_earth.to(u.km), v_earth.to(u.km / u.s), epoch)
# And while we are at it, Neptune's orbit
r_nep, v_nep = ephem.planet_ephem(ephem.PLUTO, epoch)
neptune = State.from_vectors(Sun, r_nep.to(u.km), v_nep.to(u.km / u.s), epoch)
"""
Explanation: To have something to compare against we will also plot the orbits of the Earth and Neptune. For that, poliastro will use files called SPK that contain precise information about the orbits of the planets of the solar system.
End of explanation
"""
# Create the figure
fig, ax = plt.subplots(figsize=(8, 8))
op = OrbitPlotter(ax)
planet9_point, planet9_orbit = op.plot(planet9)
planet9_point.set_color("#6600ff")
planet9_orbit.set_color("#6600ff")
# Mask the circular sectors with the orbit
mask = PathPatch(planet9_orbit.get_path(), fc='none', lw=0)
ax.add_patch(mask)
ax.add_patch(fifty_far)
ax.add_patch(fifty_close)
fifty_far.set_clip_path(mask)
fifty_close.set_clip_path(mask)
# Zoom in on the Solar System
ax_zoom = zoomed_inset_axes(ax, 8, loc=3, axes_kwargs={'axisbg': '#fafafa'})
# Repeat some of the plots
op_zoom = OrbitPlotter(ax_zoom)
op_zoom.set_frame(*planet9.pqw())
earth_point, earth_orbit = op_zoom.plot(earth)
nepune_point, _ = op_zoom.plot(neptune)
earth_orbit.set_linestyle("solid") # so that something is visible!
# Properties of the zoomed section
ax_zoom.set_xlim(-7e9, 5e9)
ax_zoom.set_ylim(-4e9, 5e9)
ax_zoom.set_xticks([])
ax_zoom.set_yticks([])
ax_zoom.set_xlabel("")
ax_zoom.set_ylabel("")
ax_zoom.grid(False)
ax_zoom.set_title("8x zoom")
mark_inset(ax, ax_zoom, loc1=1, loc2=4, fc="none", ec='0.3')
# Figure legend
leg = ax.legend(
    [planet9_point, earth_point, nepune_point, fifty_close, fifty_far],
    ["Planet Nine", "Earth", "Neptune", "Perihelion", "Aphelion"],
numpoints=1
)
leg.get_frame().set_facecolor('#fafafa')
"""
Explanation: For the rest we will have to play a bit with matplotlib and the plotting functions that poliastro provides.
End of explanation
"""
from poliastro import iod
from poliastro.util import norm
date_launch = time.Time('2016-02-01 12:00', scale='utc').tdb
time_of_flight = 200 * u.year
date_arrival = date_launch + time_of_flight
r_0, v_earth = ephem.planet_ephem(ephem.EARTH, date_launch)
r_f = planet9.propagate(time_of_flight).r
v_0, v_f = iod.lambert(Sun.k, r_0, r_f, time_of_flight, rtol=5)
"""
Explanation: Two things can be seen in this figure:
As we said before, Planet Nine spends half of its period in the part farthest from the Sun (and therefore from us), the aphelion (light grey sector). In contrast, it spends the other half of the time accelerating towards the perihelion. It is clear, then, that the planet is most likely far away rather than close.
And speaking of far: it is very far. Extremely far. Of course it is close compared to Alpha Centauri, but what is new about this case is precisely that nobody expected to find a planet of such a considerable size at such a great distance.
Another way to appreciate the enormity of these distances is to try to work out how much it would cost us to get there. Keep reading!
How do we get there?
<div class="alert alert-warning">**Important**: These results are provisional and pending review because of problems with the algorithm, which does not converge well. In fact, you can see that I had to specify an outrageous relative tolerance. After the time invested I preferred to publish the code, which at least shows how to use the library, even though it has exposed its limitations. [I have already opened an issue in poliastro](https://github.com/poliastro/poliastro/issues/112) to see where the problem is, although it is probably due to the strong numerical instability of hitting a target that is 168 000 000 000 km away.</div>
This is where the science fiction really begins. We are going to study some possible trajectories we could follow to reach this hypothetical Planet Nine, even if we unfortunately end up with times of hundreds of years or velocities unattainable with our technology :)
First option: direct trip
Suppose we launch the spacecraft next week (no time to lose) and we aim to arrive in an optimistic time: 200 years. Suppose also that at the moment of launch Planet Nine is at its aphelion, which is in fact the worst case. The problem of computing what velocity we need in order to get there is called Lambert's problem, and we can solve it with poliastro.
End of explanation
"""
(norm(v_0 - v_earth)).to(u.km / u.h)
"""
Explanation: Let's look at the magnitude of the departure velocity:
End of explanation
"""
traj1 = State.from_vectors(
Sun,
r_0.to(u.km),
v_0.to(u.km / u.s),
date_launch
)
op = OrbitPlotter(num_points=10000)
op.plot(planet9.propagate(time_of_flight))
#op.plot(earth)
plt.gca().set_autoscale_on(False)
op.plot(traj1)
"""
Explanation: It is not too outlandish, considering that the launch velocity of New Horizons (which reached Pluto in less than 10 years) was almost 60,000 km/h. Now let's look at this trajectory:
End of explanation
"""
from poliastro.maneuver import Maneuver
hoh = Maneuver.hohmann(earth, 38e6 * u.km)
hoh.get_total_cost()
"""
Explanation: Practically a straight line, as if this were a highway! Unfortunately 200 years is too long, and if we tried to reduce it the velocity requirements would only get worse. Let's look at another, slightly crazier option.
Oberth effect with the Sun
When I read about this possibility I could not help remembering the endearing XKCD comic:
The Oberth effect is the fact that impulsive maneuvers are more efficient the faster the vehicle is going. Thanks to this, one could design a trajectory that passes very close to the Sun and performs a large burn at perihelion. The sequence in the comic is too complicated, so we will split this journey into two parts:
A Hohmann transfer to 38 million kilometers from the Sun, and
a burn to reach the orbit of Planet Nine in 100 years.
Let's see what numbers we get now:
End of explanation
"""
interm = earth.apply_maneuver(hoh)
perih = interm.propagate(interm.period / 2)
norm(perih.r)
norm(perih.v)
"""
Explanation: The braking we have to do is considerable. Let's now travel to the perihelion and perform the transfer.
End of explanation
"""
v_i, _ = iod.lambert(Sun.k, perih.r.to(u.km), planet9.r.to(u.km), 100 * u.year, rtol=12)  # Again, far too large a tolerance
norm(v_i)
"""
Explanation: The difference in radii is because we did not start from a circular orbit. We skipped the step of circularizing the orbit to keep things simple.
Finally, we pray that the Lambert problem algorithm converges and takes us to Planet Nine:
End of explanation
"""
hoh.get_total_cost() + norm(v_i - perih.v)
"""
Explanation: And the total velocity requirement will be:
End of explanation
"""
op = OrbitPlotter(num_points=10000)
op.plot(earth)
op.plot(interm)
op.plot(perih)
plt.gca().set_autoscale_on(False)
#op.plot(planet9)
op.plot(State.from_vectors(Sun, perih.r, v_i))
"""
Explanation: Wow, much higher than before! Let's try to plot everything:
End of explanation
"""
|
davofis/computational_seismology | 05_pseudospectral/ps_derivative_solution.ipynb | gpl-3.0 | # Import all necessary libraries, this is a configuration step for the exercise.
# Please run it before the simulation code!
import numpy as np
import matplotlib.pyplot as plt
# Show the plots in the Notebook.
plt.switch_backend("nbagg")
"""
Explanation: <div style='background-image: url("../../share/images/header.svg") ; padding: 0px ; background-size: cover ; border-radius: 5px ; height: 250px'>
<div style="float: right ; margin: 50px ; padding: 20px ; background: rgba(255 , 255 , 255 , 0.7) ; width: 50% ; height: 150px">
<div style="position: relative ; top: 50% ; transform: translatey(-50%)">
<div style="font-size: xx-large ; font-weight: 900 ; color: rgba(0 , 0 , 0 , 0.8) ; line-height: 100%">Computational Seismology</div>
<div style="font-size: large ; padding-top: 20px ; color: rgba(0 , 0 , 0 , 0.5)">Numerical derivatives based on the Fourier Transform</div>
</div>
</div>
</div>
Seismo-Live: http://seismo-live.org
Authors:
Fabian Linder (@fablindner)
Heiner Igel (@heinerigel)
David Vargas (@dvargas)
Basic Equations
The derivative of function $f(x)$ with respect to the spatial coordinate $x$ is calculated using the differentiation theorem of the Fourier transform:
\begin{equation}
\frac{d}{dx} f(x) = \frac{1}{\sqrt{2\pi}} \int_{-\infty}^{\infty} ik F(k) e^{ikx} dk
\end{equation}
In general, this formulation can be extended to compute the n-th derivative of $f(x)$ by considering that $F^{(n)}(k) = D(k)^{n}F(k) = (ik)^{n}F(k)$. Next, the inverse Fourier transform is taken to return to physical space.
\begin{equation}
f^{(n)}(x) = \mathscr{F}^{-1}[(ik)^{n}F(k)] = \frac{1}{\sqrt{2\pi}} \int_{-\infty}^{\infty} (ik)^{n} F(k) e^{ikx} dk
\end{equation}
End of explanation
"""
def fourier_derivative(f, dx):
# Length of vector f
nx = np.size(f)
# Initialize k vector up to Nyquist wavenumber
kmax = np.pi/dx
dk = kmax/(nx/2)
k = np.arange(float(nx))
k[: int(nx/2)] = k[: int(nx/2)] * dk
k[int(nx/2) :] = k[: int(nx/2)] - kmax
# Fourier derivative
ff = np.fft.fft(f); ff = 1j*k*ff
df_num = np.real(np.fft.ifft(ff))
return df_num
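# Equivalent wavenumber construction (added sketch, not part of the original
# exercise): np.fft.fftfreq builds the same positive/negative wavenumber
# layout in one call.
def fourier_derivative_fftfreq(f, dx):
    k = 2 * np.pi * np.fft.fftfreq(np.size(f), d=dx)   # angular wavenumbers
    return np.real(np.fft.ifft(1j * k * np.fft.fft(f)))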
"""
Explanation: Exercise 1
Define a Python function called "fourier_derivative(f, dx)" that computes the first derivative of a function $f$ using the Fourier transform properties.
End of explanation
"""
# Basic parameters
# ---------------------------------------------------------------
nx = 128
x, dx = np.linspace(2*np.pi/nx, 2*np.pi, nx, retstep=True)
sigma = 0.5
xo = np.pi
# Initialize Gauss function
f = np.exp(-1/sigma**2 * (x - xo)**2)
# Numerical derivative
df_num = fourier_derivative(f, dx)
# Analytical derivative
df_ana = -2*(x-xo)/sigma**2 * np.exp(-1/sigma**2 * (x-xo)**2)
# To make the error visible, it is multiplied by 10^13
df_err = 1e13*(df_ana - df_num)
# Error between analytical and numerical solution
err = np.sum((df_num - df_ana)**2) / np.sum(df_ana**2) * 100
print('Error: %s' %err)
"""
Explanation: Exercise 2
Calculate the numerical derivative based on the Fourier transform to show that the derivative is exact. Define an arbitrary function (e.g. a Gaussian) and initialize its analytical derivative on the same spatial grid. Calculate the numerical derivative and the difference to the analytical solution. Vary the wavenumber content of the analytical function. Does it make a difference? Why is the numerical result not entirely exact?
End of explanation
"""
# Plot analytical and numerical derivatives
# ---------------------------------------------------------------
plt.subplot(2,1,1)
plt.plot(x, f, "g", lw = 1.5, label='Gaussian')
plt.legend(loc='upper right', shadow=True)
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.axis([2*np.pi/nx, 2*np.pi, 0, 1])
plt.subplot(2,1,2)
plt.plot(x, df_ana, "b", lw = 1.5, label='Analytical')
plt.plot(x, df_num, 'k--', lw = 1.5, label='Numerical')
plt.plot(x, df_err, "r", lw = 1.5, label='Difference')
plt.legend(loc='upper right', shadow=True)
plt.xlabel('$x$')
plt.ylabel('$\partial_x f(x)$')
plt.axis([2*np.pi/nx, 2*np.pi, -2, 2])
plt.show()
#plt.savefig('Fig_5.9.png')
"""
Explanation: Exercise 3
Now that the numerical derivative is available, we can visually inspect our results. Make a plot of both, the analytical and numerical derivatives together with the difference error.
End of explanation
"""
|
dfm/emcee | docs/tutorials/parallel.ipynb | mit | %config InlineBackend.figure_format = "retina"
from matplotlib import rcParams
rcParams["savefig.dpi"] = 100
rcParams["figure.dpi"] = 100
rcParams["font.size"] = 20
import multiprocessing
multiprocessing.set_start_method("fork")
"""
Explanation: (parallel)=
Parallelization
End of explanation
"""
import os
os.environ["OMP_NUM_THREADS"] = "1"
"""
Explanation: :::{note}
Some builds of NumPy (including the version included with Anaconda) will automatically parallelize some operations using something like the MKL linear algebra. This can cause problems when used with the parallelization methods described here so it can be good to turn that off (by setting the environment variable OMP_NUM_THREADS=1, for example).
:::
End of explanation
"""
import time
import numpy as np
def log_prob(theta):
t = time.time() + np.random.uniform(0.005, 0.008)
while True:
if time.time() >= t:
break
return -0.5 * np.sum(theta**2)
"""
Explanation: With emcee, it's easy to make use of multiple CPUs to speed up slow sampling.
There will always be some computational overhead introduced by parallelization so it will only be beneficial in the case where the model is expensive, but this is often true for real research problems.
All parallelization techniques are accessed using the pool keyword argument in the :class:EnsembleSampler class but, depending on your system and your model, there are a few pool options that you can choose from.
In general, a pool is any Python object with a map method that can be used to apply a function to a list of numpy arrays.
Below, we will discuss a few options.
In all of the following examples, we'll test the code with the following convoluted model:
End of explanation
"""
import emcee
np.random.seed(42)
initial = np.random.randn(32, 5)
nwalkers, ndim = initial.shape
nsteps = 100
sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob)
start = time.time()
sampler.run_mcmc(initial, nsteps, progress=True)
end = time.time()
serial_time = end - start
print("Serial took {0:.1f} seconds".format(serial_time))
"""
Explanation: This probability function will randomly sleep for a fraction of a second every time it is called.
This is meant to emulate a more realistic situation where the model is computationally expensive to compute.
To start, let's sample the usual (serial) way:
End of explanation
"""
from multiprocessing import Pool
with Pool() as pool:
sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob, pool=pool)
start = time.time()
sampler.run_mcmc(initial, nsteps, progress=True)
end = time.time()
multi_time = end - start
print("Multiprocessing took {0:.1f} seconds".format(multi_time))
print("{0:.1f} times faster than serial".format(serial_time / multi_time))
"""
Explanation: Multiprocessing
The simplest method of parallelizing emcee is to use the multiprocessing module from the standard library.
To parallelize the above sampling, you could update the code as follows:
End of explanation
"""
from multiprocessing import cpu_count
ncpu = cpu_count()
print("{0} CPUs".format(ncpu))
"""
Explanation: I have 4 cores on the machine where this is being tested:
End of explanation
"""
with open("script.py", "w") as f:
f.write("""
import sys
import time
import emcee
import numpy as np
from schwimmbad import MPIPool
def log_prob(theta):
t = time.time() + np.random.uniform(0.005, 0.008)
while True:
if time.time() >= t:
break
return -0.5*np.sum(theta**2)
with MPIPool() as pool:
if not pool.is_master():
pool.wait()
sys.exit(0)
np.random.seed(42)
initial = np.random.randn(32, 5)
nwalkers, ndim = initial.shape
nsteps = 100
sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob, pool=pool)
start = time.time()
sampler.run_mcmc(initial, nsteps)
end = time.time()
print(end - start)
""")
mpi_time = !mpiexec -n {ncpu} python script.py
mpi_time = float(mpi_time[0])
print("MPI took {0:.1f} seconds".format(mpi_time))
print("{0:.1f} times faster than serial".format(serial_time / mpi_time))
"""
Explanation: We don't quite get the factor of 4 runtime decrease that you might expect because there is some overhead in the parallelization, but we're getting pretty close with this example and this will get even closer for more expensive models.
MPI
Multiprocessing can only be used for distributing calculations across processors on one machine.
If you want to take advantage of a bigger cluster, you'll need to use MPI.
In that case, you need to execute the code using the mpiexec executable, so this demo is slightly more convoluted.
For this example, we'll write the code to a file called script.py and then execute it using MPI, but when you really use the MPI pool, you'll probably just want to edit the script directly.
To run this example, you'll first need to install the schwimmbad library because emcee no longer includes its own MPIPool.
End of explanation
"""
def log_prob_data(theta, data):
a = data[0] # Use the data somehow...
t = time.time() + np.random.uniform(0.005, 0.008)
while True:
if time.time() >= t:
break
return -0.5 * np.sum(theta**2)
data = np.random.randn(5000, 200)
sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob_data, args=(data,))
start = time.time()
sampler.run_mcmc(initial, nsteps, progress=True)
end = time.time()
serial_data_time = end - start
print("Serial took {0:.1f} seconds".format(serial_data_time))
"""
Explanation: There is often more overhead introduced by MPI than multiprocessing so we get less of a gain this time.
That being said, MPI is much more flexible and it can be used to scale to huge systems.
Pickling, data transfer & arguments
All parallel Python implementations work by spinning up multiple Python processes with identical environments and then passing information between the processes using pickle.
This means that the probability function must be picklable.
Some users might hit issues when they use args to pass data to their model.
These args must be pickled and passed every time the model is called.
This can be a problem if you have a large dataset, as you can see here:
End of explanation
"""
with Pool() as pool:
sampler = emcee.EnsembleSampler(
nwalkers, ndim, log_prob_data, pool=pool, args=(data,)
)
start = time.time()
sampler.run_mcmc(initial, nsteps, progress=True)
end = time.time()
multi_data_time = end - start
print("Multiprocessing took {0:.1f} seconds".format(multi_data_time))
print(
"{0:.1f} times faster(?) than serial".format(
serial_data_time / multi_data_time
)
)
"""
Explanation: We basically get no change in performance when we include the data argument here.
Now let's try including this naively using multiprocessing:
End of explanation
"""
def log_prob_data_global(theta):
a = data[0] # Use the data somehow...
t = time.time() + np.random.uniform(0.005, 0.008)
while True:
if time.time() >= t:
break
return -0.5 * np.sum(theta**2)
with Pool() as pool:
sampler = emcee.EnsembleSampler(
nwalkers, ndim, log_prob_data_global, pool=pool
)
start = time.time()
sampler.run_mcmc(initial, nsteps, progress=True)
end = time.time()
multi_data_global_time = end - start
print(
"Multiprocessing took {0:.1f} seconds".format(multi_data_global_time)
)
print(
"{0:.1f} times faster than serial".format(
serial_data_time / multi_data_global_time
)
)
"""
Explanation: Brutal.
We can do better than that though.
It's a bit ugly, but if we just make data a global variable and use that variable within the model calculation, then we take no hit at all.
End of explanation
"""
|
mne-tools/mne-tools.github.io | stable/_downloads/548b4fc45f1ed79527138879cd79d3c8/muscle_detection.ipynb | bsd-3-clause | # Authors: Adonay Nunes <adonay.s.nunes@gmail.com>
# Luke Bloy <luke.bloy@gmail.com>
# License: BSD-3-Clause
import os.path as op
import matplotlib.pyplot as plt
import numpy as np
from mne.datasets.brainstorm import bst_auditory
from mne.io import read_raw_ctf
from mne.preprocessing import annotate_muscle_zscore
# Load data
data_path = bst_auditory.data_path()
raw_fname = op.join(data_path, 'MEG', 'bst_auditory', 'S01_AEF_20131218_01.ds')
raw = read_raw_ctf(raw_fname, preload=False)
raw.crop(130, 160).load_data() # just use a fraction of data for speed here
raw.resample(300, npad="auto")
"""
Explanation: Annotate muscle artifacts
Muscle contractions produce high frequency activity that can mask brain signal
of interest. Muscle artifacts can be produced when clenching the jaw,
swallowing, or twitching a cranial muscle. Muscle artifacts are most
noticeable in the range of 110-140 Hz.
This example uses :func:~mne.preprocessing.annotate_muscle_zscore to annotate
segments where muscle activity is likely present. This is done by band-pass
filtering the data in the 110-140 Hz range. Then, the envelope is taken using
the Hilbert analytic signal, to consider only the absolute amplitude and not
the phase of the high frequency signal. The envelope is z-scored and summed
across channels and divided by the square root of the number of channels.
Because muscle artifacts last several hundred milliseconds, a low-pass filter
is applied on the averaged z-scores at 4 Hz, to remove transient peaks.
Segments above a set threshold are annotated as BAD_muscle. In addition,
the min_length_good parameter determines the cutoff for whether short
spans of "good data" in between muscle artifacts are included in the
surrounding "BAD" annotation.
End of explanation
"""
raw.notch_filter([50, 100])
# The threshold is data dependent, check the optimal threshold by plotting
# ``scores_muscle``.
threshold_muscle = 5 # z-score
# Choose one channel type, if there are axial gradiometers and magnetometers,
# select magnetometers as they are more sensitive to muscle activity.
annot_muscle, scores_muscle = annotate_muscle_zscore(
raw, ch_type="mag", threshold=threshold_muscle, min_length_good=0.2,
filter_freq=[110, 140])
"""
Explanation: Notch filter the data:
<div class="alert alert-info"><h4>Note</h4><p>If line noise is present, you should perform notch-filtering *before*
detecting muscle artifacts. See `tut-section-line-noise` for an
example.</p></div>
End of explanation
"""
fig, ax = plt.subplots()
ax.plot(raw.times, scores_muscle)
ax.axhline(y=threshold_muscle, color='r')
ax.set(xlabel='time, (s)', ylabel='zscore', title='Muscle activity')
"""
Explanation: Plot muscle z-scores across recording
End of explanation
"""
order = np.arange(144, 164)
raw.set_annotations(annot_muscle)
raw.plot(start=5, duration=20, order=order)
"""
Explanation: View the annotations
End of explanation
"""
|
jgarciab/wwd2017 | class4/class4b_inclass.ipynb | gpl-3.0 | ##Some code to run at the beginning of the file, to be able to show images in the notebook
##Don't worry about this cell
#Print the plots in this screen
%matplotlib inline
#Be able to plot images saved in the hard drive
from IPython.display import Image
#Make the notebook wider
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:90% !important; }</style>"))
import seaborn as sns
import pylab as plt
import pandas as pd
import numpy as np
def read_our_csv():
#reading the raw data from oecd
df = pd.read_csv("../class2/data/CITIES_19122016195113034.csv",sep="\t")
#fixing the columns (the first one is ""METRO_ID"" instead of "METRO_ID")
cols = list(df.columns)
cols[0] = "METRO_ID"
df.columns = cols
#pivot the table
column_with_values = "Value"
column_to_split = ["VAR"]
variables_already_present = ["METRO_ID","Metropolitan areas","Year"]
df_fixed = df.pivot_table(column_with_values,
variables_already_present,
column_to_split).reset_index()
return df_fixed
import pandas as pd
import numpy as np
import pylab as plt
import seaborn as sns
from scipy.stats import chi2_contingency,ttest_ind
#This allows us to use R
%load_ext rpy2.ipython
#Visualize in line
%matplotlib inline
#Be able to plot images saved in the hard drive
from IPython.display import Image,display
#Make the notebook wider
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:90% !important; }</style>"))
"""
Explanation: Working with data 2017. Class 3
Contact
Javier Garcia-Bernardo
garcia@uva.nl
0. Structure
Stats
Definitions
What's a p-value?
One-tailed test vs two-tailed test
Count vs expected count (binomial test)
Independence between factors: ($\chi^2$ test)
In-class exercises to melt, pivot, concat, merge, groupby and plot.
Read data from websited
Time series
End of explanation
"""
data = pd.read_csv("data/random.csv",sep="\t",index_col=0)*100
data.head()
import seaborn as sns
sns.heatmap?
ax = sns.heatmap(data,cbar_kws={"label":"Body temperature"},cmap="YlOrRd")
ax.invert_yaxis()
plt.ylabel("Pizzas eaten")
plt.xlabel("Outside temperature")
plt.show()
sns.heatmap(data,cbar_kws={"label":"Body temperature"},cmap="YlOrRd")
plt.ylabel("Pizzas eaten")
plt.xlabel("Outside temperature")
plt.xticks(0.5+np.arange(10),["10-20","20-30","30-40","40-50","50-60","60-70","80-90","90-100","100-110","110-120"],rotation=90)
plt.show()
"""
Explanation: 0. Mental note: Inkscape
0. Mental note 2: Heatmap
Represent one quantitative variable as color, two qualitative (or binned quantitative) in the sides.
End of explanation
"""
#Read data and print the head to see how it looks like
df = pd.read_csv("../class3/data/world_bank/data.csv",na_values="..")
df.head()
#We could fix the column names with: df.columns = ["Country Name","Country Code","Series Name","Series Code",1967,1968,1969,...]
## 4.1b Fix the year of the column (make it numbers)
df = pd.read_csv("../class3/data/world_bank/data.csv",na_values="..")
old_columns = list(df.columns)
new_columns = []
for index,column_name in enumerate(old_columns):
if index < 4:
new_columns.append(column_name)
else:
year_column = int(column_name[:4])
new_columns.append(year_column)
df.columns = new_columns
#We could save our data with: df.to_csv("data/new_columns.csv",sep="\t")
df.head()
"""
Explanation: Conclusion: Pizzas make you lekker warm
Lesson of the day: Eat more pizza
1. In-class exercises
1.1 Read the data from the world bank (inside class3 folder, then folder data, subfolder world_bank), and save it with name df
End of explanation
"""
### Fix setp 1: Melt
cols = list(df.columns)
variables_already_presents = cols[:4]
columns_combine = cols[4:]
df_1 = pd.melt(df,
id_vars=variables_already_presents,
value_vars=columns_combine,
var_name="Year",
value_name="Value")
df_1.head()
### Fix step 2: Pivot
column_with_values = "Value"
column_to_split = ["Series Name"]
variables_already_present = ["Country Name","Country Code","Year"]
df_1.pivot_table(column_with_values,
variables_already_present,
column_to_split).reset_index().head()
"""
Explanation: 4.2 Fix the format and save it with name df_fixed
Remember, this was the code that we use to fix the file of the
`
### Fix setp 1: Melt
variables_already_presents = ['METRO_ID', 'Metropolitan areas','VAR']
columns_combine = cols
df = pd.melt(df,
id_vars=variables_already_presents,
value_vars=columns_combine,
var_name="Year",
value_name="Value")
df.head()
### Fix step 2: Pivot
column_with_values = "Value"
column_to_split = ["VAR"]
variables_already_present = ["METRO_ID","Metropolitan areas","Year"]
df.pivot_table(column_with_values,
variables_already_present,
column_to_split).reset_index().head()
`
End of explanation
"""
#code
# One possible solution (assumes the pivoted table from step 4.2 was saved as df_fixed;
# "NLD" and "COL" are the World Bank country codes for the Netherlands and Colombia):
df_NL = df_fixed[df_fixed["Country Code"] == "NLD"]
df_CO = df_fixed[df_fixed["Country Code"] == "COL"]
"""
Explanation: 4.3 Create two dataframes with names df_NL and df_CO.
The first with the data for the Netherlands
The second with the data for Colombia
End of explanation
"""
# One possible solution: select each indicator from the melted table df_1
df_pri = df_1.loc[df_1["Series Code"] == "SH.XPD.PRIV.ZS", ["Country Code", "Year", "Value"]]
df_pu = df_1.loc[df_1["Series Code"] == "SH.XPD.PUBL.ZS", ["Country Code", "Year", "Value"]]
"""
Explanation: 4.4 Concatenate/Merge (the appropriate one) the two dataframes
4.5 Create two dataframes with names df_pri and df_pu.
The first with the data for all rows and columns "country", "year" and indicator "SH.XPD.PRIV.ZS" (expenditure in health care as %GDP)
The second with the data for all rows and columns "country", "year" and indicator "SH.XPD.PUBL.ZS"
End of explanation
"""
import scipy.stats #you need to import scipy.stats
"""
Explanation: 4.6 Concatenate/Merge (the appropriate one) the two dataframes (how = "outer")
4.7 Groupby the last dataframe (step 4.6) by country code and describe
If you don't remember check class3c_groupby.ipynb
4.8 Groupby the last dataframe (step 4.6) by country code and find skewness
A skewness value > 0 means that there is more weight in the right tail of the distribution (a longer right tail).
If you don't remember check class3c_groupby.ipynb
End of explanation
"""
|
GoogleCloudPlatform/training-data-analyst | quests/tpu/flowers_resnet.ipynb | apache-2.0 | import os
PROJECT = 'cloud-training-demos' # REPLACE WITH YOUR PROJECT ID
BUCKET = 'cloud-training-demos-ml' # REPLACE WITH YOUR BUCKET NAME
REGION = 'us-central1' # REPLACE WITH YOUR BUCKET REGION e.g. us-central1
# do not change these
os.environ['PROJECT'] = PROJECT
os.environ['BUCKET'] = BUCKET
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = '1.9'
%%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
"""
Explanation: Image Classification from scratch with TPUs on Cloud ML Engine using ResNet
This notebook demonstrates how to do image classification from scratch on a flowers dataset using TPUs and the resnet trainer.
End of explanation
"""
%%bash
gsutil cat gs://cloud-ml-data/img/flower_photos/train_set.csv | head -5 > /tmp/input.csv
cat /tmp/input.csv
%%bash
gsutil cat gs://cloud-ml-data/img/flower_photos/train_set.csv | sed 's/,/ /g' | awk '{print $2}' | sort | uniq > /tmp/labels.txt
cat /tmp/labels.txt
"""
Explanation: Convert JPEG images to TensorFlow Records
My dataset consists of JPEG images in Google Cloud Storage. I have two CSV files that are formatted as follows:
image-name, category
Instead of reading the images from JPEG each time, we'll convert the JPEG data and store it as TF Records.
End of explanation
"""
%%writefile copy_resnet_files.sh
#!/bin/bash
rm -rf tpu
git clone https://github.com/tensorflow/tpu
cd tpu
TFVERSION=$1
echo "Switching to version r$TFVERSION"
git checkout r$TFVERSION
cd ..
MODELCODE=tpu/models/official/resnet
OUTDIR=mymodel
rm -rf $OUTDIR
# preprocessing
cp -r imgclass $OUTDIR # brings in setup.py and __init__.py
cp tpu/tools/datasets/jpeg_to_tf_record.py $OUTDIR/trainer/preprocess.py
# model: fix imports
for FILE in $(ls -p $MODELCODE | grep -v /); do
CMD="cat $MODELCODE/$FILE "
for f2 in $(ls -p $MODELCODE | grep -v /); do
MODULE=`echo $f2 | sed 's/.py//g'`
CMD="$CMD | sed 's/^import ${MODULE}/from . import ${MODULE}/g' "
done
CMD="$CMD > $OUTDIR/trainer/$FILE"
eval $CMD
done
find $OUTDIR
echo "Finished copying files into $OUTDIR"
!bash ./copy_resnet_files.sh $TFVERSION
"""
Explanation: Clone the TPU repo
Let's git clone the repo and get the preprocessing and model files. The model code has imports of the form:
<pre>
import resnet_model as model_lib
</pre>
We will need to change this to:
<pre>
from . import resnet_model as model_lib
</pre>
End of explanation
"""
%%writefile enable_tpu_mlengine.sh
SVC_ACCOUNT=$(curl -H "Authorization: Bearer $(gcloud auth print-access-token)" \
https://ml.googleapis.com/v1/projects/${PROJECT}:getConfig \
| grep tpuServiceAccount | tr '"' ' ' | awk '{print $3}' )
echo "Enabling TPU service account $SVC_ACCOUNT to act as Cloud ML Service Agent"
gcloud projects add-iam-policy-binding $PROJECT \
--member serviceAccount:$SVC_ACCOUNT --role roles/ml.serviceAgent
echo "Done"
!bash ./enable_tpu_mlengine.sh
"""
Explanation: Enable TPU service account
Allow Cloud ML Engine to access the TPU and bill to your project
End of explanation
"""
%%bash
export PYTHONPATH=${PYTHONPATH}:${PWD}/mymodel
rm -rf /tmp/out
python -m trainer.preprocess \
--train_csv /tmp/input.csv \
--validation_csv /tmp/input.csv \
--labels_file /tmp/labels.txt \
--project_id $PROJECT \
--output_dir /tmp/out --runner=DirectRunner
!ls -l /tmp/out
"""
Explanation: Try preprocessing locally
End of explanation
"""
%%bash
export PYTHONPATH=${PYTHONPATH}:${PWD}/mymodel
gsutil -m rm -rf gs://${BUCKET}/tpu/resnet/data
python -m trainer.preprocess \
--train_csv gs://cloud-ml-data/img/flower_photos/train_set.csv \
--validation_csv gs://cloud-ml-data/img/flower_photos/eval_set.csv \
--labels_file /tmp/labels.txt \
--project_id $PROJECT \
--output_dir gs://${BUCKET}/tpu/resnet/data
"""
Explanation: Now run it over full training and evaluation datasets. This will happen in Cloud Dataflow.
End of explanation
"""
%%bash
gsutil ls gs://${BUCKET}/tpu/resnet/data
"""
Explanation: The above preprocessing step will take <b>15-20 minutes</b>. Wait for the job to finish before you proceed. Navigate to Cloud Dataflow section of GCP web console to monitor job progress. You will see something like this <img src="dataflow.png" />
Alternately, you can simply copy my already preprocessed files and proceed to the next step:
<pre>
gsutil -m cp gs://cloud-training-demos/tpu/resnet/data/* gs://${BUCKET}/tpu/resnet/copied_data
</pre>
End of explanation
"""
%%bash
echo -n "--num_train_images=$(gsutil cat gs://cloud-ml-data/img/flower_photos/train_set.csv | wc -l) "
echo -n "--num_eval_images=$(gsutil cat gs://cloud-ml-data/img/flower_photos/eval_set.csv | wc -l) "
echo "--num_label_classes=$(cat /tmp/labels.txt | wc -l)"
%%bash
TOPDIR=gs://${BUCKET}/tpu/resnet
OUTDIR=${TOPDIR}/trained
JOBNAME=imgclass_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR # Comment out this line to continue training from the last time
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.resnet_main \
--package-path=$(pwd)/mymodel/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=BASIC_TPU \
--runtime-version=$TFVERSION --python-version=3.5 \
-- \
--data_dir=${TOPDIR}/data \
--model_dir=${OUTDIR} \
--resnet_depth=18 \
--train_batch_size=128 --eval_batch_size=32 --skip_host_call=True \
--steps_per_eval=250 --train_steps=1000 \
--num_train_images=3300 --num_eval_images=370 --num_label_classes=5 \
--export_dir=${OUTDIR}/export
"""
Explanation: Train on the Cloud
End of explanation
"""
%%bash
gsutil ls gs://${BUCKET}/tpu/resnet/trained/export/
"""
Explanation: The above training job will take 15-20 minutes.
Wait for the job to finish before you proceed.
Navigate to Cloud ML Engine section of GCP web console
to monitor job progress.
The model should finish with an 80-83% accuracy (results will vary):
Eval results: {'global_step': 1000, 'loss': 0.7359053, 'top_1_accuracy': 0.82954544, 'top_5_accuracy': 1.0}
End of explanation
"""
OUTDIR = 'gs://{}/tpu/resnet/trained/'.format(BUCKET)
from google.datalab.ml import TensorBoard
TensorBoard().start(OUTDIR)
TensorBoard().stop(11531)
print("Stopped Tensorboard")
"""
Explanation: You can look at the training charts with TensorBoard:
End of explanation
"""
%%bash
MODEL_NAME="flowers"
MODEL_VERSION=resnet
MODEL_LOCATION=$(gsutil ls gs://${BUCKET}/tpu/resnet/trained/export/ | tail -1)
echo "Deleting/deploying $MODEL_NAME $MODEL_VERSION from $MODEL_LOCATION ... this will take a few minutes"
# comment/uncomment the appropriate line to run. The first time around, you will need only the two create calls
# But during development, you might need to replace a version by deleting the version and creating it again
#gcloud ml-engine versions delete --quiet ${MODEL_VERSION} --model ${MODEL_NAME}
#gcloud ml-engine models delete ${MODEL_NAME}
gcloud ml-engine models create ${MODEL_NAME} --regions $REGION
gcloud ml-engine versions create ${MODEL_VERSION} --model ${MODEL_NAME} --origin ${MODEL_LOCATION} --runtime-version=$TFVERSION
"""
Explanation: These were the charts I got (I set smoothing to be zero):
<img src="resnet_traineval.png" height="50"/>
As you can see, the final blue dot (eval) is quite close to the lowest training loss, indicating that the model hasn't overfit. The top_1 accuracy on the evaluation dataset, however, is 80% which isn't that great. More data would help.
<img src="resnet_accuracy.png" height="50"/>
Deploying and predicting with model
Deploy the model:
End of explanation
"""
%%bash
saved_model_cli show --dir $(gsutil ls gs://${BUCKET}/tpu/resnet/trained/export/ | tail -1) --tag_set serve --signature_def serving_default
"""
Explanation: We can use saved_model_cli to find out what inputs the model expects:
End of explanation
"""
import base64, sys, json
import tensorflow as tf
import io
with tf.gfile.GFile('gs://cloud-ml-data/img/flower_photos/sunflowers/1022552002_2b93faf9e7_n.jpg', 'rb') as ifp:
with io.open('test.json', 'w') as ofp:
image_data = ifp.read()
img = base64.b64encode(image_data).decode('utf-8')
json.dump({"image_bytes": {"b64": img}}, ofp)
!ls -l test.json
"""
Explanation: As you can see, the model expects image_bytes. This is typically base64 encoded
To predict with the model, let's take one of the example images that is available on Google Cloud Storage <img src="http://storage.googleapis.com/cloud-ml-data/img/flower_photos/sunflowers/1022552002_2b93faf9e7_n.jpg" /> and convert it to a base64-encoded array
End of explanation
"""
%%bash
gcloud ml-engine predict --model=flowers --version=resnet --json-instances=./test.json
"""
Explanation: Send it to the prediction service
End of explanation
"""
%%bash
head -4 /tmp/labels.txt | tail -1
"""
Explanation: What does CLASS no. 3 correspond to? (remember that class numbering is 0-based)
End of explanation
"""
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
import base64, sys, json
import tensorflow as tf
with tf.gfile.GFile('gs://cloud-ml-data/img/flower_photos/sunflowers/1022552002_2b93faf9e7_n.jpg', 'rb') as ifp:
credentials = GoogleCredentials.get_application_default()
api = discovery.build('ml', 'v1', credentials=credentials,
discoveryServiceUrl='https://storage.googleapis.com/cloud-ml/discovery/ml_v1_discovery.json')
request_data = {'instances':
[
{"image_bytes": {"b64": base64.b64encode(ifp.read()).decode('utf-8')}}
]}
parent = 'projects/%s/models/%s/versions/%s' % (PROJECT, 'flowers', 'resnet')
response = api.projects().predict(body=request_data, name=parent).execute()
print("response={0}".format(response))
"""
Explanation: Here's how you would invoke those predictions without using gcloud
End of explanation
"""
|
Danghor/Algorithms | Python/Chapter-04/Digit-Recognition.ipynb | gpl-2.0 | import gzip
import pickle
import numpy as np
"""
Explanation: Handwritten Digit Recognition using $k$-Nearest Neighbours
This notebook uses the <em style="color:blue;">$k$-nearest neighbours algorithm</em> to recognize handwritten digits. The digits we want to recognize
are stored as images of size $28 \times 28$ pixels. Each pixel $p$ is stored as a number that satisfies $0 \leq p \leq 1$. The pixel values are
interpreted as grey values: If $p = 1.0$, the pixel is completely black, while $p = 0.0$ if the pixel is white. The images are stored in the file mnist.pkl.gz. This file is compressed using gzip and the images have been pickled using the module pickle. The module pickle supports the reading and writing of Python data structures.
In order to read the images of the handwritten digits, we therefore have to import the modules gzip and pickle. The module numpy is needed to store the images as arrays.
End of explanation
"""
def load_data():
with gzip.open('mnist.pkl.gz', 'rb') as f:
train, _, test = pickle.load(f, encoding="latin1")
return (train[0], test[0], train[1], test[1])
X_train, X_test, Y_train, Y_test = load_data()
"""
Explanation: The function load_data returns a tuple of the form
$$ (\texttt{X_train}, \texttt{X_test}, \texttt{Y_train}, \texttt{Y_test}) $$
where
<ul>
<li> $\texttt{X_train}$ is a matrix storing the 50,000 training images of handwritten digits.
For each $i \in \{0,\cdots,49\,999\}$ the row $\texttt{X_train}[i, :]$ is an array of size $784$ storing a single image.
</li>
<li> $\texttt{X_test}$ is a matrix containing 10,000 images of handwritten digits that can be used for testing.</li>
<li> $\texttt{Y_train}$ is an array of size 50,000. For each $i \in \{0,\cdots,49\,999\}$ the number $\texttt{Y_train}[i]$
specifies the digit shown in the $i$th training image.
</li>
<li> $\texttt{Y_test}$ is an array of size 10,000. For each $i \in \{0,\cdots,9\,999\}$ the number $\texttt{Y_test}[i]$
specifies the digit shown in the $i$th test image.
</li>
</ul>
End of explanation
"""
X_train.shape, X_test.shape, Y_train.shape, Y_test.shape
"""
Explanation: Let us check what we have read:
End of explanation
"""
X_train[0, :]
"""
Explanation: Let us inspect the first hand written image of a digit.
End of explanation
"""
import matplotlib.pyplot as plt
"""
Explanation: This is an array with 784 entries. Let us draw the corresponding picture.
End of explanation
"""
def show_digits(rows, columns, offset=0):
f, axarr = plt.subplots(rows, columns)
for r in range(rows):
for c in range(columns):
i = r * columns + c + offset
image = 1 - X_train[i, :]
image = np.reshape(image, (28, 28))
axarr[r, c].imshow(image, cmap="gray")
axarr[r, c].axis('off')
plt.savefig("digits.pdf")
plt.show()
"""
Explanation: The function $\texttt{show_digits}(\texttt{rows}, \texttt{columns}, \texttt{offset})$
shows $\texttt{rows} \cdot \texttt{columns}$ images of the training data. The first image shown is the image at index $\texttt{offset}$.
End of explanation
"""
show_digits(4, 6)
"""
Explanation: We take a look at the first 24 images.
End of explanation
"""
def distance(x, y):
return np.sqrt(np.sum((x - y)**2))
"""
Explanation: Given two arrays $\mathbf{x}$ and $\mathbf{y}$ of the same dimension $n$, the function $\texttt{distance}(\mathbf{x}, \mathbf{y})$ computes the
<em style="color:blue;">Euclidean distance</em> between $\mathbf{x}$ and $\mathbf{y}$. This distance is defined as follows:
$$ \sqrt{\sum\limits_{i=1}^n (x_i - y_i)^2} $$
End of explanation
"""
distance(X_train[0,:], X_train[1,:])
"""
Explanation: For example, the distance between the first two images of the training set is computed as follows:
End of explanation
"""
distance(X_train[8,:], X_train[14,:])
"""
Explanation: The distance between the 9th and the 15th image should be smaller, because both of these images show the digit $1$ and hence these images are quite similar.
This similarity results in a smaller distance between these images.
End of explanation
"""
def maxCount(L):
Frequencies = {} # number of occurrences for each digit
most_frequent = L[0] # most frequent digit so far
most_frequent_count = 1 # number of occurrences of most frequent digit
for d in L:
if d in Frequencies:
Frequencies[d] += 1
else:
Frequencies[d] = 1
if Frequencies[d] > most_frequent_count:
most_frequent = d
most_frequent_count = Frequencies[d]
return most_frequent, most_frequent_count / len(L)
maxCount([3, 3, 4, 2, 1, 2, 3, 2, 5, 3])
"""
Explanation: Given a list $L$ of digits, the function $\texttt{maxCount}(L)$ returns a pair $(d, p)$ where $d$ is the digit that occurs most frequently in $L$
and $p$ is the percentage of occurrences of $d$ in $L$. For example, we have
$$ \texttt{maxCount}([5,2,3,5,2,5,6,5,7,8]) = (5, 0.4) $$
because the digit $5$ is the most frequent digit in the list $[5,2,3,5,2,5,6,5,7,8]$ and $40$% of the digits in this list are fives.
End of explanation
"""
def digit(x, k):
n = X_train.shape[0] # number of all training images
Distances = [ (distance(X_train[i, :], x), i) for i in range(n)]
Neighbours = [ Y_train[i] for _, i in sorted(Distances)]
return maxCount(Neighbours[:k])
"""
Explanation: Given an image of a digit stored in the vector $\mathbf{x}$ and a number of neighbours $k$, the function $\texttt{digit}(\mathbf{x}, k)$ computes those
$k$ images in the training set X_train that are <em style="color:blue;">closest</em> to the image $\mathbf{x}$. Here
<em style="color:blue;">closeness</em> of images is defined in terms of the <em style="color:blue;">Euclidean distance</em> of the vectors that store the
images. From these $k$ images of the training set the function chooses the digit that occurs most frequently. It returns a pair $(d, p)$ where $d$ is the digit that is most frequently occurring in the list of $k$ neighbours and $p$ is the percentage of images in the $k$ neighbours of $\mathbf{x}$ that show
the digit $d$.
End of explanation
"""
def show_image(n):
image = 1 - X_test[n, :]
image = np.reshape(image, (28, 28))
plt.imshow(image, cmap="gray")
plt.show()
"""
Explanation: The function show_image(n) shows the $n^\mathrm{th}$ test image.
End of explanation
"""
def test(n, k):
print(f'Testing image {n}:')
show_image(n)
d, p = digit(X_test[n, :], k)
print(f'I believe with a certainty of {p * 100}% that the image shows the digit {d}.')
test(2, 13)
"""
Explanation: This function performs $k$-nearest neighbour classification for the $n$-th image of the test set. It also prints the image.
End of explanation
"""
%%time
for n in range(20):
test(n, 13)
"""
Explanation: Let us classify the first 20 images from the test set.
End of explanation
"""
def check(n, k):
d, p = digit(X_test[n, :], k)
if d == Y_test[n]:
return 0
else:
print(f"\nImage number {n} wrongly identified: I guessed a {d}, but it's a {Y_test[n]}.")
show_image(n)
return 1
%%time
errors = 0
for n in range(10_000):
errors += check(n, 7)
print(f'There were {errors} errors out of 10000 images.')
"""
Explanation: Finally, let us run the classifier on all 10,000 test images and count how many of them are misclassified. The function check returns 1 if the $n$-th test image is classified incorrectly (and prints that image), and 0 otherwise.
End of explanation
"""
|
tpin3694/tpin3694.github.io | python/pandas_list_comprehension.ipynb | mit | # Import modules
import pandas as pd
# Set ipython's max row display
pd.set_option('display.max_row', 1000)
# Set iPython's max column width to 50
pd.set_option('display.max_columns', 50)
"""
Explanation: Title: Using List Comprehensions With Pandas
Slug: pandas_list_comprehension
Summary: Using List Comprehensions With Pandas
Date: 2016-05-01 12:00
Category: Python
Tags: Data Wrangling
Authors: Chris Albon
Preliminaries
End of explanation
"""
data = {'name': ['Jason', 'Molly', 'Tina', 'Jake', 'Amy'],
'year': [2012, 2012, 2013, 2014, 2014],
'reports': [4, 24, 31, 2, 3]}
df = pd.DataFrame(data, index = ['Cochice', 'Pima', 'Santa Cruz', 'Maricopa', 'Yuma'])
df
"""
Explanation: Create an example dataframe
End of explanation
"""
# Create a variable
next_year = []
# For each row in df.years,
for row in df['year']:
# Add 1 to the row and append it to next_year
next_year.append(row + 1)
# Create df.next_year
df['next_year'] = next_year
# View the dataframe
df
"""
Explanation: List Comprehensions
As a loop
End of explanation
"""
# Subtract 1 from row, for each row in df.year
df['previous_year'] = [row-1 for row in df['year']]
df
"""
Explanation: As list comprehension
End of explanation
"""
|
John-Keating/ThinkStats2 | code/chap04ex.ipynb | gpl-3.0 | %matplotlib inline
import nsfg
preg = nsfg.ReadFemPreg()
"""
Explanation: Exercise from Think Stats, 2nd Edition (thinkstats2.com)<br>
Allen Downey
Read the pregnancy file.
End of explanation
"""
import thinkstats2 as ts
live = preg[preg.outcome == 1]
wgt_cdf = ts.Cdf(live.totalwgt_lb, label = 'weight')
"""
Explanation: Select live births, then make a CDF of <tt>totalwgt_lb</tt>.
End of explanation
"""
import thinkplot as tp
tp.Cdf(wgt_cdf, label = 'weight')
tp.Show()
"""
Explanation: Display the CDF.
End of explanation
"""
import random
random.random?
import random
thousand = [random.random() for x in range(1000)]
thousand_pmf = ts.Pmf(thousand, label = 'rando')
tp.Pmf(thousand_pmf, linewidth=0.1)
tp.Show()
t_hist = ts.Hist(thousand)
tp.Hist(t_hist, label = "rando")
tp.Show()
"""
Explanation: Find out how much you weighed at birth, if you can, and compute CDF(x).
If you are a first child, look up your birthweight in the CDF of first children; otherwise use the CDF of other children.
Compute the percentile rank of your birthweight
Compute the median birth weight by looking up the value associated with p=0.5.
Compute the interquartile range (IQR) by computing percentiles corresponding to 25 and 75.
Make a random selection from <tt>cdf</tt>.
Draw a random sample from <tt>cdf</tt>.
Draw a random sample from <tt>cdf</tt>, then compute the percentile rank for each value, and plot the distribution of the percentile ranks.
Generate 1000 random values using <tt>random.random()</tt> and plot their PMF.
End of explanation
"""
thousand_cdf = ts.Cdf(thousand, label='rando')
tp.Cdf(thousand_cdf)
tp.Show()
import scipy.stats
scipy.stats?
"""
Explanation: Assuming that the PMF doesn't work very well, try plotting the CDF instead.
End of explanation
"""
|
bosscha/alma-calibrator | notebooks/2mass/10_PCA_combine_test_matchagain.ipynb | gpl-2.0 | # Imports assumed by the code below:
from astropy import coordinates
from astropy import units as u
from astroquery.irsa import Irsa
from astroquery.vizier import Vizier
import numpy as np
import pylab as plt
obj = ["PKS J0006-0623", 1.55789, -6.39315, 1]
# name, ra, dec, radius of cone
obj_name = obj[0]
obj_ra = obj[1]
obj_dec = obj[2]
cone_radius = obj[3]
obj_coord = coordinates.SkyCoord(ra=obj_ra, dec=obj_dec, unit=(u.deg, u.deg), frame="icrs")
data_2mass = Irsa.query_region(obj_coord, catalog="fp_psc", radius=cone_radius * u.deg)
data_wise = Irsa.query_region(obj_coord, catalog="allwise_p3as_psd", radius=cone_radius * u.deg)
__data_galex = Vizier.query_region(obj_coord, catalog='II/335', radius=cone_radius * u.deg)
data_galex = __data_galex[0]
num_2mass = len(data_2mass)
num_wise = len(data_wise)
num_galex = len(data_galex)
print("Number of object in (2MASS, WISE, GALEX): ", num_2mass, num_wise, num_galex)
"""
Explanation: Get the data
2MASS => effective resolution of the 2MASS system is approximately 5"
WISE => 3.4, 4.6, 12, and 22 μm (W1, W2, W3, W4) with an angular resolution of 6.1", 6.4", 6.5", & 12.0"
GALEX imaging => Five imaging surveys in a Far UV band (1350-1750 Å) and Near UV band (1750-2800 Å) with 6-8 arcsecond resolution (80% encircled energy) and 1 arcsecond astrometry, and a cosmic UV background map.
End of explanation
"""
# use only coordinate columns
ra_2mass = data_2mass['ra']
dec_2mass = data_2mass['dec']
c_2mass = coordinates.SkyCoord(ra=ra_2mass, dec=dec_2mass, unit=(u.deg, u.deg), frame="icrs")
ra_wise = data_wise['ra']
dec_wise = data_wise['dec']
c_wise = coordinates.SkyCoord(ra=ra_wise, dec=dec_wise, unit=(u.deg, u.deg), frame="icrs")
ra_galex = data_galex['RAJ2000']
dec_galex = data_galex['DEJ2000']
c_galex = coordinates.SkyCoord(ra=ra_galex, dec=dec_galex, unit=(u.deg, u.deg), frame="icrs")
####
sep_min = 6.0 * u.arcsec # minimum separation in arcsec
# Only 2MASS and WISE matching
#
idx_2mass, idx_wise, d2d, d3d = c_wise.search_around_sky(c_2mass, sep_min)
# select only the nearest one if there are more in the search region (minimum separation parameter)!
print("Only 2MASS and WISE: ", len(idx_2mass))
"""
Explanation: Matching coordinates
End of explanation
"""
# from matching of 2 cats (2MASS and WISE) coordinate
w1 = data_wise[idx_wise]['w1mpro']
j = data_2mass[idx_2mass]['j_m']
w1j = w1-j
# match between WISE and 2MASS
data_wise_matchwith_2mass = data_wise[idx_wise] # WISE dataset
cutw1j = -1.7
galaxy = data_wise_matchwith_2mass[w1j < cutw1j] # https://academic.oup.com/mnras/article/448/2/1305/1055284
w1j_galaxy = w1j[w1j<cutw1j]
w1_galaxy = w1[w1j<cutw1j]
plt.scatter(w1j, w1, marker='o', color='blue')
plt.scatter(w1j_galaxy, w1_galaxy, marker='.', color="red")
plt.axvline(x=cutw1j) # https://academic.oup.com/mnras/article/448/2/1305/1055284
"""
Explanation: Plot W1-J vs W1
End of explanation
"""
# GALEX
###
# coord of object in 2mass which match wise (first objet/nearest in sep_min region)
c_2mass_matchwith_wise = c_2mass[idx_2mass]
c_wise_matchwith_2mass = c_wise[idx_wise]
#Check with 2mass cut
idx_2mass_wise_galex, idx_galex1, d2d, d3d = c_galex.search_around_sky(c_2mass_matchwith_wise, sep_min)
num_galex1 = len(idx_galex1)
#Check with wise cut
idx_wise_2mass_galex, idx_galex2, d2d, d3d = c_galex.search_around_sky(c_wise_matchwith_2mass, sep_min)
num_galex2 = len(idx_galex2)
print("Number of match in 2MASS cut (with WISE): ", num_galex1)
print("Number of match in WISE cut (with 2MASS): ", num_galex2)
# diff/average
print("Confusion level: ", abs(num_galex1 - num_galex2)/np.mean([num_galex1, num_galex2])*100, "%")
"""
Explanation: W1-J < -1.7 => galaxy
W1-J > -1.7 => stars
only 2 objects are galaxies?
End of explanation
"""
# Choose which one is smaller!
if num_galex1 < num_galex2:
select_from_galex = idx_galex1
else:
select_from_galex = idx_galex2
print(len(select_from_galex))
match_galex = data_galex[select_from_galex]
c_selected_galex = c_galex[select_from_galex]
# Select other catalog using GALEX again
# 2MASS
idx_2mass, idx_galex1, d2d, d3d = c_2mass.search_around_sky(c_selected_galex, sep_min)
# WISE
idx_wise, idx_galex2, d2d, d3d = c_wise.search_around_sky(c_selected_galex, sep_min)
print("Number of match in GALEX: ", len(c_selected_galex))
print("Number of match in 2MASS: ", len(idx_2mass))
print("Number of match in WISE : ", len(idx_wise))
"""
Explanation: Filter all Cats
End of explanation
"""
|
mila-udem/summerschool2015 | fuel_tutorial/fuel_logreg.ipynb | bsd-3-clause | import numpy
import theano
from theano import tensor
# Size of the data
n_in = 28 * 28
# Number of classes
n_out = 10
x = tensor.matrix('x')
W = theano.shared(value=numpy.zeros((n_in, n_out), dtype=theano.config.floatX),
name='W',
borrow=True)
b = theano.shared(value=numpy.zeros((n_out,), dtype=theano.config.floatX),
name='b',
borrow=True)
"""
Explanation: Fuel exercise: logistic regression
This notebook is a copy of logistic_regression.ipynb, without the code downloading and unpacking the dataset.
Your goal is to use Fuel as a provider of data, instead of the previous approach. You will have to update the code in several places.
The solution is at fuel_logreg_solution.ipynb.
Get the data
The model
Logistic regression is a probabilistic, linear classifier. It is parametrized
by a weight matrix $W$ and a bias vector $b$. Classification is
done by projecting an input vector onto a set of hyperplanes, each of which
corresponds to a class. The distance from the input to a hyperplane reflects
the probability that the input is a member of the corresponding class.
Mathematically, the probability that an input vector $x$ is a member of a
class $i$, a value of a stochastic variable $Y$, can be written as:
$$P(Y=i|x, W,b) = softmax_i(W x + b) = \frac {e^{W_i x + b_i}} {\sum_j e^{W_j x + b_j}}$$
The model's prediction $y_{pred}$ is the class whose probability is maximal, specifically:
$$ y_{pred} = {\rm argmax}_i P(Y=i|x,W,b)$$
Now, let us define our input variables. First, we need to define the dimension of our tensors:
- n_in is the length of each training vector,
- n_out is the number of classes.
Our variables will be:
- x is a matrix, where each row contains a different example of the dataset. Its shape is (batch_size, n_in), but batch_size does not have to be specified in advance, and can change during training.
- W is a shared matrix, of shape (n_in, n_out), initialized with zeros. Column k of W represents the separation hyperplane for class k.
- b is a shared vector, of length n_out, initialized with zeros. Element k of b represents the free parameter of hyperplane k.
End of explanation
"""
p_y_given_x = tensor.nnet.softmax(tensor.dot(x, W) + b)
y_pred = tensor.argmax(p_y_given_x, axis=1)
"""
Explanation: Now, we can build a symbolic expression for the matrix of class-membership probability (p_y_given_x), and for the class whose probability is maximal (y_pred).
End of explanation
"""
y = tensor.lvector('y')
log_prob = tensor.log(p_y_given_x)
log_likelihood = log_prob[tensor.arange(y.shape[0]), y]
loss = - log_likelihood.mean()
"""
Explanation: Defining a loss function
Learning optimal model parameters involves minimizing a loss function. In the
case of multi-class logistic regression, it is very common to use the negative
log-likelihood as the loss. This is equivalent to maximizing the likelihood of the
data set $\cal{D}$ under the model parameterized by $\theta$. Let
us first start by defining the likelihood $\cal{L}$ and loss
$\ell$:
$$\mathcal{L} (\theta=\{W,b\}, \mathcal{D}) =
\sum_{i=0}^{|\mathcal{D}|} \log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\
\ell (\theta=\{W,b\}, \mathcal{D}) = - \mathcal{L} (\theta=\{W,b\}, \mathcal{D})
$$
Again, we will express those expressions using Theano. We have one additional input, the actual target class y:
- y is an input vector of integers, of length batch_size (which will have to match the length of x at runtime). The length of y can be symbolically expressed by y.shape[0].
- log_prob is a (batch_size, n_out) matrix containing the log probabilities of class membership for each example.
- arange(y.shape[0]) is a symbolic vector which will contain [0,1,2,... batch_size-1]
- log_likelihood is a vector containing the log probability of the target, for each example.
- loss is the mean of the negative log_likelihood over the examples in the minibatch.
End of explanation
"""
g_W, g_b = theano.grad(cost=loss, wrt=[W, b])
"""
Explanation: Training procedure
This notebook will use the method of stochastic gradient descent with mini-batches (MSGD) to find values of W and b that minimize the loss.
We can let Theano compute symbolic expressions for the gradient of the loss wrt W and b.
End of explanation
"""
learning_rate = numpy.float32(0.13)
new_W = W - learning_rate * g_W
new_b = b - learning_rate * g_b
"""
Explanation: g_W and g_b are symbolic variables, which can be used as part of a computation graph. In particular, let us define the expressions for one step of gradient descent for W and b, for a fixed learning rate.
End of explanation
"""
train_model = theano.function(inputs=[x, y],
outputs=loss,
updates=[(W, new_W),
(b, new_b)])
"""
Explanation: We can then define update expressions, or pairs of (shared variable, expression for its update), that we will use when compiling the Theano function. The updates will be performed each time the function gets called.
The following function, train_model, returns the loss on the current minibatch, then changes the values of the shared variables according to the update rules. It needs to be passed x and y as inputs, but not the shared variables, which are implicit inputs.
The entire learning algorithm thus consists in looping over all examples in the dataset, considering all the examples in one minibatch at a time, and repeatedly calling the train_model function.
End of explanation
"""
misclass_nb = tensor.neq(y_pred, y)
misclass_rate = misclass_nb.mean()
test_model = theano.function(inputs=[x, y],
outputs=misclass_rate)
"""
Explanation: Testing the model
When testing the model, we are interested in the number of misclassified examples (and not only in the likelihood). Here, we build a symbolic expression for retrieving the number of misclassified examples in a minibatch.
This will also be useful to apply on the validation and testing sets, in order to monitor the progress of the model during training, and to do early stopping.
End of explanation
"""
import timeit
## Define a couple of helper variables and functions for the optimization
batch_size = 500
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.shape[0] // batch_size
n_valid_batches = valid_set_x.shape[0] // batch_size
n_test_batches = test_set_x.shape[0] // batch_size
def get_minibatch(i, dataset_x, dataset_y):
start_idx = i * batch_size
end_idx = (i + 1) * batch_size
batch_x = dataset_x[start_idx:end_idx]
batch_y = dataset_y[start_idx:end_idx]
return (batch_x, batch_y)
## early-stopping parameters
# maximum number of epochs
n_epochs = 1000
# look as this many examples regardless
patience = 5000
# wait this much longer when a new best is found
patience_increase = 2
# a relative improvement of this much is considered significant
improvement_threshold = 0.995
# go through this many minibatches before checking the network on the validation set;
# in this case we check every epoch
validation_frequency = min(n_train_batches, patience // 2)
from six.moves import xrange
best_validation_loss = numpy.inf
test_score = 0.
start_time = timeit.default_timer()
done_looping = False
epoch = 0
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in xrange(n_train_batches):
minibatch_x, minibatch_y = get_minibatch(minibatch_index, train_set_x, train_set_y)
minibatch_avg_cost = train_model(minibatch_x, minibatch_y)
# iteration number
iter = (epoch - 1) * n_train_batches + minibatch_index
if (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
validation_losses = []
for i in xrange(n_valid_batches):
valid_xi, valid_yi = get_minibatch(i, valid_set_x, valid_set_y)
validation_losses.append(test_model(valid_xi, valid_yi))
this_validation_loss = numpy.mean(validation_losses)
print('epoch %i, minibatch %i/%i, validation error %f %%' %
(epoch,
minibatch_index + 1,
n_train_batches,
this_validation_loss * 100.))
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
# improve patience if loss improvement is good enough
if this_validation_loss < best_validation_loss * improvement_threshold:
patience = max(patience, iter * patience_increase)
best_validation_loss = this_validation_loss
# test it on the test set
test_losses = []
for i in xrange(n_test_batches):
test_xi, test_yi = get_minibatch(i, test_set_x, test_set_y)
test_losses.append(test_model(test_xi, test_yi))
test_score = numpy.mean(test_losses)
print(' epoch %i, minibatch %i/%i, test error of best model %f %%' %
(epoch,
minibatch_index + 1,
n_train_batches,
test_score * 100.))
# save the best parameters
numpy.savez('best_model.npz', W=W.get_value(), b=b.get_value())
if patience <= iter:
done_looping = True
break
end_time = timeit.default_timer()
print('Optimization complete with best validation score of %f %%, '
'with test performance %f %%' %
(best_validation_loss * 100., test_score * 100.))
print('The code ran for %d epochs, with %f epochs/sec' %
(epoch, 1. * epoch / (end_time - start_time)))
"""
Explanation: Training the model
Here is the main training loop of the algorithm:
- For each epoch, or pass through the training set
- split the training set in minibatches, and call train_model on each minibatch
- split the validation set in minibatches, and call test_model on each minibatch to measure the misclassification rate
- if the misclassification rate has not improved in a while, stop training
- Measure performance on the test set
The early stopping procedure is what decide whether the performance has improved enough. There are many variants, and we will not go into the details of this one here.
We first need to define a few parameters for the training loop and the early stopping procedure.
End of explanation
"""
|
as595/AllOfYourBases | CDT-KickOff/LECTURE/GPMIntro.ipynb | gpl-3.0 | %matplotlib inline
"""
Explanation: ==============================================================================================
‹ GPMIntro.ipynb ›
Copyright (C) ‹ 2017 › ‹ Anna Scaife - anna.scaife@manchester.ac.uk ›
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see http://www.gnu.org/licenses/.
==============================================================================================
[AMS - 170402] Notebook created for SKA-SA Newton Big Data Summer School, Cape Town , April 2017.
This notebook uses GPM to predict a signal. It recreates some of the plots from Roberts et al. 2013 (http://www.robots.ox.ac.uk/~sjrob/Pubs/Phil.%20Trans.%20R.%20Soc.%20A-2013-Roberts-.pdf). It is a teaching resource and is accompanied by the lecture "Can you Predict the Future..?".
All Python libraries used in this example can be installed using pip.
To start let's specify that we want our figures to appear embedded in the notebook:
End of explanation
"""
import numpy as np
import pylab as pl
"""
Explanation: Then let's import all the libraries we need...
End of explanation
"""
def cov_kernel(x1,x2,h,lam):
"""
Squared-Exponential covariance kernel
"""
k12 = h**2*np.exp(-1.*(x1 - x2)**2/lam**2)
return k12
"""
Explanation: Make the covariance kernel a squared-exponential,
$k(x_1,x_2) = h^2 \exp{ \left( \frac{-(x_1 - x_2)^2}{\lambda^2} \right)}$,
just like Eq. 3.11 in Roberts et al. (2012).
http://www.robots.ox.ac.uk/~sjrob/Pubs/Phil.%20Trans.%20R.%20Soc.%20A-2013-Roberts-.pdf
End of explanation
"""
def make_K(x, h, lam):
"""
Make covariance matrix from covariance kernel
"""
# for a data array of length x, make a covariance matrix x*x:
K = np.zeros((len(x),len(x)))
for i in range(0,len(x)):
for j in range(0,len(x)):
# calculate value of K for each separation:
K[i,j] = cov_kernel(x[i],x[j],h,lam)
return K
"""
Explanation: We can use this kernel to calculate the value of each element in our covariance matrix:
$\mathbf{K(x,x)} = \left(
\begin{array}{cccc}
k(x_1,x_1) & k(x_1,x_2) & ... & k(x_1,x_n) \\
k(x_2,x_1) & k(x_2,x_2) & ... & k(x_2,x_n) \\
\vdots & \vdots & \vdots & \vdots \\
k(x_n,x_1) & k(x_n,x_2) & ... & k(x_n,x_n)
\end{array}
\right).$
We can then populate a covariance matrix, $K(\mathbf{x},\mathbf{x})$, for our data:
End of explanation
"""
# make an array of 200 evenly spaced positions between 0 and 20:
x1 = np.arange(0, 20.,0.01)
for i in range(0,3):
h = 1.0
if (i==0): lam = 0.1
if (i==1): lam = 1.0
if (i==2): lam = 5.0
# make a covariance matrix:
K = make_K(x1,h,lam)
# five realisations:
for j in range(0,5):
# draw samples from a co-variate Gaussian distribution, N(0,K):
y1 = np.random.multivariate_normal(np.zeros(len(x1)),K)
tmp2 = '23'+str(i+3+1)
pl.subplot(int(tmp2))
pl.plot(x1,y1)
tmp1 = '23'+str(i+1)
pl.subplot(int(tmp1))
pl.imshow(K)
pl.title(r"$\lambda = $"+str(lam))
pl.show()
"""
Explanation: Using this kernel we can then recreate Fig. 5 from Roberts et al. (2012).
End of explanation
"""
# set number of training points
nx_training = 5
# randomly select the training points:
tmp = np.random.uniform(low=0.0, high=2000.0, size=nx_training)
tmp = tmp.astype(int)
condition = np.zeros_like(x1)
for i in tmp: condition[i] = 1.0
y_train = y1[np.where(condition==1.0)]
x_train = x1[np.where(condition==1.0)]
y_test = y1[np.where(condition==0.0)]
x_test = x1[np.where(condition==0.0)]
"""
Explanation: If we then take the final realization, which has $\lambda = 5$, and select 5 points from it randomly we can calculate the posterior mean and variance at every point based on those five input data.
The mean and variance are given by Eq. 3.8 & 3.9 in Roberts et al. (2012) or Eq. 2.25 & 2.26 in Rasmussen & Williams.
First let's select our training data points and our test data points:
End of explanation
"""
# define the covariance matrix:
K = make_K(x_train,h,lam)
# take the inverse:
iK = np.linalg.inv(K)
"""
Explanation: We then use our training data points to define a covariance matrix:
End of explanation
"""
mu=[];sig=[]
for xx in x_test:
# find the 1d covariance matrix:
K_x = cov_kernel(xx, x_train, h, lam)
# find the kernel for (x,x):
k_xx = cov_kernel(xx, xx, h, lam)
# calculate the posterior mean and variance:
mu_xx = np.dot(K_x.T,np.dot(iK,y_train))
sig_xx = k_xx - np.dot(K_x.T,np.dot(iK,K_x))
mu.append(mu_xx)
sig.append(np.sqrt(np.abs(sig_xx))) # note sqrt to get stdev from variance
"""
Explanation: For each of our test data points we can then make a prediction of the value at $x_{\ast}$ and the uncertainty (standard deviation):
End of explanation
"""
# mu and sig are currently lists - turn them into numpy arrays:
mu=np.array(mu);sig=np.array(sig)
# make some plots:
# left-hand plot
ax = pl.subplot(121)
pl.scatter(x_train,y_train) # plot the training points
pl.plot(x1,y1,ls=':') # plot the original data they were drawn from
pl.title("Input")
# right-hand plot
ax = pl.subplot(122)
pl.plot(x_test,mu,ls='-') # plot the predicted values
pl.plot(x_test,y_test,ls=':') # plot the original values
# shade in the area inside a one standard deviation bound:
ax.fill_between(x_test,mu-sig,mu+sig,facecolor='lightgrey', lw=0, interpolate=True)
pl.title("Predicted")
pl.scatter(x_train,y_train) # plot the training points
# display the plot:
pl.show()
"""
Explanation: Let's plot this up:
End of explanation
"""
|
csiu/100daysofcode | datamining/2017-03-04-day08.ipynb | mit | # Imports assumed by the code below
# (isdigit is assumed to be curses.ascii.isdigit, applied to single characters):
import re
import random
import yaml
import nltk
import praw
import pandas as pd
from curses.ascii import isdigit
from nltk.corpus import cmudict
d = cmudict.dict()
def readability_ease(num_sentences, num_words, num_syllables):
asl = num_words / num_sentences
asw = num_syllables / num_words
return(206.835 - (1.015 * asl) - (84.6 * asw))
def readability_ease_interpretation(x):
if 90 <= x:
res = "5th grade] "
res += "Very easy to read. Easily understood by an average 11-year-old student."
elif 80 <= x < 90:
res = "6th grade] "
res += "Easy to read. Conversational English for consumers."
elif 70 <= x < 80:
res = "7th grade] "
res += "Fairly easy to read."
elif 60 <= x < 70:
res = "8th & 9th grade] "
res += "Plain English. Easily understood by 13- to 15-year-old students."
elif 50 <= x < 60:
res = "10th to 12th grade] "
res += "Fairly difficult to read."
elif 30 <= x < 50:
res = "College] "
res += "Difficult to read."
elif 0 <= x < 30:
res = "College Graduate] "
res += "Very difficult to read. Best understood by university graduates."
print("[{:.1f}|{}".format(x, res))
def count_syllables(word):
w = word.lower()
if w in d:
return([len(list(y for y in x if isdigit(y[-1]))) for x in d[w]][0])
else:
return(-1)
"""
Explanation: layout: post
author: csiu
date: 2017-03-04
title: "Day08:"
categories: update
tags:
- 100daysofcode
- text-mining
excerpt:
DAY 08 - Mar 4, 2017
In continuation with yesterday, I want to know the reading level of Reddit.
Flesch Reading Ease
End of explanation
"""
def flesch_reading_ease(text, show_details = False):
## Preprocessing
text = text.lower()
sentences = nltk.tokenize.sent_tokenize(text)
words = nltk.wordpunct_tokenize(re.sub('[^a-zA-Z_ ]', '',text))
syllables = [count_syllables(word) for word in words]
## Count sentences, words, and syllables
## Skip words that do not exist in dictionary
num_sentences = len(sentences)
num_unknown_words = syllables.count(-1)
num_words = len(words) - num_unknown_words
num_syllables = sum([s for s in syllables if s > 0])
## Calculate
if (num_sentences == 0 or num_words == 0): return None
fre = readability_ease(num_sentences, num_words, num_syllables)
if show_details:
return {
"sentence" : num_sentences,
"word" : num_words,
"syllables" : num_syllables,
"unknown": num_unknown_words
}
return(fre)
text = "Hello world, how are you? I am great. Thank you for asking!"
flesch_reading_ease(text, show_details = True)
"""
Explanation: We make a few modifications
show_details was added to show the number of sentences, words, and syllables used to compute the Flesch Reading Ease score
make it so that words not in the dictionary are skipped (and that our program does not break)
ensure we do not divide by 0
End of explanation
"""
# Load my secrets from external file
with open("_api-reddit.yaml", 'r') as stream:
my_secret = yaml.load(stream)
client_id = my_secret['client_id']
client_secret = my_secret['client_secret']
# Create instance of Reddit
reddit = praw.Reddit(user_agent='Comment Extraction (by /u/USERNAME)',
client_id=client_id, client_secret=client_secret)
def parse_reddit(reddit, the_url):
submission = reddit.submission(url=the_url)
# Replace MoreComments
submission.comments.replace_more(limit=0)
data = []
for comment in submission.comments.list():
data.append(comment.body)
return(data)
"""
Explanation: What about Reddit?
In Day03, we accessed Reddit data from an API
End of explanation
"""
the_url = 'https://www.reddit.com/r/AskReddit/comments/5wkkwg/men_of_reddit_whats_the_biggest_im_a_princess_red/'
data = parse_reddit(reddit, the_url)
## Check that it works for 1 comment
text = data[0]
print(text)
print(flesch_reading_ease(text, show_details = True))
fre = flesch_reading_ease(text)
readability_ease_interpretation(fre)
## To data frame
x = [flesch_reading_ease(text) for text in data]
df = pd.DataFrame({"Princess":x})
## Is there NA?
df[df.Princess.isnull()]
data[301]
"""
Explanation: Same thread, different question
Let's find the Flesch Reading Ease of the "Men of Reddit, what's the biggest 'I'm a princess' red flag?" (thewhackcat, 2017) thread.
End of explanation
"""
readability_ease_interpretation(df.Princess.mean())
"""
Explanation: The NA values arise mostly because the tokens are not real words
Average reading level
On average, 7th grade level
End of explanation
"""
urls = {
"elif" : "https://www.reddit.com/r/explainlikeimfive/comments/5xle3c/eli5_what_would_happen_if_you_take_a_compass_into/",
"showerthoughts" : "https://www.reddit.com/r/Showerthoughts/comments/5xippf/college_would_be_a_lot_more_affordable_if_they/",
"gaming" : "https://www.reddit.com/r/gaming/comments/5xhpqs/when_the_game_knows_shit_is_about_to_go_down/",
"askreddit" : "https://www.reddit.com/r/AskReddit/comments/5xhvlq/whats_your_favorite_insult_that_does_not_contain/",
"tifu" : "https://www.reddit.com/r/tifu/comments/5xhfwh/tifu_by_being_courteous/"
}
random.seed(10)
df = {}
for k in urls.keys():
data = parse_reddit(reddit, urls[k])
fres = [flesch_reading_ease(text) for text in data]
fres = [fre for fre in fres if fre != None]
df[k] = random.sample(k=75, population=fres)
df = pd.DataFrame(df)
df.plot.kde(xlim=[-6,150])
df.plot.box(vert=False, xlim=[0, 156])
"""
Explanation: More threads
Explain like I'm five: ELI5: What would happen if you take a compass into space?
Shower thoughts: College would be a lot more affordable if they stopped requiring courses that have nothing to do with people's major.
Gaming When the game knows shit is about to go down
Ask reddit What's your favorite insult that does not contain a curse word?
TIFU TIFU by being courteous
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.15/_downloads/plot_point_spread.ipynb | bsd-3-clause | import os.path as op
import numpy as np
from mayavi import mlab
import mne
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, apply_inverse
from mne.simulation import simulate_stc, simulate_evoked
"""
Explanation: Corrupt known signal with point spread
The aim of this tutorial is to demonstrate how to put a known signal at a
desired location(s) in a :class:mne.SourceEstimate and then corrupt the
signal with point-spread by applying a forward and inverse solution.
End of explanation
"""
seed = 42
# parameters for inverse method
method = 'sLORETA'
snr = 3.
lambda2 = 1.0 / snr ** 2
# signal simulation parameters
# do not add extra noise to the known signals
nave = np.inf
T = 100
times = np.linspace(0, 1, T)
dt = times[1] - times[0]
# Paths to MEG data
data_path = sample.data_path()
subjects_dir = op.join(data_path, 'subjects')
fname_fwd = op.join(data_path, 'MEG', 'sample',
'sample_audvis-meg-oct-6-fwd.fif')
fname_inv = op.join(data_path, 'MEG', 'sample',
'sample_audvis-meg-oct-6-meg-fixed-inv.fif')
fname_evoked = op.join(data_path, 'MEG', 'sample',
'sample_audvis-ave.fif')
"""
Explanation: First, we set some parameters.
End of explanation
"""
fwd = mne.read_forward_solution(fname_fwd)
fwd = mne.convert_forward_solution(fwd, force_fixed=True, surf_ori=True,
use_cps=False)
fwd['info']['bads'] = []
inv_op = read_inverse_operator(fname_inv)
raw = mne.io.RawFIF(op.join(data_path, 'MEG', 'sample',
'sample_audvis_raw.fif'))
events = mne.find_events(raw)
event_id = {'Auditory/Left': 1, 'Auditory/Right': 2}
epochs = mne.Epochs(raw, events, event_id, baseline=(None, 0), preload=True)
epochs.info['bads'] = []
evoked = epochs.average()
labels = mne.read_labels_from_annot('sample', subjects_dir=subjects_dir)
label_names = [l.name for l in labels]
n_labels = len(labels)
"""
Explanation: Load the MEG data
End of explanation
"""
cov = mne.compute_covariance(epochs, tmin=None, tmax=0.)
"""
Explanation: Estimate the background noise covariance from the baseline period
End of explanation
"""
# The known signal is all zeros outside of the two labels of interest
signal = np.zeros((n_labels, T))
idx = label_names.index('inferiorparietal-lh')
signal[idx, :] = 1e-7 * np.sin(5 * 2 * np.pi * times)
idx = label_names.index('rostralmiddlefrontal-rh')
signal[idx, :] = 1e-7 * np.sin(7 * 2 * np.pi * times)
"""
Explanation: Generate sinusoids in two spatially distant labels
End of explanation
"""
hemi_to_ind = {'lh': 0, 'rh': 1}
for i, label in enumerate(labels):
# The `center_of_mass` function needs labels to have values.
labels[i].values.fill(1.)
# Restrict the eligible vertices to be those on the surface under
# consideration and within the label.
surf_vertices = fwd['src'][hemi_to_ind[label.hemi]]['vertno']
restrict_verts = np.intersect1d(surf_vertices, label.vertices)
com = labels[i].center_of_mass(subject='sample',
subjects_dir=subjects_dir,
restrict_vertices=restrict_verts,
surf='white')
    # Convert the center-of-mass vertex index from the surface vertex list to the
    # Label's vertex list.
cent_idx = np.where(label.vertices == com)[0][0]
# Create a mask with 1 at center vertex and zeros elsewhere.
labels[i].values.fill(0.)
labels[i].values[cent_idx] = 1.
"""
Explanation: Find the center vertices in source space of each label
We want the known signal in each label to only be active at the center. We
create a mask for each label that is 1 at the center vertex and 0 at all
other vertices in the label. This mask is then used when simulating
source-space data.
End of explanation
"""
stc_gen = simulate_stc(fwd['src'], labels, signal, times[0], dt,
value_fun=lambda x: x)
"""
Explanation: Create source-space data with known signals
Put known signals onto surface vertices using the array of signals and
the label masks (stored in labels[i].values).
End of explanation
"""
kwargs = dict(subjects_dir=subjects_dir, hemi='split', smoothing_steps=4,
time_unit='s', initial_time=0.05, size=1200,
views=['lat', 'med'])
clim = dict(kind='value', pos_lims=[1e-9, 1e-8, 1e-7])
figs = [mlab.figure(1), mlab.figure(2), mlab.figure(3), mlab.figure(4)]
brain_gen = stc_gen.plot(clim=clim, figure=figs, **kwargs)
"""
Explanation: Plot original signals
Note that the original signals are highly concentrated (point) sources.
End of explanation
"""
evoked_gen = simulate_evoked(fwd, stc_gen, evoked.info, cov, nave,
random_state=seed)
# Map the simulated sensor-space data to source-space using the inverse
# operator.
stc_inv = apply_inverse(evoked_gen, inv_op, lambda2, method=method)
"""
Explanation: Simulate sensor-space signals
Use the forward solution to simulate sensor-space (evoked) data from the known
source-space signals, optionally adding Gaussian noise. The amount of noise is
controlled by nave (higher values imply less noise); with nave = np.inf, as set
above, no noise is added.
End of explanation
"""
figs = [mlab.figure(5), mlab.figure(6), mlab.figure(7), mlab.figure(8)]
brain_inv = stc_inv.plot(figure=figs, **kwargs)
"""
Explanation: Plot the point-spread of corrupted signal
Notice that after applying the forward and inverse operators to the known
point sources, the sources have spread across the source space.
This spread is a consequence of the minimum norm solution: the signal leaks to
nearby vertices with similar orientations, so it ends up crossing the
sulci and gyri.
End of explanation
"""
|
indiependente/Social-Networks-Structure | results/Watts-Strogatz Results Analysis.ipynb | mit | #!/usr/bin/python
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from stats import parse_results, get_percentage, get_avg_per_seed, draw_pie, draw_bars, draw_bars_comparison, draw_avgs
"""
Explanation: Watts-Strogatz Graph Experiments Output Visualization
End of explanation
"""
pr, eigen, bet = parse_results('test_ws.txt')
"""
Explanation: Parse results
End of explanation
"""
draw_pie(get_percentage(pr))
"""
Explanation: PageRank Seeds Percentage
How many times the "Top X" nodes from PageRank have led to the max infection
End of explanation
"""
draw_bars_comparison('Avg adopters per seeds', 'Avg adopters', np.array(get_avg_per_seed(pr)+[(0, np.mean(pr[:,1]))]))
"""
Explanation: Avg adopters per seed comparison
End of explanation
"""
draw_pie(get_percentage(eigen))
"""
Explanation: Eigenvector Seeds Percentage
How many times the "Top X" nodes from Eigenvector have led to the max infection
End of explanation
"""
draw_bars_comparison('Avg adopters per seeds', 'Avg adopters', np.array(get_avg_per_seed(eigen)+[(0, np.mean(eigen[:,1]))]))
"""
Explanation: Avg adopters per seed comparison
End of explanation
"""
draw_pie(get_percentage(bet))
"""
Explanation: Betweenness Seeds Percentage
How many times the "Top X" nodes from Betweenness have led to the max infection
End of explanation
"""
draw_bars_comparison('Avg adopters per seeds', 'Avg adopters', np.array(get_avg_per_seed(bet)+[(0, np.mean(bet[:,1]))]))
"""
Explanation: Avg adopters per seed comparison
End of explanation
"""
draw_bars(np.sort(pr.view('i8,i8'), order=['f0'], axis=0).view(np.int),
np.sort(eigen.view('i8,i8'), order=['f0'], axis=0).view(np.int),
np.sort(bet.view('i8,i8'), order=['f0'], axis=0).view(np.int))
"""
Explanation: Comparison of adopters across the 100 runs
End of explanation
"""
pr_mean = np.mean(pr[:,1])
pr_mean_seed = np.mean(pr[:,0])
print 'Avg Seed:',pr_mean_seed, 'Avg adopters:', pr_mean
"""
Explanation: Centrality Measures Averages
PageRank avg adopters and seed
End of explanation
"""
eigen_mean = np.mean(eigen[:,1])
eigen_mean_seed = np.mean(eigen[:,0])
print 'Avg Seed:',eigen_mean_seed, 'Avg adopters:',eigen_mean
"""
Explanation: Eigenv avg adopters and seed
End of explanation
"""
bet_mean = np.mean(bet[:,1])
bet_mean_seed = np.mean(bet[:,0])
print 'Avg Seed:',bet_mean_seed, 'Avg adopters:',bet_mean
draw_avgs([pr_mean, eigen_mean, bet_mean])
"""
Explanation: Betweenness avg adopters and seed
End of explanation
"""
|
ngcm/training-public | FEEG6016 Simulation and Modelling/08-Finite-Elements-Lab-2.ipynb | mit | from IPython.core.display import HTML
css_file = 'https://raw.githubusercontent.com/ngcm/training-public/master/ipython_notebook_styles/ngcmstyle.css'
HTML(url=css_file)
"""
Explanation: Finite Elements Lab 2 Worksheet
End of explanation
"""
%matplotlib inline
import numpy
from matplotlib import pyplot
from matplotlib import rcParams
rcParams['font.family'] = 'serif'
rcParams['font.size'] = 16
rcParams['figure.figsize'] = (12,6)
nodes = numpy.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
IEN = numpy.array([[0, 1, 2],
[1, 3, 2]])
pyplot.figure()
pyplot.axis('equal')
pyplot.triplot(nodes[:,0], nodes[:,1], triangles=IEN, lw=2)
pyplot.plot(nodes[:,0], nodes[:,1], 'ro')
for e in range(IEN.shape[0]):  # loop over elements
barycentre = numpy.mean(nodes[IEN[e,:],:], axis=0)
pyplot.text(barycentre[0], barycentre[1], "{}".format(e),
bbox=dict(facecolor='red', alpha=0.5))
for n in range(3):
pyplot.text(nodes[IEN[e,n],0]-0.07*(-1)**e,nodes[IEN[e,n],1]+0.07, r"${}_{{{}}}$".format(n,e),
bbox=dict(facecolor='blue', alpha=0.25 + 0.5*e))
for n in range(nodes.shape[0]):
pyplot.text(nodes[n,0]-0.07, nodes[n,1]-0.07, "{}".format(n),
bbox=dict(facecolor='green', alpha=0.3))
pyplot.xlim(-0.2, 1.2)
pyplot.ylim(-0.2, 1.2)
pyplot.xlabel(r"$x$")
pyplot.ylabel(r"$y$");
"""
Explanation: 2d problem
We've looked at the problem of finding the static temperature distribution in a bar. Now let's move on to finding the temperature distribution of a plate of length $1$ on each side. The temperature $T(x, y) = T(x_1, x_2)$ satisfies
$$
\nabla^2 T + f({\bf x}) = \left( \partial_{xx} + \partial_{yy} \right) T + f({\bf x}) = 0.
$$
We'll fix the temperature to be zero at the right edge, $T(1, y) = 0$. We'll allow heat to flow out of the other edges, giving the boundary conditions on all edges as
$$
\begin{align}
\partial_x T(0, y) &= 0, & T(1, y) &=0, \
\partial_y T(x, 0) &= 0, & \partial_y T(x, 1) &=0.
\end{align}
$$
Once again we want to write down the weak form by integrating by parts. To do that we rely on the divergence theorem,
$$
\int_{\Omega} \text{d}\Omega \, \nabla_i \phi = \int_{\Gamma} \text{d}\Gamma \, \phi n_i.
$$
Here $\Omega$ is the domain (which in our problem is the plate, $x, y \in [0, 1]$) and $\Gamma$ its boundary (in our problem the four lines $x=0, 1$ and $y=0, 1$), whilst ${\bf n}$ is the (inward-pointing) normal vector to the boundary.
We then multiply the strong form of the static heat equation by a weight function $w(x, y)$ and integrate by parts, using the divergence theorem, to remove the second derivative. To enforce the boundary conditions effectively we again choose the weight function to vanish where the value of the temperature is explicitly given, i.e. $w(1, y) = 0$. That is, we split the boundary $\Gamma$ into a piece $\Gamma_D$ where the boundary conditions are in Dirichlet form (the value $T$ is given) and a piece $\Gamma_N$ where the boundary conditions are in Neumann form (the value of the normal derivative $n_i \nabla_i T$ is given). We then enforce that on $\Gamma_D$ the weight function vanishes.
For our problem, this gives
$$
\int_{\Omega} \text{d} \Omega \, \nabla_i w \nabla_i T = \int_{\Omega} \text{d} \Omega \, w f.
$$
Re-writing for our explicit domain and our Cartesian coordinates we get
$$
\int_0^1 \text{d} y \, \int_0^1 \text{d} x \, \left( \partial_x w \partial_x T + \partial_y w \partial_y T \right) = \int_0^1 \text{d} y \, \int_0^1 \text{d} x \, w(x, y) f(x, y).
$$
This should be compared to the one dimensional case
$$
\int_0^1 \text{d}x \, \partial_x w(x) \partial_x T(x) = \int_0^1 \text{d}x \, w(x) f(x).
$$
We can now envisage using the same steps as the one dimensional case. Split the domain into elements, represent all functions in terms of known shape functions on each element, assemble the problems in each element to a single matrix problem, and then solve the matrix problem.
Elements
Here we will use triangular elements. As a simple example we'll split the plate into two triangles.
End of explanation
"""
ID = numpy.array([0,-1,1,-1])
"""
Explanation: What we're doing here is
Providing a list of nodes by their global coordinates.
Providing the element node array IEN which says how the elements are linked to the nodes.
We have that for element $e$ and local node number $a = 0, 1, 2$ the global node number is $A = IEN(e, a)$. This notation is sufficiently conventional that matplotlib recognizes it with its triplot/tripcolor/trisurf functions.
It is convention that the nodes are ordered in the anti-clockwise direction as the local number goes from $0$ to $2$.
The plot shows the
element numbers in the red boxes
the global node numbers in the green boxes
the local node numbers in the blue boxes (the subscript shows the element number).
We will need one final array, which is the $ID$ or destination array. This links the global node number to the global equation number in the final linear system. As the order of the equations in a linear system doesn't matter, this essentially encodes whether a node should have any equation in the linear system. Any node on $\Gamma_D$, where the value of the temperature is given, should not have an equation. In the example above the right edge is fixed, so nodes $1$ and $3$ lie on $\Gamma_D$ and should not have an equation. Thus in our case we have
End of explanation
"""
LM = numpy.zeros_like(IEN.T)
for e in range(IEN.shape[0]):
for a in range(IEN.shape[1]):
LM[a,e] = ID[IEN[e,a]]
LM
"""
Explanation: In the one dimensional case we used the location matrix or $LM$ array to link local node numbers in elements to equations. With the $IEN$ and $ID$ arrays the $LM$ matrix is strictly redundant, as $LM(a, e) = ID(IEN(e, a))$. However, it's still standard to construct it:
End of explanation
"""
corners = numpy.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [0.0, 0.0]])
pyplot.plot(corners[:,0],corners[:,1],linewidth=2)
pyplot.xlabel(r"$\xi_0$")
pyplot.ylabel(r"$\xi_1$")
pyplot.axis('equal')
pyplot.ylim(-0.1,1.1);
"""
Explanation: Function representation and shape functions
We're going to want to write our unknown functions $T, w$ in terms of shape functions. These are easiest to write down for a single reference element, in the same way as we did for the one dimensional case where our reference element used the coordinates $\xi$. In two dimensions we'll use the reference coordinates $\xi_0, \xi_1$, and the standard "unit" triangle:
End of explanation
"""
def generate_2d_grid(Nx):
"""
    Generate a triangular grid covering the plate :math:`[0,1]^2` with Nx (pairs of) triangles in each dimension.
Parameters
----------
Nx : int
        Number of triangles in any one dimension (so the total number on the plate is :math:`2 Nx^2`)
Returns
-------
nodes : array of float
Array of (x,y) coordinates of nodes
IEN : array of int
Array linking elements to nodes
ID : array of int
Array linking nodes to equations
"""
Nnodes = Nx+1
x = numpy.linspace(0, 1, Nnodes)
y = numpy.linspace(0, 1, Nnodes)
X, Y = numpy.meshgrid(x,y)
nodes = numpy.zeros((Nnodes**2,2))
nodes[:,0] = X.ravel()
nodes[:,1] = Y.ravel()
ID = numpy.zeros(len(nodes), dtype=numpy.int)
n_eq = 0
for nID in range(len(nodes)):
if nID % Nnodes == Nx:
ID[nID] = -1
else:
ID[nID] = n_eq
n_eq += 1
IEN = numpy.zeros((2*Nx**2,3), dtype=numpy.int)
for i in range(Nx):
for j in range(Nx):
IEN[2*i+2*j*Nx , :] = i+j*Nnodes, i+1+j*Nnodes, i+(j+1)*Nnodes
IEN[2*i+1+2*j*Nx, :] = i+1+j*Nnodes, i+1+(j+1)*Nnodes, i+(j+1)*Nnodes
return nodes, IEN, ID
"""
Explanation: The shape functions on this triangle are
\begin{align}
N_0(\xi_0, \xi_1) &= 1 - \xi_0 - \xi_1, \\
N_1(\xi_0, \xi_1) &= \xi_0, \\
N_2(\xi_0, \xi_1) &= \xi_1.
\end{align}
The derivatives are all either $0$ or $\pm 1$.
As soon as we have the shape functions, our weak form becomes
$$
\sum_A T_A \int_{\Omega} \text{d}\Omega \, \left( \partial_{x} N_A (x, y) \partial_{x} N_B(x, y) + \partial_{y} N_A(x, y) \partial_{y} N_B(x, y) \right) = \int_{\Omega} \text{d}\Omega \, N_B(x, y) f(x, y).
$$
If we restrict to a single element the weak form becomes
$$
\sum_A T_A \int_{\triangle} \text{d}\triangle \, \left( \partial_{x} N_A (x, y) \partial_{x} N_B(x, y) + \partial_{y} N_A(x, y) \partial_{y} N_B(x, y) \right) = \int_{\triangle} \text{d}\triangle \, N_B(x, y) f(x, y).
$$
We need to map the triangle and its $(x, y) = {\bf x}$ coordinates to the reference triangle and its $(\xi_0, \xi_1) = {\bf \xi}$ coordinates. We also need to work out the integrals that appear in the weak form. We need the transformation formula
$$
\int_{\triangle} \text{d}\triangle \, \phi(x, y) = \int_0^1 \text{d}\xi_1 \, \int_0^{1-\xi_1} \text{d}\xi_0 \, \phi \left( x(\xi_0, \xi_1), y(\xi_0, \xi_1) \right) j(\xi_0, \xi_1),
$$
where the Jacobian matrix $J$ is
$$
J = \left[ \frac{\partial {\bf x}}{\partial {\bf \xi}} \right] = \begin{pmatrix} \partial_{\xi_0} x & \partial_{\xi_1} x \\ \partial_{\xi_0} y & \partial_{\xi_1} y \end{pmatrix}
$$
and hence the Jacobian determinant $j$ is
$$
j = \det{J} = \det \left[ \frac{\partial {\bf x}}{\partial {\bf \xi}} \right] = \det \begin{pmatrix} \partial_{\xi_0} x & \partial_{\xi_1} x \\ \partial_{\xi_0} y & \partial_{\xi_1} y \end{pmatrix}.
$$
We will also need the Jacobian matrix when writing the derivatives of the shape functions in terms of the coordinates on the reference triangle, i.e.
$$
\begin{pmatrix} \partial_x N_A & \partial_y N_A \end{pmatrix} = \begin{pmatrix} \partial_{\xi_0} N_A & \partial_{\xi_1} N_A \end{pmatrix} J^{-1} .
$$
The integral over the reference triangle can be directly approximated using, for example, Gauss quadrature. To second order we have
$$
\int_0^1 \text{d}\xi_1 \, \int_0^{1-\xi_1} \text{d}\xi_0 \, \psi \left( x(\xi_0, \xi_1), y(\xi_0, \xi_1) \right) \simeq \frac{1}{6} \sum_{j = 1}^{3} \psi \left( x((\xi_0)_j, (\xi_1)_j), y((\xi_0)_j, (\xi_1)_j) \right)
$$
where
$$
\begin{align}
(\xi_0)_1 &= \frac{1}{6}, & (\xi_1)_1 &= \frac{1}{6}, \\
(\xi_0)_2 &= \frac{4}{6}, & (\xi_1)_2 &= \frac{1}{6}, \\
(\xi_0)_3 &= \frac{1}{6}, & (\xi_1)_3 &= \frac{4}{6}.
\end{align}
$$
Finally, we need to map from the coordinates ${\bf \xi}$ to the coordinates ${\bf x}$. This is straightforward if we think of writing each component $(x, y)$ in terms of the shape functions. So for element $e$ with node locations $(x^e_a, y^e_a)$ for local node number $a = 0, 1, 2$ we have
$$
\begin{align}
x &= x^e_0 N_0(\xi_0, \xi_1) + x^e_1 N_1(\xi_0, \xi_1) + x^e_2 N_2(\xi_0, \xi_1), \\
y &= y^e_0 N_0(\xi_0, \xi_1) + y^e_1 N_1(\xi_0, \xi_1) + y^e_2 N_2(\xi_0, \xi_1).
\end{align}
$$
Tasks
Write a function that, given ${\bf \xi}$, returns the shape functions at that location.
Write a function that, given ${\bf \xi}$, returns the derivatives of the shape functions at that location.
Write a function that, given the (global) locations ${\bf x}$ of the nodes of a triangular element and the local coordinates ${\bf \xi}$ within the element returns the corresponding global coordinates.
Write a function that, given the (global) locations ${\bf x}$ of the nodes of a triangular element and the local coordinates ${\bf \xi}$, returns the Jacobian matrix at that location.
Write a function that, given the (global) locations ${\bf x}$ of the nodes of a triangular element and the local coordinates ${\bf \xi}$, returns the determinant of the Jacobian matrix at that location.
Write a function that, given the (global) locations ${\bf x}$ of the nodes of a triangular element and the local coordinates ${\bf \xi}$ within the element returns the derivatives $\partial_{\bf x} N_a = J^{-1} \partial_{\bf \xi} N_a$.
Write a function that, given a function $\psi({\bf \xi})$, returns the quadrature of $\psi$ over the reference triangle.
Write a function that, given the (global) locations of the nodes of a triangular element and a function $\phi(x, y)$, returns the quadrature of $\phi$ over the element.
Test all of the above by integrating simple functions (e.g. $1, \xi_0, \xi_1, x, y$) over the elements above.
More tasks
Write a function to compute the coefficients of the stiffness matrix for a single element,
$$
k^e_{ab} = \int_{\triangle^e} \text{d}\triangle^e \, \left( \partial_{x} N_a (x, y) \partial_{x} N_b(x, y) + \partial_{y} N_a(x, y) \partial_{y} N_b(x, y) \right).
$$
Write a function to compute the coefficients of the force vector for a single element,
$$
f^e_b = \int_{\triangle^e} \text{d}\triangle^e \, N_b(x, y) f(x, y).
$$
Algorithm
This gives our full algorithm:
Set number of elements $N_{\text{elements}}$.
Set node locations ${\bf x}_A, A = 0, \dots, N_{\text{nodes}}$. Note that there is no longer a direct connection between the number of nodes and elements.
Set up the $IEN$ and $ID$ arrays linking elements to nodes and nodes to equation numbers. From these set the location matrix $LM$. Work out the required number of equations $N_{\text{equations}}$ (the maximum of the $ID$ array plus $1$).
Set up arrays of zeros for the global stiffness matrix (size $N_{\text{equations}} \times N_{\text{equations}}$) and force vector (size $N_{\text{equations}}$).
For each element:
Form the element stiffness matrix $k^e_{ab}$.
Form the element force vector $f^e_a$.
Add the contributions to the global stiffness matrix and force vector
Solve $K {\bf T} = {\bf F}$.
Algorithm tasks
Write a function that, given a list of nodes and the $IEN$ and $ID$ arrays, returns the solution ${\bf T}$.
Test on the system $f(x, y) = 1$ with exact solution $T = (1-x^2)/2$.
For a more complex case with the same boundary conditions try
$$
f(x, y) = x^2 (x - 1) \left( y^2 + 4 y (y - 1) + (y - 1)^2 \right) + (3 x - 1) y^2 (y - 1)^2
$$
with exact solution
$$
T(x, y) = \tfrac{1}{2} x^2 (1 - x) y^2 (1 - y)^2.
$$
A useful function is a grid generator or mesher. Good meshers are generally hard: here is a very simple one for this specific problem.
End of explanation
"""
|
jhillairet/scikit-rf | doc/source/examples/metrology/Measuring a Mutiport Device with a 2-Port Network Analyzer.ipynb | bsd-3-clause | import skrf as rf
from itertools import combinations
%matplotlib inline
from pylab import *
rf.stylely()
"""
Explanation: Measuring a Multiport Device with a 2-Port Network Analyzer
Introduction
In microwave measurements, one commonly needs to measure a n-port device with a m-port network analyzer ($m<n$ of course).
<img src="nports_with_2ports.svg"/>
This can be done by terminating each non-measured port with a matched load, and assuming the reflected power is negligible. With multiple measurements, it is then possible to reconstitute the original n-port. The first section of this example illustrates this method.
However, in some cases this may not provide the most accurate results, or even be possible in all measurement environments. Or, sometime it is not possible to have matched loads for all ports. The second part of this example presents an elegant solution to this problem, using impedance renormalization. We'll call it Tippet's technique, because it has a good ring to it.
End of explanation
"""
tee = rf.data.tee
print(tee)
"""
Explanation: Matched Ports
Let's assume that you have a 2-port VNA. In order to measure an n-port network, you will need at least $p=n(n-1)/2$ measurements between the different pairs of ports (the total number of unique pairs of a set of n).
For example, let's assume we want to measure a 3-port network with a 2-port VNA. One needs to perform at least 3 measurements: between ports 1 & 2, between ports 2 & 3 and between ports 1 & 3. We will assume these measurements are then converted into three 2-port Networks. To build the full 3-port Network, one needs to provide a list of these 3 (sub)networks to the scikit-rf builtin function n_twoports_2_nport. While the order of the measurements in the list is not important, be sure to set the Network.name properties of these subnetworks to contain the port indices, for example p12 for the measurement between ports 1 & 2 or p23 between 2 & 3, etc.
Let's suppose we want to measure a tee:
End of explanation
"""
# 2 port Networks as if one measures the tee with a 2 ports VNA
tee12 = rf.subnetwork(tee, [0, 1]) # 2 port Network btw ports 1 & 2, port 3 being matched
tee23 = rf.subnetwork(tee, [1, 2]) # 2 port Network btw ports 2 & 3, port 1 being matched
tee13 = rf.subnetwork(tee, [0, 2]) # 2 port Network btw ports 1 & 3, port 2 being matched
"""
Explanation: For the sake of the demonstration, we will "fake" the 3 distinct measurements by extracting 3 subsets of the original Network, i.e., 3 subnetworks:
End of explanation
"""
tee12.name = 'tee12'
tee23.name = 'tee23'
tee13.name = 'tee13'
"""
Explanation: In reality of course, these three Networks come from three measurements with distinct pairs of ports, the unused port being properly matched.
Before using the n_twoports_2_nport function, one must define the name of these subsets by setting the Network.name property, so that the function knows which measurement corresponds to which ports:
End of explanation
"""
ntw_list = [tee12, tee23, tee13]
tee_rebuilt = rf.n_twoports_2_nport(ntw_list, nports=3)
print(tee_rebuilt)
# this is an ideal example, both Network are thus identical
print(tee == tee_rebuilt)
"""
Explanation: Now we can build the 3-ports Network from these three 2-port subnetworks:
End of explanation
"""
wg = rf.wr10
wg.frequency.npoints = 101
"""
Explanation: Tippet's Technique
This example demonstrates a numerical test of the technique described in "A Rigorous Technique for Measuring the Scattering Matrix of a Multiport Device with a 2-Port Network Analyzer" [1].
In Tippet's technique, several sub-networks are measured in a similar way as before, but the port terminations are not assumed to be matched. Instead, the terminations just have to be known and no more than one can be completely reflective. So, in general $|\Gamma| \ne 1$.
During measurements, each port is terminated with a consistent termination. So port 1 is always terminated with $Z_1$ when not being measured. Once measured, each sub-network is re-normalized to these port impedances. Think about that. Finally, the composite network is constructed, and may then be re-normalized to the desired system impedance, say $50$ ohm.
[1] J. C. Tippet and R. A. Speciale, โA Rigorous Technique for Measuring the Scattering Matrix of a Multiport Device with a 2-Port Network Analyzer,โ IEEE Transactions on Microwave Theory and Techniques, vol. 30, no. 5, pp. 661โ666, May 1982.
Outline of Tippet's Technique
Following the example given in [1], measuring a 4-port network with a 2-port network analyzer.
An outline of the technique:
Calibrate 2-port network analyzer
Get four known terminations ($Z_1, Z_2, Z_3,Z_4$). No more than one can have $|\Gamma| = 1$
Measure all combinations of 2-port subnetworks (there are 6). Each port not currently being measured must be terminated with its corresponding load.
Renormalize each subnetwork to the impedances of the loads used to terminate it when note being measured.
Build composite 4-port, renormalize to VNA impedance.
Implementation
First, we create a Media object, which is used to generate networks for testing. We will use WR-10 Rectangular waveguide.
End of explanation
"""
dut = wg.random(n_ports = 4,name= 'dut')
dut
"""
Explanation: Next, let's generate a random 4-port network which will be the DUT that we are trying to measure with our 2-port network analyzer.
End of explanation
"""
loads = [wg.load(.1+.1j),
wg.load(.2-.2j),
wg.load(.3+.3j),
wg.load(.5),
]
# construct the impedance array, of shape FXN
z_loads = array([k.z.flatten() for k in loads]).T
"""
Explanation: Now we need to define the loads used to terminate each port when it is not being measured. Note that, as described in [1], no more than one can have full reflection, $|\Gamma| = 1$.
End of explanation
"""
ports = arange(dut.nports)
port_combos = list(combinations(ports, 2))
port_combos
"""
Explanation: Create required measurement port combinations. There are 6 different measurements required to measure a 4-port with a 2-port VNA. In general, #measurements = $n\choose 2$, for n-port DUT on a 2-port VNA.
End of explanation
"""
composite = wg.match(nports = 4) # composite network, to be filled.
measured,measured_renorm = {},{} # measured subnetworks and renormalized sub-networks
# ports `a` and `b` are the ports we will connect the VNA too
for a,b in port_combos:
# port `c` and `d` are the ports which we will connect the loads too
c,d =ports[(ports!=a)& (ports!=b)]
# determine where `d` will be on four_port, after its reduced to a three_port
e = where(ports[ports!=c]==d)[0][0]
# connect loads
three_port = rf.connect(dut,c, loads[c],0)
two_port = rf.connect(three_port,e, loads[d],0)
# save raw and renormalized 2-port subnetworks
measured['%i%i'%(a,b)] = two_port.copy()
two_port.renormalize(c_[z_loads[:,a],z_loads[:,b]])
measured_renorm['%i%i'%(a,b)] = two_port.copy()
# stuff this 2-port into the composite 4-port
for i,m in enumerate([a,b]):
for j,n in enumerate([a,b]):
composite.s[:,m,n] = two_port.s[:,i,j]
# properly copy the port impedances
composite.z0[:,a] = two_port.z0[:,0]
composite.z0[:,b] = two_port.z0[:,1]
# finally renormalize from load z0 to 50 ohms
composite.renormalize(50)
"""
Explanation: Now to do it. We loop over the port combos and connect the loads to the right places, simulating actual measurements. Each raw subnetwork measurement is saved, along with the renormalized subnetwork. Finally, we stuff the result into the 4-port composite network.
End of explanation
"""
measured_renorm
"""
Explanation: Results
Self-Consistency
Note that 6 measurements of 2-port subnetworks work out to 24 s-parameters, and we only need 16. This is because each reflection s-parameter is measured three times. As in [1], we will use this redundancy as a check of our accuracy.
The renormalized networks are stored in a dictionary with names based on their port indices; from this you can see that each has been renormalized to the appropriate z0.
End of explanation
"""
s11_set = rf.NS([measured[k] for k in measured if k[0]=='0'])
figure(figsize = (8,4))
subplot(121)
s11_set .plot_s_db(0,0)
subplot(122)
s11_set .plot_s_deg(0,0)
tight_layout()
"""
Explanation: Plotting all three raw measurements of $S_{11}$, we can see that they are not in agreement. These plots correspond to plots 5 and 7 of [1]
End of explanation
"""
s11_set = rf.NS([measured_renorm[k] for k in measured_renorm if k[0]=='0'])
figure(figsize = (8,4))
subplot(121)
s11_set .plot_s_db(0,0)
subplot(122)
s11_set .plot_s_deg(0,0)
tight_layout()
"""
Explanation: However, the renormalized measurements agree perfectly. These plots correspond to plots 6 and 8 of [1]
End of explanation
"""
composite == dut
"""
Explanation: Test For Accuracy
Making sure our composite network is the same as our DUT
End of explanation
"""
sum((composite - dut).s_mag)
"""
Explanation: Nice! How close?
End of explanation
"""
def tippits(dut, gamma, noise=None):
"""simulate tippits technique on a 4-port dut.
"""
ports = arange(dut.nports)
port_combos = list(combinations(ports, 2))
loads = [wg.load(gamma) for k in ports]
# construct the impedance array, of shape FXN
z_loads = array([k.z.flatten() for k in loads]).T
composite = wg.match(nports = dut.nports) # composite network, to be filled.
# ports `a` and `b` are the ports we will connect the VNA too
for a,b in port_combos:
# port `c` and `d` are the ports which we will connect the loads too
c,d =ports[(ports!=a)& (ports!=b)]
# determine where `d` will be on four_port, after its reduced to a three_port
e = where(ports[ports!=c]==d)[0][0]
# connect loads
three_port = rf.connect(dut,c, loads[c],0)
two_port = rf.connect(three_port,e, loads[d],0)
if noise is not None:
two_port.add_noise_polar(*noise)
# save raw and renormalized 2-port subnetworks
measured['%i%i'%(a,b)] = two_port.copy()
two_port.renormalize(c_[z_loads[:,a],z_loads[:,b]])
measured_renorm['%i%i'%(a,b)] = two_port.copy()
# stuff this 2-port into the composite 4-port
for i,m in enumerate([a,b]):
for j,n in enumerate([a,b]):
composite.s[:,m,n] = two_port.s[:,i,j]
# properly copy the port impedances
composite.z0[:,a] = two_port.z0[:,0]
composite.z0[:,b] = two_port.z0[:,1]
# finally renormalize from load z0 to 50 ohms
composite.renormalize(50)
return composite
wg.frequency.npoints = 11
dut = wg.random(4)
def er(gamma, *args):
return max(abs(tippits(dut, rf.db_2_mag(gamma),*args).s_db-dut.s_db).flatten())
gammas = linspace(-40,-0.1,11)
title('Error vs $|\Gamma|$')
plot(gammas, [er(k) for k in gammas])
semilogy()
xlabel('$|\Gamma|$ of Loads (dB)')
ylabel('Max Error in DUT\'s dB(S)')
figure()
noise = (1e-5,.1)
title('Error vs $|\Gamma|$ with reasonable noise')
plot(gammas, [er(k, noise) for k in gammas])
semilogy()
xlabel('$|\Gamma|$ of Loads (dB)')
ylabel('Max Error in DUT\'s dB(S)')
"""
Explanation: Dang!
Practical Application
This could be used in many ways. In waveguide, one could just make a measurement of a radiating open after a standard two-port calibration (like TRL). Then using Tippet's technique, you can leave each port wide open while not being measured. This way you don't have to buy a bunch of loads. How sweet would that be?
More Complex Simulations
End of explanation
"""
|
metpy/MetPy | v0.9/_downloads/ef4bfbf049be071a6c648d7918a50105/Simple_Sounding.ipynb | bsd-3-clause | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import metpy.calc as mpcalc
from metpy.cbook import get_test_data
from metpy.plots import add_metpy_logo, SkewT
from metpy.units import units
# Change default to be better for skew-T
plt.rcParams['figure.figsize'] = (9, 9)
# Upper air data can be obtained using the siphon package, but for this example we will use
# some of MetPy's sample data.
col_names = ['pressure', 'height', 'temperature', 'dewpoint', 'direction', 'speed']
df = pd.read_fwf(get_test_data('jan20_sounding.txt', as_file_obj=False),
skiprows=5, usecols=[0, 1, 2, 3, 6, 7], names=col_names)
df['u_wind'], df['v_wind'] = mpcalc.wind_components(df['speed'],
np.deg2rad(df['direction']))
# Drop any rows with all NaN values for T, Td, winds
df = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed',
'u_wind', 'v_wind'), how='all').reset_index(drop=True)
"""
Explanation: Simple Sounding
Use MetPy as straightforwardly as possible to make a Skew-T LogP plot.
End of explanation
"""
p = df['pressure'].values * units.hPa
T = df['temperature'].values * units.degC
Td = df['dewpoint'].values * units.degC
wind_speed = df['speed'].values * units.knots
wind_dir = df['direction'].values * units.degrees
u, v = mpcalc.wind_components(wind_speed, wind_dir)
skew = SkewT()
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
skew.plot(p, T, 'r')
skew.plot(p, Td, 'g')
skew.plot_barbs(p, u, v)
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
skew.ax.set_ylim(1000, 100)
# Add the MetPy logo!
fig = plt.gcf()
add_metpy_logo(fig, 115, 100)
# Example of defining your own vertical barb spacing
skew = SkewT()
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
skew.plot(p, T, 'r')
skew.plot(p, Td, 'g')
# Set spacing interval--Every 50 mb from 1000 to 100 mb
my_interval = np.arange(100, 1000, 50) * units('mbar')
# Get indexes of values closest to defined interval
ix = mpcalc.resample_nn_1d(p, my_interval)
# Plot only values nearest to defined interval values
skew.plot_barbs(p[ix], u[ix], v[ix])
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
skew.ax.set_ylim(1000, 100)
# Add the MetPy logo!
fig = plt.gcf()
add_metpy_logo(fig, 115, 100)
# Show the plot
plt.show()
"""
Explanation: We will pull the data out of the example dataset into individual variables and
assign units.
End of explanation
"""
|
nadvamir/deep-learning | weight-initialization/weight_initialization.ipynb | mit | %matplotlib inline
import tensorflow as tf
import helper
from tensorflow.examples.tutorials.mnist import input_data
print('Getting MNIST Dataset...')
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
print('Data Extracted.')
"""
Explanation: Weight Initialization
In this lesson, you'll learn how to find good initial weights for a neural network. Having good initial weights can place the neural network close to the optimal solution. This allows the neural network to reach the best solution more quickly.
Testing Weights
Dataset
To see how different weights perform, we'll test on the same dataset and neural network. Let's go over the dataset and neural network.
We'll be using the MNIST dataset to demonstrate the different initial weights. As a reminder, the MNIST dataset contains images of handwritten numbers, 0-9, with normalized input (0.0 - 1.0). Run the cell below to download and load the MNIST dataset.
End of explanation
"""
# Save the shapes of weights for each layer
layer_1_weight_shape = (mnist.train.images.shape[1], 256)
layer_2_weight_shape = (256, 128)
layer_3_weight_shape = (128, mnist.train.labels.shape[1])
"""
Explanation: Neural Network
<img style="float: left" src="images/neural_network.png"/>
For the neural network, we'll test on a 3 layer neural network with ReLU activations and an Adam optimizer. The lessons you learn apply to other neural networks, including different activations and optimizers.
End of explanation
"""
all_zero_weights = [
tf.Variable(tf.zeros(layer_1_weight_shape)),
tf.Variable(tf.zeros(layer_2_weight_shape)),
tf.Variable(tf.zeros(layer_3_weight_shape))
]
all_one_weights = [
tf.Variable(tf.ones(layer_1_weight_shape)),
tf.Variable(tf.ones(layer_2_weight_shape)),
tf.Variable(tf.ones(layer_3_weight_shape))
]
helper.compare_init_weights(
mnist,
'All Zeros vs All Ones',
[
(all_zero_weights, 'All Zeros'),
(all_one_weights, 'All Ones')])
"""
Explanation: Initialize Weights
Let's start looking at some initial weights.
All Zeros or Ones
If you follow the principle of Occam's razor, you might think setting all the weights to 0 or 1 would be the best solution. This is not the case.
With every weight the same, all the neurons at each layer are producing the same output. This makes it hard to decide which weights to adjust.
Let's compare the loss with all ones and all zero weights using helper.compare_init_weights. This function will run two different initial weights on the neural network above for 2 epochs. It will plot the loss for the first 100 batches and print out stats after the 2 epochs (~860 batches). We plot the first 100 batches to better judge which weights performed better at the start.
Run the cell below to see the difference between weights of all zeros against all ones.
End of explanation
"""
helper.hist_dist('Random Uniform (minval=-3, maxval=3)', tf.random_uniform([1000], -3, 3))
"""
Explanation: As you can see the accuracy is close to guessing for both zeros and ones, around 10%.
The neural network is having a hard time determining which weights need to be changed, since the neurons have the same output for each layer. To avoid neurons with the same output, let's use unique weights. We can also randomly select these weights to avoid being stuck in a local minimum for each run.
A good solution for getting these random weights is to sample from a uniform distribution.
Uniform Distribution
A [uniform distribution](https://en.wikipedia.org/wiki/Uniform_distribution_(continuous%29) has the equal probability of picking any number from a set of numbers. We'll be picking from a continous distribution, so the chance of picking the same number is low. We'll use TensorFlow's tf.random_uniform function to pick random numbers from a uniform distribution.
tf.random_uniform(shape, minval=0, maxval=None, dtype=tf.float32, seed=None, name=None)
Outputs random values from a uniform distribution.
The generated values follow a uniform distribution in the range [minval, maxval). The lower bound minval is included in the range, while the upper bound maxval is excluded.
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
minval: A 0-D Tensor or Python value of type dtype. The lower bound on the range of random values to generate. Defaults to 0.
maxval: A 0-D Tensor or Python value of type dtype. The upper bound on the range of random values to generate. Defaults to 1 if dtype is floating point.
dtype: The type of the output: float32, float64, int32, or int64.
seed: A Python integer. Used to create a random seed for the distribution. See tf.set_random_seed for behavior.
name: A name for the operation (optional).
We can visualize the uniform distribution by using a histogram. Let's map the values from tf.random_uniform([1000], -3, 3) to a histogram using the helper.hist_dist function. This will be 1000 random float values from -3 to 3, excluding the value 3.
End of explanation
"""
# Default for tf.random_uniform is minval=0 and maxval=1
basline_weights = [
tf.Variable(tf.random_uniform(layer_1_weight_shape)),
tf.Variable(tf.random_uniform(layer_2_weight_shape)),
tf.Variable(tf.random_uniform(layer_3_weight_shape))
]
helper.compare_init_weights(
mnist,
'Baseline',
[(basline_weights, 'tf.random_uniform [0, 1)')])
"""
Explanation: The histogram used 500 buckets for the 1000 values. Since the chance for any single bucket is the same, there should be around 2 values for each bucket. That's exactly what we see with the histogram. Some buckets have more and some have less, but they trend around 2.
Now that you understand the tf.random_uniform function, let's apply it to some initial weights.
Baseline
Let's see how well the neural network trains using the default values for tf.random_uniform, where minval=0.0 and maxval=1.0.
End of explanation
"""
uniform_neg1to1_weights = [
tf.Variable(tf.random_uniform(layer_1_weight_shape, -1, 1)),
tf.Variable(tf.random_uniform(layer_2_weight_shape, -1, 1)),
tf.Variable(tf.random_uniform(layer_3_weight_shape, -1, 1))
]
helper.compare_init_weights(
mnist,
'[0, 1) vs [-1, 1)',
[
(basline_weights, 'tf.random_uniform [0, 1)'),
(uniform_neg1to1_weights, 'tf.random_uniform [-1, 1)')])
"""
Explanation: The loss graph is showing the neural network is learning, which it didn't with all zeros or all ones. We're headed in the right direction.
General rule for setting weights
The general rule for setting the weights in a neural network is to be close to zero without being too small. A good practice is to start your weights in the range of $[-y, y]$ where
$y=1/\sqrt{n}$ ($n$ is the number of inputs to a given neuron).
Let's see if this holds true; first, let's center our range over zero. This will give us the range [-1, 1).
End of explanation
"""
uniform_neg01to01_weights = [
tf.Variable(tf.random_uniform(layer_1_weight_shape, -0.1, 0.1)),
tf.Variable(tf.random_uniform(layer_2_weight_shape, -0.1, 0.1)),
tf.Variable(tf.random_uniform(layer_3_weight_shape, -0.1, 0.1))
]
uniform_neg001to001_weights = [
tf.Variable(tf.random_uniform(layer_1_weight_shape, -0.01, 0.01)),
tf.Variable(tf.random_uniform(layer_2_weight_shape, -0.01, 0.01)),
tf.Variable(tf.random_uniform(layer_3_weight_shape, -0.01, 0.01))
]
uniform_neg0001to0001_weights = [
tf.Variable(tf.random_uniform(layer_1_weight_shape, -0.001, 0.001)),
tf.Variable(tf.random_uniform(layer_2_weight_shape, -0.001, 0.001)),
tf.Variable(tf.random_uniform(layer_3_weight_shape, -0.001, 0.001))
]
helper.compare_init_weights(
mnist,
'[-1, 1) vs [-0.1, 0.1) vs [-0.01, 0.01) vs [-0.001, 0.001)',
[
(uniform_neg1to1_weights, '[-1, 1)'),
(uniform_neg01to01_weights, '[-0.1, 0.1)'),
(uniform_neg001to001_weights, '[-0.01, 0.01)'),
(uniform_neg0001to0001_weights, '[-0.001, 0.001)')],
plot_n_batches=None)
"""
Explanation: We're going in the right direction, the accuracy and loss are better with [-1, 1). We still want smaller weights. How far can we go before it's too small?
Too small
Let's compare [-0.1, 0.1), [-0.01, 0.01), and [-0.001, 0.001) to see how small is too small. We'll also set plot_n_batches=None to show all the batches in the plot.
End of explanation
"""
import numpy as np
general_rule_weights = [
tf.Variable(tf.random_uniform(layer_1_weight_shape, -1/np.sqrt(layer_1_weight_shape[0]), 1/np.sqrt(layer_1_weight_shape[0]))),
tf.Variable(tf.random_uniform(layer_2_weight_shape, -1/np.sqrt(layer_2_weight_shape[0]), 1/np.sqrt(layer_2_weight_shape[0]))),
tf.Variable(tf.random_uniform(layer_3_weight_shape, -1/np.sqrt(layer_3_weight_shape[0]), 1/np.sqrt(layer_3_weight_shape[0])))
]
helper.compare_init_weights(
mnist,
'[-0.1, 0.1) vs General Rule',
[
(uniform_neg01to01_weights, '[-0.1, 0.1)'),
(general_rule_weights, 'General Rule')],
plot_n_batches=None)
"""
Explanation: Looks like anything [-0.01, 0.01) or smaller is too small. Let's compare this to our typical rule of using the range $y=1/\sqrt{n}$.
End of explanation
"""
helper.hist_dist('Random Normal (mean=0.0, stddev=1.0)', tf.random_normal([1000]))
"""
Explanation: The range we found and $y=1/\sqrt{n}$ are really close.
Since the uniform distribution has the same chance to pick anything in the range, what if we used a distribution that had a higher chance of picking numbers closer to 0? Let's look at the normal distribution.
Normal Distribution
Unlike the uniform distribution, the normal distribution has a higher likelihood of picking numbers close to its mean. To visualize it, let's plot values from TensorFlow's tf.random_normal function to a histogram.
tf.random_normal(shape, mean=0.0, stddev=1.0, dtype=tf.float32, seed=None, name=None)
Outputs random values from a normal distribution.
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
mean: A 0-D Tensor or Python value of type dtype. The mean of the normal distribution.
stddev: A 0-D Tensor or Python value of type dtype. The standard deviation of the normal distribution.
dtype: The type of the output.
seed: A Python integer. Used to create a random seed for the distribution. See tf.set_random_seed for behavior.
name: A name for the operation (optional).
End of explanation
"""
normal_01_weights = [
tf.Variable(tf.random_normal(layer_1_weight_shape, stddev=0.1)),
tf.Variable(tf.random_normal(layer_2_weight_shape, stddev=0.1)),
tf.Variable(tf.random_normal(layer_3_weight_shape, stddev=0.1))
]
helper.compare_init_weights(
mnist,
'Uniform [-0.1, 0.1) vs Normal stddev 0.1',
[
(uniform_neg01to01_weights, 'Uniform [-0.1, 0.1)'),
(normal_01_weights, 'Normal stddev 0.1')])
"""
Explanation: Let's compare the normal distribution against the previous uniform distribution.
End of explanation
"""
helper.hist_dist('Truncated Normal (mean=0.0, stddev=1.0)', tf.truncated_normal([1000]))
"""
Explanation: The normal distribution gave a slight improvement in accuracy and loss. Let's move closer to 0 and drop picked numbers that are more than x standard deviations away. This distribution is called the Truncated Normal Distribution.
Truncated Normal Distribution
tf.truncated_normal(shape, mean=0.0, stddev=1.0, dtype=tf.float32, seed=None, name=None)
Outputs random values from a truncated normal distribution.
The generated values follow a normal distribution with specified mean and standard deviation, except that values whose magnitude is more than 2 standard deviations from the mean are dropped and re-picked.
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
mean: A 0-D Tensor or Python value of type dtype. The mean of the truncated normal distribution.
stddev: A 0-D Tensor or Python value of type dtype. The standard deviation of the truncated normal distribution.
dtype: The type of the output.
seed: A Python integer. Used to create a random seed for the distribution. See tf.set_random_seed for behavior.
name: A name for the operation (optional).
End of explanation
"""
trunc_normal_01_weights = [
tf.Variable(tf.truncated_normal(layer_1_weight_shape, stddev=0.1)),
tf.Variable(tf.truncated_normal(layer_2_weight_shape, stddev=0.1)),
tf.Variable(tf.truncated_normal(layer_3_weight_shape, stddev=0.1))
]
helper.compare_init_weights(
mnist,
'Normal vs Truncated Normal',
[
(normal_01_weights, 'Normal'),
(trunc_normal_01_weights, 'Truncated Normal')])
"""
Explanation: Again, let's compare these results against the previous (normal) distribution.
End of explanation
"""
helper.compare_init_weights(
mnist,
'Baseline vs Truncated Normal',
[
(basline_weights, 'Baseline'),
(trunc_normal_01_weights, 'Truncated Normal')])
"""
Explanation: There's no difference between the two, but that's because the neural network we're using is too small. A larger neural network will pick more points on the normal distribution, increasing the likelihood that its choices are more than 2 standard deviations from the mean.
We've come a long way from the first set of weights we tested. Let's see the difference between the weights we used then and now.
End of explanation
"""
|
UWSEDS/LectureNotes | PreFall2018/Objects/Building Software With Objects.ipynb | bsd-2-clause | from IPython.display import Image
Image(filename='Classes_vs_Objects.png')
"""
Explanation: Why Objects?
Provide modularity and reuse through hierarchical structures
Object oriented programming is a different way of thinking.
Programming With Objects
End of explanation
"""
# Definiting a Car class
class Car(object):
pass
"""
Explanation: Initial concepts
An object is a container of data (attributes) and code (methods)
A class is a template for creating objects
Reuse is provided by:
reusing the same class to create many objects
"inheriting" data and code from other classes
End of explanation
"""
from IPython.display import Image
Image(filename='ClassAttributes.png')
"""
Explanation: Attributes
End of explanation
"""
class Car(object):
# The following method is called when the class
# is created or "constructed". The variables "self.x" refers
# to the variable "x" in a created object.
def __init__(self, color, car_type, speed):
self.color = color
self.car_type = car_type
self.speed = speed
class Car(object):
# The following method is called when the class
# is created or "constructed". The variables "self.x" refers
# to the variable "x" in a created object.
def __init__(self, color, car_type, speed):
self.color = color
self.car_type = car_type
self.speed = speed
# Creating an object for a class with arguments in the __init__ method
car = Car("Blue", "HatchBack", 100)
car.color
# Creating an object for a class with arguments in the __init__ method
joe_car = Car("Blue", "Sedan", 100)
dave_car = Car("Red", "Sports", 150)
print ("Type of joe_car is %s. Type of dave_car is %s"% (type(joe_car), type(dave_car)))
# Accessed instance attributes
joe_car = Car("Blue", "Sedan", 100)
print ("Type of joe_car has (color, type, speed)=%s." % str((joe_car.color, joe_car.car_type, joe_car.speed)))
"""
Explanation: Attributes are data associated with an object (instance) or class. Object attributes (and methods) are specified by using "self". Instance attributes and methods are accessed using the dot "." operator.
End of explanation
"""
from IPython.display import Image
Image(filename='InstanceMethods.png')
#Class diagram
from IPython.display import Image
Image(filename='SingleClassDiagram.png', width=200, height=200)
"""
Explanation: EXERCISE: Change the constructor for Car to include the attribute "doors".
Instance Methods
End of explanation
"""
class Car(object):
def __init__(self, color, car_type, speed):
"""
:param str color:
:param str car_type:
:param int speed:
"""
self.color = color
self.car_type = car_type
self.speed = speed
def start(self):
print ("%s %s started!" % (self.color, self.car_type))
def stop(self):
pass
def turn(self, direction):
"""
        :param str direction: left or right
"""
pass
car = Car("Blue", "Sedan", 100)
car.start()
"""
Explanation: A class diagram provides a more compact representation of a class. There are three sections.
- Class name
- Attributes
- Methods
Instance methods
- functions associated with the objects constructed for a class
- provide a way to transform data in objects
- use instance attributes (references to variables beginning with "self.")
End of explanation
"""
from IPython.display import Image
Image(filename='SimpleClassHierarchy.png', width=400, height=400)
# Code for inheritance
class Sedan(Car):
# Sedan inherits from car
def __init__(self, color, speed):
"""
:param str color:
:param int speed:
"""
super().__init__(color, "Sedan", speed)
def play_cd(self):
print ("Playing cd in %s sedan" % self.color)
sedan = Sedan("Yellow", 1e6)
sedan.play_cd()
sedan.car_type
joe_car = Sedan("Blue", 100)
print ("Type of joe_car has (color, type, speed)=%s." % str((joe_car.color, joe_car.car_type, joe_car.speed)))
"""
Explanation: EXERCISE: Implement the stop and turn methods. Run the methods.
Inheritance
Inheritance is a common way that classes reuse data and code from other classes. A child class or derived class gets attributes and methods from its parent class.
Programmatically:
- Specify inheritance in the class statement
- Constructor for derived class (class that inherits) have access to the constructor of its parent.
Inheritance is represented in diagrams as an arror from the child class to its parent class.
End of explanation
"""
from IPython.display import Image
Image(filename='ClassInheritance.png', width=400, height=400)
"""
Explanation: Exercise: Implement SportsCar and create dave_car from SportsCar. Print attributes of dave_car.
End of explanation
"""
from IPython.display import Image
Image(filename='ATMClassDiagram.png', width=400, height=400)
"""
Explanation: Subclasses can have their own methods.
Exercise: Add the play_cd() to Sedan and play_bluetooth() method to SportsCar. Construct a test to run these methods.
What Else?
Class attributes
Class methods
Object Oriented Design
A design methodology must specify:
- Components: What they do and how to build them
- Interactions: How the components interact to implement use cases
Object oriented design
- Components are specified by class diagrams.
- Interactions are specified by interaction diagrams.
Class diagram for the ATM system
End of explanation
"""
from IPython.display import Image
Image(filename='ATMAuthentication.png', width=800, height=800)
"""
Explanation: The diamond arrow is a "has-a" relationship. For example, the Controller has-a ATMInput. This means that a Controller object has an instance variable for an ATMInput object.
Interaction Diagram for the ATM System
An interaction diagram specifies how components interact to achieve a use case.
Interactions are from one object to another object, indicating that the first object calls a method in the second object.
Rules for drawing lines in an interaction diagram:
- The calling object must know about the called object.
- The called object must have the method invoked by the calling object.
End of explanation
"""
from IPython.display import Image
Image(filename='SciSheetsCoreClasses.png', width=300, height=30)
"""
Explanation: Look at Objects/ATMDiagrams.pdf for a solution.
What Else in Design?
Other diagrams: state diagrams, package diagrams, ...
Object oriented design patterns
Complex Example of Class Hierarchy
End of explanation
"""
|
dnc1994/MachineLearning-UW | ml-clustering-and-retrieval/6_hierarchical_clustering.ipynb | mit | import graphlab
import matplotlib.pyplot as plt
import numpy as np
import sys
import os
import time
from scipy.sparse import csr_matrix
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances
%matplotlib inline
"""
Explanation: Hierarchical Clustering
Hierarchical clustering refers to a class of clustering methods that seek to build a hierarchy of clusters, in which some clusters contain others. In this assignment, we will explore a top-down approach, recursively bipartitioning the data using k-means.
Note to Amazon EC2 users: To conserve memory, make sure to stop all the other notebooks before running this notebook.
Import packages
End of explanation
"""
wiki = graphlab.SFrame('people_wiki.gl/')
"""
Explanation: Load the Wikipedia dataset
End of explanation
"""
wiki['tf_idf'] = graphlab.text_analytics.tf_idf(wiki['text'])
"""
Explanation: As we did in previous assignments, let's extract the TF-IDF features:
End of explanation
"""
from em_utilities import sframe_to_scipy # converter
# This will take about a minute or two.
tf_idf, map_index_to_word = sframe_to_scipy(wiki, 'tf_idf')
"""
Explanation: To run k-means on this dataset, we should convert the data matrix into a sparse matrix.
End of explanation
"""
from sklearn.preprocessing import normalize
tf_idf = normalize(tf_idf)
"""
Explanation: To be consistent with the k-means assignment, let's normalize all vectors to have unit norm.
End of explanation
"""
def bipartition(cluster, maxiter=400, num_runs=4, seed=None):
'''cluster: should be a dictionary containing the following keys
* dataframe: original dataframe
* matrix: same data, in matrix format
* centroid: centroid for this particular cluster'''
data_matrix = cluster['matrix']
dataframe = cluster['dataframe']
# Run k-means on the data matrix with k=2. We use scikit-learn here to simplify workflow.
kmeans_model = KMeans(n_clusters=2, max_iter=maxiter, n_init=num_runs, random_state=seed, n_jobs=-1)
kmeans_model.fit(data_matrix)
centroids, cluster_assignment = kmeans_model.cluster_centers_, kmeans_model.labels_
# Divide the data matrix into two parts using the cluster assignments.
data_matrix_left_child, data_matrix_right_child = data_matrix[cluster_assignment==0], \
data_matrix[cluster_assignment==1]
# Divide the dataframe into two parts, again using the cluster assignments.
cluster_assignment_sa = graphlab.SArray(cluster_assignment) # minor format conversion
dataframe_left_child, dataframe_right_child = dataframe[cluster_assignment_sa==0], \
dataframe[cluster_assignment_sa==1]
# Package relevant variables for the child clusters
cluster_left_child = {'matrix': data_matrix_left_child,
'dataframe': dataframe_left_child,
'centroid': centroids[0]}
cluster_right_child = {'matrix': data_matrix_right_child,
'dataframe': dataframe_right_child,
'centroid': centroids[1]}
return (cluster_left_child, cluster_right_child)
"""
Explanation: Bipartition the Wikipedia dataset using k-means
Recall our workflow for clustering text data with k-means:
Load the dataframe containing a dataset, such as the Wikipedia text dataset.
Extract the data matrix from the dataframe.
Run k-means on the data matrix with some value of k.
Visualize the clustering results using the centroids, cluster assignments, and the original dataframe. We keep the original dataframe around because the data matrix does not keep auxiliary information (in the case of the text dataset, the title of each article).
Let us modify the workflow to perform bipartitioning:
Load the dataframe containing a dataset, such as the Wikipedia text dataset.
Extract the data matrix from the dataframe.
Run k-means on the data matrix with k=2.
Divide the data matrix into two parts using the cluster assignments.
Divide the dataframe into two parts, again using the cluster assignments. This step is necessary to allow for visualization.
Visualize the bipartition of data.
We'd like to be able to repeat Steps 3-6 multiple times to produce a hierarchy of clusters such as the following:
(root)
|
+------------+-------------+
| |
Cluster Cluster
+------+-----+ +------+-----+
| | | |
Cluster Cluster Cluster Cluster
Each parent cluster is bipartitioned to produce two child clusters. At the very top is the root cluster, which consists of the entire dataset.
Now we write a wrapper function to bipartition a given cluster using k-means. There are three variables that together comprise the cluster:
dataframe: a subset of the original dataframe that corresponds to the member rows of the cluster
matrix: same set of rows, stored in sparse matrix format
centroid: the centroid of the cluster (not applicable for the root cluster)
Rather than passing around the three variables separately, we package them into a Python dictionary. The wrapper function takes a single dictionary (representing a parent cluster) and returns two dictionaries (representing the child clusters).
End of explanation
"""
wiki_data = {'matrix': tf_idf, 'dataframe': wiki} # no 'centroid' for the root cluster
left_child, right_child = bipartition(wiki_data, maxiter=100, num_runs=8, seed=1)
"""
Explanation: The following cell performs bipartitioning of the Wikipedia dataset. Allow 20-60 seconds to finish.
Note. For the purpose of the assignment, we set an explicit seed (seed=1) to produce identical outputs for every run. In practical applications, you might want to use different random seeds for all runs.
End of explanation
"""
left_child
"""
Explanation: Let's examine the contents of one of the two clusters, which we call the left_child, referring to the tree visualization above.
End of explanation
"""
right_child
"""
Explanation: And here is the content of the other cluster we named right_child.
End of explanation
"""
def display_single_tf_idf_cluster(cluster, map_index_to_word):
'''map_index_to_word: SFrame specifying the mapping between words and column indices'''
wiki_subset = cluster['dataframe']
tf_idf_subset = cluster['matrix']
centroid = cluster['centroid']
# Print top 5 words with largest TF-IDF weights in the cluster
idx = centroid.argsort()[::-1]
for i in xrange(5):
print('{0:s}:{1:.3f}'.format(map_index_to_word['category'][idx[i]], centroid[idx[i]])),
print('')
# Compute distances from the centroid to all data points in the cluster.
distances = pairwise_distances(tf_idf_subset, [centroid], metric='euclidean').flatten()
# compute nearest neighbors of the centroid within the cluster.
nearest_neighbors = distances.argsort()
# For 8 nearest neighbors, print the title as well as first 180 characters of text.
# Wrap the text at 80-character mark.
for i in xrange(8):
text = ' '.join(wiki_subset[nearest_neighbors[i]]['text'].split(None, 25)[0:25])
print('* {0:50s} {1:.5f}\n {2:s}\n {3:s}'.format(wiki_subset[nearest_neighbors[i]]['name'],
distances[nearest_neighbors[i]], text[:90], text[90:180] if len(text) > 90 else ''))
print('')
"""
Explanation: Visualize the bipartition
We provide you with a modified version of the visualization function from the k-means assignment. For each cluster, we print the top 5 words with highest TF-IDF weights in the centroid and display excerpts for the 8 nearest neighbors of the centroid.
End of explanation
"""
display_single_tf_idf_cluster(left_child, map_index_to_word)
display_single_tf_idf_cluster(right_child, map_index_to_word)
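# (Added sketch) The notebook applies bipartition() by hand at each level of the tree.
# The same idea can be written as a small recursive helper that builds a binary tree of
# clusters down to a chosen depth. Illustrative only -- not part of the original assignment.
def build_hierarchy(cluster, depth, maxiter=100, num_runs=8, seed=1):
    """Recursively bipartition `cluster`, returning a nested dict of clusters."""
    if depth == 0:
        return {'cluster': cluster, 'children': None}
    left, right = bipartition(cluster, maxiter=maxiter, num_runs=num_runs, seed=seed)
    return {'cluster': cluster,
            'children': (build_hierarchy(left, depth - 1, maxiter, num_runs, seed),
                         build_hierarchy(right, depth - 1, maxiter, num_runs, seed))}
# Example (commented out -- each level reruns k-means on the corresponding subset):
# tree = build_hierarchy(wiki_data, depth=2)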
"""
Explanation: Let's visualize the two child clusters:
End of explanation
"""
athletes = left_child
non_athletes = right_child
"""
Explanation: The left cluster consists of athletes, whereas the right cluster consists of non-athletes. So far, we have a single-level hierarchy consisting of two clusters, as follows:
Wikipedia
+
|
+--------------------------+--------------------+
| |
+ +
Athletes Non-athletes
Is this hierarchy good enough? When building a hierarchy of clusters, we must keep our particular application in mind. For instance, we might want to build a directory for Wikipedia articles. A good directory would let you quickly narrow down your search to a small set of related articles. The categories of athletes and non-athletes are too general to facilitate efficient search. For this reason, we decide to build another level into our hierarchy of clusters with the goal of getting more specific cluster structure at the lower level. To that end, we subdivide both the athletes and non-athletes clusters.
Perform recursive bipartitioning
Cluster of athletes
To help identify the clusters we've built so far, let's give them easy-to-read aliases:
End of explanation
"""
# Bipartition the cluster of athletes
left_child_athletes, right_child_athletes = bipartition(athletes, maxiter=100, num_runs=8, seed=1)
"""
Explanation: Using the bipartition function, we produce two child clusters of the athlete cluster:
End of explanation
"""
display_single_tf_idf_cluster(left_child_athletes, map_index_to_word)
"""
Explanation: The left child cluster mainly consists of baseball players:
End of explanation
"""
display_single_tf_idf_cluster(right_child_athletes, map_index_to_word)
"""
Explanation: On the other hand, the right child cluster is a mix of football players and ice hockey players:
End of explanation
"""
baseball = left_child_athletes
ice_hockey_football = right_child_athletes
"""
Explanation: Note. Concerning use of "football"
The occurrences of the word "football" above refer to association football. This sport is also known as "soccer" in the United States (to avoid confusion with American football). We will use "football" throughout when discussing topic representation.
Our hierarchy of clusters now looks like this:
Wikipedia
+
|
+--------------------------+--------------------+
| |
+ +
Athletes Non-athletes
+
|
+-----------+--------+
| |
| +
+ football/
baseball ice hockey
Should we keep subdividing the clusters? If so, which cluster should we subdivide? To answer this question, we again think about our application. Since we organize our directory by topics, it would be nice to have topics that are about as coarse as each other. For instance, if one cluster is about baseball, we expect some other clusters about football, basketball, volleyball, and so forth. That is, we would like to achieve a similar level of granularity for all clusters.
Notice that the right child cluster is coarser than the left child cluster. The right cluster possesses a greater variety of topics than the left (ice hockey/football vs. baseball), so the right child cluster should be subdivided further to produce finer child clusters.
Let's give the clusters aliases as well:
End of explanation
"""
left, right = bipartition(ice_hockey_football, maxiter=100, num_runs=8, seed=1)
display_single_tf_idf_cluster(left, map_index_to_word)
display_single_tf_idf_cluster(right, map_index_to_word)
"""
Explanation: Cluster of ice hockey players and football players
In answering the following quiz question, take a look at the topics represented in the top documents (those closest to the centroid), as well as the list of words with highest TF-IDF weights.
Quiz Question. Bipartition the cluster of ice hockey and football players. Which of the two child clusters should be futher subdivided?
Note. To achieve consistent results, use the arguments maxiter=100, num_runs=8, seed=1 when calling the bipartition function.
The left child cluster
The right child cluster
End of explanation
"""
# Bipartition the cluster of non-athletes
left_child_non_athletes, right_child_non_athletes = bipartition(non_athletes, maxiter=100, num_runs=8, seed=1)
display_single_tf_idf_cluster(left_child_non_athletes, map_index_to_word)
display_single_tf_idf_cluster(right_child_non_athletes, map_index_to_word)
"""
Explanation: Caution. The granularity criterion is an imperfect heuristic and must be taken with a grain of salt. It takes a lot of manual intervention to obtain a good hierarchy of clusters.
If a cluster is highly mixed, the top articles and words may not convey the full picture of the cluster. Thus, we may be misled if we judge the purity of clusters solely by their top documents and words.
Many interesting topics are hidden somewhere inside the clusters but do not appear in the visualization. We may need to subdivide further to discover new topics. For instance, subdividing the ice_hockey_football cluster led to the appearance of golf.
Quiz Question. Which diagram best describes the hierarchy right after splitting the ice_hockey_football cluster? Refer to the quiz form for the diagrams.
Cluster of non-athletes
Now let us subdivide the cluster of non-athletes.
End of explanation
"""
scholars_politicians_etc = left_child_non_athletes
musicians_artists_etc = right_child_non_athletes
"""
Explanation: The first cluster consists of scholars, politicians, and government officials whereas the second consists of musicians, artists, and actors. Run the following code cell to make convenient aliases for the clusters.
End of explanation
"""
left, right = bipartition(scholars_politicians_etc, maxiter=100, num_runs=8, seed=1)
left2, right2 = bipartition(musicians_artists_etc, maxiter=100, num_runs=8, seed=1)
display_single_tf_idf_cluster(left, map_index_to_word)
display_single_tf_idf_cluster(right, map_index_to_word)
display_single_tf_idf_cluster(left2, map_index_to_word)
display_single_tf_idf_cluster(right2, map_index_to_word)
"""
Explanation: Quiz Question. Let us bipartition the clusters scholars_politicians_etc and musicians_artists_etc. Which diagram best describes the resulting hierarchy of clusters for the non-athletes? Refer to the quiz for the diagrams.
Note. Use maxiter=100, num_runs=8, seed=1 for consistency of output.
End of explanation
"""
|
5agado/data-science-learning | statistics/Statistics - Basic Theorems.ipynb | apache-2.0 | %matplotlib notebook
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
sns.set_context("paper")
"""
Explanation: Table of Contents
Law Of Large Numbers
Central Limit Theorem
Experiment: Sum Of N Dice
End of explanation
"""
# Define info for die population
die = np.arange(6)+1
die_dist = np.array([1 / len(die)] * len(die))  # uniform probability for each face
# Expected value
sum([v*p for v, p in zip(die, die_dist)])
# Simulate N die rolls
num_rolls = 10000
rolls_res = np.random.choice(die, num_rolls, p=die_dist)
plt.plot(np.arange(1, num_rolls), [rolls_res[:i].mean() for i in range(1, num_rolls)])
plt.xscale('log')
plt.show()
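# (Added sketch) The quoted definition below talks about the *proportion* of a particular
# outcome converging to its probability; the same simulated rolls show the share of
# sixes approaching 1/6.
import matplotlib.pyplot as plt
proportion_of_six = np.cumsum(rolls_res == 6) / np.arange(1, num_rolls + 1)
plt.plot(np.arange(1, num_rolls + 1), proportion_of_six)
plt.axhline(1 / 6, color='red', linestyle='--')
plt.xscale('log')
plt.show()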
"""
Explanation: Law Of Large Numbers
The sample mean converges to the population mean as the number of samples tends to infinity.
Equivalently, the average of the observed results becomes approximately equal to the expected value (mean).
"As more observations are collected, the proportion $p_n$ of occurrences with a particular outcome converges to the probability $p$ of that outcome." OpenIntro Statistics
End of explanation
"""
sns.barplot(die, die_dist)
plt.show()
np.random.choice(die, (5, 3), p=die_dist)
num_rolls = 1000
num_dice = [1, 2, 3, 4, 5]
fig, axes = plt.subplots(len(num_dice))
fig.tight_layout()
for i, num in enumerate(num_dice):
rolls_res = np.random.choice(die, (num_rolls, num), p=die_dist).sum(axis=1)
sns.distplot(rolls_res, ax=axes[i])
#axes[i].set_xticklabels(axes[i].xaxis.get_majorticklabels(), rotation=30)
#axes[i].set_xlabel(name)
plt.show()
"""
Explanation: Central Limit Theorem
The sum of values drawn independently from a distribution approximates a normal distribution as more values are summed (regardless of the underlying distribution).
"if we collect a large enough sample from a population, the sample mean should be equal to, more or less, the population mean"
End of explanation
"""
# Define info for die population
die = np.arange(6)+1
die_dist = np.array([1 / len(die)] * len(die))  # uniform probability for each face
rolls_res = np.random.choice(die, (10000, 5), p=die_dist).sum(axis=1)
vals, counts = np.unique(rolls_res, return_counts=True)
num_vals = len(vals)
total_count = counts.sum()
sns.barplot(vals, counts/total_count)
plt.show()
plt.plot(vals, counts/total_count)
plt.show()
data = counts/total_count
p = np.arange(26) / float(26)
plt.plot(vals, np.cumsum(data))
plt.show()
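# (Added sketch) Overlaying the normal approximation predicted by the CLT: the sum of
# five fair dice has mean 5 * 3.5 = 17.5 and variance 5 * 35/12. Assumes scipy is available.
import matplotlib.pyplot as plt
from scipy import stats
mu = 5 * 3.5
sigma = np.sqrt(5 * 35 / 12)
grid = np.linspace(vals.min(), vals.max(), 200)
plt.plot(vals, counts / total_count, label='simulated sum of 5 dice')
plt.plot(grid, stats.norm.pdf(grid, mu, sigma), '--', label='normal approximation')
plt.legend()
plt.show()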
"""
Explanation: Experiment: Sum Of N Dice
End of explanation
"""
|
tensorflow/examples | courses/udacity_intro_to_tensorflow_for_deep_learning/l08c09_forecasting_with_cnn.ipynb | apache-2.0 | #@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2018 The TensorFlow Authors.
End of explanation
"""
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
keras = tf.keras
def plot_series(time, series, format="-", start=0, end=None, label=None):
plt.plot(time[start:end], series[start:end], format, label=label)
plt.xlabel("Time")
plt.ylabel("Value")
if label:
plt.legend(fontsize=14)
plt.grid(True)
def trend(time, slope=0):
return slope * time
def seasonal_pattern(season_time):
"""Just an arbitrary pattern, you can change it if you wish"""
return np.where(season_time < 0.4,
np.cos(season_time * 2 * np.pi),
1 / np.exp(3 * season_time))
def seasonality(time, period, amplitude=1, phase=0):
"""Repeats the same pattern at each period"""
season_time = ((time + phase) % period) / period
return amplitude * seasonal_pattern(season_time)
def white_noise(time, noise_level=1, seed=None):
rnd = np.random.RandomState(seed)
return rnd.randn(len(time)) * noise_level
def seq2seq_window_dataset(series, window_size, batch_size=32,
shuffle_buffer=1000):
series = tf.expand_dims(series, axis=-1)
ds = tf.data.Dataset.from_tensor_slices(series)
ds = ds.window(window_size + 1, shift=1, drop_remainder=True)
ds = ds.flat_map(lambda w: w.batch(window_size + 1))
ds = ds.shuffle(shuffle_buffer)
ds = ds.map(lambda w: (w[:-1], w[1:]))
return ds.batch(batch_size).prefetch(1)
def model_forecast(model, series, window_size):
ds = tf.data.Dataset.from_tensor_slices(series)
ds = ds.window(window_size, shift=1, drop_remainder=True)
ds = ds.flat_map(lambda w: w.batch(window_size))
ds = ds.batch(32).prefetch(1)
forecast = model.predict(ds)
return forecast
time = np.arange(4 * 365 + 1)
slope = 0.05
baseline = 10
amplitude = 40
series = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude)
noise_level = 5
noise = white_noise(time, noise_level, seed=42)
series += noise
plt.figure(figsize=(10, 6))
plot_series(time, series)
plt.show()
split_time = 1000
time_train = time[:split_time]
x_train = series[:split_time]
time_valid = time[split_time:]
x_valid = series[split_time:]
"""
Explanation: Forecasting with a CNN
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l08c09_forecasting_with_cnn.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l08c09_forecasting_with_cnn.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
</table>
Setup
End of explanation
"""
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
window_size = 30
train_set = seq2seq_window_dataset(x_train, window_size,
batch_size=128)
model = keras.models.Sequential([
keras.layers.Conv1D(filters=32, kernel_size=5,
strides=1, padding="causal",
activation="relu",
input_shape=[None, 1]),
keras.layers.LSTM(32, return_sequences=True),
keras.layers.LSTM(32, return_sequences=True),
keras.layers.Dense(1),
keras.layers.Lambda(lambda x: x * 200)
])
lr_schedule = keras.callbacks.LearningRateScheduler(
lambda epoch: 1e-8 * 10**(epoch / 20))
optimizer = keras.optimizers.SGD(lr=1e-8, momentum=0.9)
model.compile(loss=keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
history = model.fit(train_set, epochs=100, callbacks=[lr_schedule])
plt.semilogx(history.history["lr"], history.history["loss"])
plt.axis([1e-8, 1e-4, 0, 30])
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
window_size = 30
train_set = seq2seq_window_dataset(x_train, window_size,
batch_size=128)
valid_set = seq2seq_window_dataset(x_valid, window_size,
batch_size=128)
model = keras.models.Sequential([
keras.layers.Conv1D(filters=32, kernel_size=5,
strides=1, padding="causal",
activation="relu",
input_shape=[None, 1]),
keras.layers.LSTM(32, return_sequences=True),
keras.layers.LSTM(32, return_sequences=True),
keras.layers.Dense(1),
keras.layers.Lambda(lambda x: x * 200)
])
optimizer = keras.optimizers.SGD(lr=1e-5, momentum=0.9)
model.compile(loss=keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
model_checkpoint = keras.callbacks.ModelCheckpoint(
"my_checkpoint.h5", save_best_only=True)
early_stopping = keras.callbacks.EarlyStopping(patience=50)
model.fit(train_set, epochs=500,
validation_data=valid_set,
callbacks=[early_stopping, model_checkpoint])
model = keras.models.load_model("my_checkpoint.h5")
rnn_forecast = model_forecast(model, series[:, np.newaxis], window_size)
rnn_forecast = rnn_forecast[split_time - window_size:-1, -1, 0]
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid)
plot_series(time_valid, rnn_forecast)
keras.metrics.mean_absolute_error(x_valid, rnn_forecast).numpy()
"""
Explanation: Preprocessing With 1D-Convolutional Layers
End of explanation
"""
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
window_size = 64
train_set = seq2seq_window_dataset(x_train, window_size,
batch_size=128)
model = keras.models.Sequential()
model.add(keras.layers.InputLayer(input_shape=[None, 1]))
for dilation_rate in (1, 2, 4, 8, 16, 32):
model.add(
keras.layers.Conv1D(filters=32,
kernel_size=2,
strides=1,
dilation_rate=dilation_rate,
padding="causal",
activation="relu")
)
model.add(keras.layers.Conv1D(filters=1, kernel_size=1))
lr_schedule = keras.callbacks.LearningRateScheduler(
lambda epoch: 1e-4 * 10**(epoch / 30))
optimizer = keras.optimizers.Adam(lr=1e-4)
model.compile(loss=keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
history = model.fit(train_set, epochs=100, callbacks=[lr_schedule])
plt.semilogx(history.history["lr"], history.history["loss"])
plt.axis([1e-4, 1e-1, 0, 30])
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
window_size = 64
train_set = seq2seq_window_dataset(x_train, window_size,
batch_size=128)
valid_set = seq2seq_window_dataset(x_valid, window_size,
batch_size=128)
model = keras.models.Sequential()
model.add(keras.layers.InputLayer(input_shape=[None, 1]))
for dilation_rate in (1, 2, 4, 8, 16, 32):
model.add(
keras.layers.Conv1D(filters=32,
kernel_size=2,
strides=1,
dilation_rate=dilation_rate,
padding="causal",
activation="relu")
)
model.add(keras.layers.Conv1D(filters=1, kernel_size=1))
optimizer = keras.optimizers.Adam(lr=3e-4)
model.compile(loss=keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
model_checkpoint = keras.callbacks.ModelCheckpoint(
"my_checkpoint.h5", save_best_only=True)
early_stopping = keras.callbacks.EarlyStopping(patience=50)
history = model.fit(train_set, epochs=500,
validation_data=valid_set,
callbacks=[early_stopping, model_checkpoint])
model = keras.models.load_model("my_checkpoint.h5")
cnn_forecast = model_forecast(model, series[..., np.newaxis], window_size)
cnn_forecast = cnn_forecast[split_time - window_size:-1, -1, 0]
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid)
plot_series(time_valid, cnn_forecast)
keras.metrics.mean_absolute_error(x_valid, cnn_forecast).numpy()
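# (Added note) A rough sketch of why window_size=64 pairs naturally with this stack of
# dilated causal convolutions: with kernel_size=2, each layer with dilation d extends the
# receptive field by d time steps, so 1 + (1 + 2 + 4 + 8 + 16 + 32) = 64 steps.
kernel_size = 2
dilation_rates = (1, 2, 4, 8, 16, 32)
receptive_field = 1 + sum((kernel_size - 1) * d for d in dilation_rates)
print("Receptive field of the dilated conv stack:", receptive_field, "time steps")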
"""
Explanation: Fully Convolutional Forecasting
End of explanation
"""
|
GoogleCloudPlatform/training-data-analyst | CPB100/lab4a/demandforecast.ipynb | apache-2.0 | import google.datalab.bigquery as bq
import pandas as pd
import numpy as np
import shutil
%bq tables describe --name bigquery-public-data.new_york.tlc_yellow_trips_2015
"""
Explanation: Demand forecasting with BigQuery and TensorFlow
In this notebook, we will develop a machine learning model to predict the demand for taxi cabs in New York.
To develop the model, we will need to get historical data of taxicab usage. This data exists in BigQuery. Let's start by looking at the schema.
End of explanation
"""
%bq query
SELECT
EXTRACT (DAYOFYEAR from pickup_datetime) AS daynumber
FROM `bigquery-public-data.new_york.tlc_yellow_trips_2015`
LIMIT 5
"""
Explanation: Analyzing taxicab demand
Let's pull the number of trips for each day in the 2015 dataset using Standard SQL.
End of explanation
"""
%bq query -n taxiquery
WITH trips AS (
SELECT EXTRACT (DAYOFYEAR from pickup_datetime) AS daynumber
FROM `bigquery-public-data.new_york.tlc_yellow_trips_*`
where _TABLE_SUFFIX = @YEAR
)
SELECT daynumber, COUNT(1) AS numtrips FROM trips
GROUP BY daynumber ORDER BY daynumber
query_parameters = [
{
'name': 'YEAR',
'parameterType': {'type': 'STRING'},
'parameterValue': {'value': 2015}
}
]
trips = taxiquery.execute(query_params=query_parameters).result().to_dataframe()
trips[:5]
"""
Explanation: Modular queries and Pandas dataframe
Let's use the total number of trips as our proxy for taxicab demand (other reasonable alternatives are total trip_distance or total fare_amount). It is possible to predict multiple variables using Tensorflow, but for simplicity, we will stick to just predicting the number of trips.
We will give our query a name 'taxiquery' and have it use an input variable '$YEAR'. We can then invoke the 'taxiquery' by giving it a YEAR. The to_dataframe() converts the BigQuery result into a <a href='http://pandas.pydata.org/'>Pandas</a> dataframe.
End of explanation
"""
avg = np.mean(trips['numtrips'])
print('Just using average={0} has RMSE of {1}'.format(avg, np.sqrt(np.mean((trips['numtrips'] - avg)**2))))
"""
Explanation: Benchmark
Often, a reasonable estimate of something is its historical average. We can therefore benchmark our machine learning model against the historical average.
End of explanation
"""
%bq query
SELECT * FROM `bigquery-public-data.noaa_gsod.stations`
WHERE state = 'NY' AND wban != '99999' AND name LIKE '%LA GUARDIA%'
"""
Explanation: The mean here is about 400,000 and the root-mean-square error (RMSE) in this case is about 52,000. In other words, if we were to estimate that there are 400,000 taxi trips on any given day, that estimate will be off, on average, by about 52,000 in either direction.
Let's see if we can do better than this -- our goal is to make predictions of taxicab demand whose RMSE is lower than 52,000.
What kinds of things affect people's use of taxicabs?
Weather data
We suspect that weather influences how often people use a taxi. Perhaps someone who'd normally walk to work would take a taxi if it is very cold or rainy.
One of the advantages of using a global data warehouse like BigQuery is that you get to mash up unrelated datasets quite easily.
End of explanation
"""
%bq query -n wxquery
SELECT EXTRACT (DAYOFYEAR FROM CAST(CONCAT(@YEAR,'-',mo,'-',da) AS TIMESTAMP)) AS daynumber,
MIN(EXTRACT (DAYOFWEEK FROM CAST(CONCAT(@YEAR,'-',mo,'-',da) AS TIMESTAMP))) dayofweek,
MIN(min) mintemp, MAX(max) maxtemp, MAX(IF(prcp=99.99,0,prcp)) rain
FROM `bigquery-public-data.noaa_gsod.gsod*`
WHERE stn='725030' AND _TABLE_SUFFIX = @YEAR
GROUP BY 1 ORDER BY daynumber DESC
query_parameters = [
{
'name': 'YEAR',
'parameterType': {'type': 'STRING'},
'parameterValue': {'value': 2015}
}
]
weather = wxquery.execute(query_params=query_parameters).result().to_dataframe()
weather[:5]
"""
Explanation: Variables
Let's pull out the minimum and maximum daily temperature (in Fahrenheit) as well as the amount of rain (in inches) for La Guardia airport.
End of explanation
"""
data = pd.merge(weather, trips, on='daynumber')
data[:5]
"""
Explanation: Merge datasets
Let's use Pandas to merge (combine) the taxi cab and weather datasets day-by-day.
End of explanation
"""
j = data.plot(kind='scatter', x='maxtemp', y='numtrips')
"""
Explanation: Exploratory analysis
Is there a relationship between maximum temperature and the number of trips?
End of explanation
"""
j = data.plot(kind='scatter', x='dayofweek', y='numtrips')
"""
Explanation: The scatterplot above doesn't look very promising. There appears to be a weak downward trend, but it's also quite noisy.
Is there a relationship between the day of the week and the number of trips?
End of explanation
"""
j = data[data['dayofweek'] == 7].plot(kind='scatter', x='maxtemp', y='numtrips')
"""
Explanation: Hurrah, we seem to have found a predictor. It appears that people use taxis more later in the week. Perhaps New Yorkers make weekly resolutions to walk more and then lose their determination later in the week, or maybe it reflects tourism dynamics in New York City.
Perhaps if we took out the <em>confounding</em> effect of the day of the week, maximum temperature will start to have an effect. Let's see if that's the case:
End of explanation
"""
data2 = data # 2015 data
for year in [2014, 2016]:
query_parameters = [
{
'name': 'YEAR',
'parameterType': {'type': 'STRING'},
'parameterValue': {'value': year}
}
]
weather = wxquery.execute(query_params=query_parameters).result().to_dataframe()
trips = taxiquery.execute(query_params=query_parameters).result().to_dataframe()
data_for_year = pd.merge(weather, trips, on='daynumber')
data2 = pd.concat([data2, data_for_year])
data2.describe()
j = data2[data2['dayofweek'] == 7].plot(kind='scatter', x='maxtemp', y='numtrips')
"""
Explanation: Removing the confounding factor does seem to reflect an underlying trend around temperature. But ... the data are a little sparse, don't you think? This is something that you have to keep in mind -- the more predictors you start to consider (here we are using two: day of week and maximum temperature), the more rows you will need so as to avoid <em> overfitting </em> the model.
Adding 2014 and 2016 data
Let's add in 2014 and 2016 data to the Pandas dataframe. Note how useful it was for us to modularize our queries around the YEAR.
End of explanation
"""
import tensorflow as tf
shuffled = data2.sample(frac=1, random_state=13)
# It would be a good idea, if we had more data, to treat the days as categorical variables
# with the small amount of data we have, though, the model tends to overfit
#predictors = shuffled.iloc[:,2:5]
#for day in range(1,8):
# matching = shuffled['dayofweek'] == day
# key = 'day_' + str(day)
# predictors[key] = pd.Series(matching, index=predictors.index, dtype=float)
predictors = shuffled.iloc[:,1:5]
predictors[:5]
shuffled[:5]
targets = shuffled.iloc[:,5]
targets[:5]
"""
Explanation: The data do seem a bit more robust. If we had even more data, it would be better of course. But in this case, we only have 2014-2016 data for taxi trips, so that's what we will go with.
Machine Learning with Tensorflow
We'll use 80% of our dataset for training and 20% of the data for testing the model we have trained. Let's shuffle the rows of the Pandas dataframe so that this division is random. The predictor (or input) columns will be every column in the database other than the number-of-trips (which is our target, or what we want to predict).
The machine learning models that we will use -- linear regression and neural networks -- both require that the input variables are numeric in nature.
The day of the week, however, is a categorical variable (i.e. Tuesday is not really greater than Monday). So, we should create separate columns for whether it is a Monday (with values 0 or 1), Tuesday, etc.
Against that, we do have limited data (remember: the more columns you use as input features, the more rows you need to have in your training dataset), and it appears that there is a clear linear trend by day of the week. So, we will opt for simplicity here and use the data as-is. Try uncommenting the code that creates separate columns for the days of the week and re-run the notebook if you are curious about the impact of this simplification.
End of explanation
"""
trainsize = int(len(shuffled['numtrips']) * 0.8)
avg = np.mean(shuffled['numtrips'][:trainsize])
rmse = np.sqrt(np.mean((targets[trainsize:] - avg)**2))
print('Just using average={0} has RMSE of {1}'.format(avg, rmse))
"""
Explanation: Let's update our benchmark based on the 80-20 split and the larger dataset.
End of explanation
"""
SCALE_NUM_TRIPS = 600000.0
trainsize = int(len(shuffled['numtrips']) * 0.8)
testsize = len(shuffled['numtrips']) - trainsize
npredictors = len(predictors.columns)
noutputs = 1
tf.logging.set_verbosity(tf.logging.WARN) # change to INFO to get output every 100 steps ...
shutil.rmtree('./trained_model_linear', ignore_errors=True) # so that we don't load weights from previous runs
estimator = tf.contrib.learn.LinearRegressor(model_dir='./trained_model_linear',
feature_columns=tf.contrib.learn.infer_real_valued_columns_from_input(predictors.values))
print("starting to train ... this will take a while ... use verbosity=INFO to get more verbose output")
def input_fn(features, targets):
return tf.constant(features.values), tf.constant(targets.values.reshape(len(targets), noutputs)/SCALE_NUM_TRIPS)
estimator.fit(input_fn=lambda: input_fn(predictors[:trainsize], targets[:trainsize]), steps=10000)
pred = np.multiply(list(estimator.predict(predictors[trainsize:].values)), SCALE_NUM_TRIPS )
rmse = np.sqrt(np.mean(np.power((targets[trainsize:].values - pred), 2)))
print('LinearRegression has RMSE of {0}'.format(rmse))
"""
Explanation: Linear regression with tf.contrib.learn
We scale the number of taxicab rides by 600,000 so that the model can keep its predicted values in the [0-1] range. The optimization goes a lot faster when the weights are small numbers. We save the weights into ./trained_model_linear and display the root mean square error on the test dataset.
End of explanation
"""
SCALE_NUM_TRIPS = 600000.0
trainsize = int(len(shuffled['numtrips']) * 0.8)
testsize = len(shuffled['numtrips']) - trainsize
npredictors = len(predictors.columns)
noutputs = 1
tf.logging.set_verbosity(tf.logging.WARN) # change to INFO to get output every 100 steps ...
shutil.rmtree('./trained_model', ignore_errors=True) # so that we don't load weights from previous runs
estimator = tf.contrib.learn.DNNRegressor(model_dir='./trained_model',
hidden_units=[5, 5],
feature_columns=tf.contrib.learn.infer_real_valued_columns_from_input(predictors.values))
print("starting to train ... this will take a while ... use verbosity=INFO to get more verbose output")
def input_fn(features, targets):
return tf.constant(features.values), tf.constant(targets.values.reshape(len(targets), noutputs)/SCALE_NUM_TRIPS)
estimator.fit(input_fn=lambda: input_fn(predictors[:trainsize], targets[:trainsize]), steps=10000)
pred = np.multiply(list(estimator.predict(predictors[trainsize:].values)), SCALE_NUM_TRIPS )
rmse = np.sqrt(np.mean((targets[trainsize:].values - pred)**2))
print('Neural Network Regression has RMSE of {0}'.format(rmse))
"""
Explanation: The RMSE here (57K) is lower than the benchmark (62K), which indicates that we are doing about 10% better with the machine learning model than we would be if we were to just use the historical average (our benchmark).
Neural network with tf.contrib.learn
Let's make a more complex model with a few hidden nodes.
End of explanation
"""
input = pd.DataFrame.from_dict(data =
{'dayofweek' : [4, 5, 6],
'mintemp' : [60, 40, 50],
'maxtemp' : [70, 90, 60],
'rain' : [0, 0.5, 0]})
# read trained model from ./trained_model
estimator = tf.contrib.learn.LinearRegressor(model_dir='./trained_model_linear',
feature_columns=tf.contrib.learn.infer_real_valued_columns_from_input(input.values))
pred = np.multiply(list(estimator.predict(input.values)), SCALE_NUM_TRIPS )
print(pred)
"""
Explanation: Using a neural network results in similar performance to the linear model when I ran it -- it might be because there isn't enough data for the NN to do much better. (NN training is a non-convex optimization, and you will get different results each time you run the above code).
Running a trained model
So, we have trained a model, and saved it to a file. Let's use this model to predict taxicab demand given the expected weather for three days.
Here we make a Dataframe out of those inputs, load up the saved model (note that we have to know the model equation -- it's not saved in the model file) and use it to predict the taxicab demand.
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.24/_downloads/72bb0e260a352fd7c21fee1dd2f83d79/decoding_spoc_CMC.ipynb | bsd-3-clause | # Author: Alexandre Barachant <alexandre.barachant@gmail.com>
# Jean-Remi King <jeanremi.king@gmail.com>
#
# License: BSD-3-Clause
import matplotlib.pyplot as plt
import mne
from mne import Epochs
from mne.decoding import SPoC
from mne.datasets.fieldtrip_cmc import data_path
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import Ridge
from sklearn.model_selection import KFold, cross_val_predict
# Define parameters
fname = data_path() + '/SubjectCMC.ds'
raw = mne.io.read_raw_ctf(fname)
raw.crop(50., 200.) # crop for memory purposes
# Filter muscular activity to only keep high frequencies
emg = raw.copy().pick_channels(['EMGlft']).load_data()
emg.filter(20., None)
# Filter MEG data to focus on beta band
raw.pick_types(meg=True, ref_meg=True, eeg=False, eog=False).load_data()
raw.filter(15., 30.)
# Build epochs as sliding windows over the continuous raw file
events = mne.make_fixed_length_events(raw, id=1, duration=0.75)
# Epoch length is 1.5 second
meg_epochs = Epochs(raw, events, tmin=0., tmax=1.5, baseline=None,
detrend=1, decim=12)
emg_epochs = Epochs(emg, events, tmin=0., tmax=1.5, baseline=None)
# Prepare classification
X = meg_epochs.get_data()
y = emg_epochs.get_data().var(axis=2)[:, 0] # target is EMG power
# Classification pipeline with SPoC spatial filtering and Ridge Regression
spoc = SPoC(n_components=2, log=True, reg='oas', rank='full')
clf = make_pipeline(spoc, Ridge())
# Define a two fold cross-validation
cv = KFold(n_splits=2, shuffle=False)
# Run cross validaton
y_preds = cross_val_predict(clf, X, y, cv=cv)
# Plot the True EMG power and the EMG power predicted from MEG data
fig, ax = plt.subplots(1, 1, figsize=[10, 4])
times = raw.times[meg_epochs.events[:, 0] - raw.first_samp]
ax.plot(times, y_preds, color='b', label='Predicted EMG')
ax.plot(times, y, color='r', label='True EMG')
ax.set_xlabel('Time (s)')
ax.set_ylabel('EMG Power')
ax.set_title('SPoC MEG Predictions')
plt.legend()
mne.viz.tight_layout()
plt.show()
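# (Added sketch) A natural companion metric for continuous decoding: the linear correlation
# between the true and predicted EMG power across epochs. Assumes `y` and `y_preds`
# from the cross-validation above are still in scope.
import numpy as np
corr = np.corrcoef(y, y_preds)[0, 1]
print("Correlation between true and predicted EMG power: %.3f" % corr)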
"""
Explanation: Continuous Target Decoding with SPoC
Source Power Comodulation (SPoC) :footcite:DahneEtAl2014 makes it possible to identify
the composition of orthogonal spatial filters that maximally correlate with a continuous target.
SPoC can be seen as an extension of the CSP for continuous variables.
Here, SPoC is applied to decode the (continuous) fluctuation of an
electromyogram from MEG beta activity, using data from the
Cortico-Muscular Coherence example of FieldTrip
<http://www.fieldtriptoolbox.org/tutorial/coherence>_
End of explanation
"""
spoc.fit(X, y)
spoc.plot_patterns(meg_epochs.info)
"""
Explanation: Plot the contributions to the detected components (i.e., the forward model)
End of explanation
"""
|
GoogleCloudPlatform/training-data-analyst | learning_rate.ipynb | apache-2.0 | !sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
# Install Sklearn
!python3 -m pip install --user sklearn
# Ensure the right version of Tensorflow is installed.
!pip freeze | grep tensorflow==2.1 || pip install tensorflow==2.1
"""
Explanation: Observing Learning Curve Changes
Learning Objectives
Build a Keras Sequential regression model to predict housing prices.
Train the model several times, each with a different hard-coded learning rate.
Compare the resulting learning curves.
Introduction
In this notebook, you will observe how the learning curves change as the learning rate is varied. You will use the Keras Sequential Model to build an ML model to predict housing prices.
A learning curve is a plot of model learning performance over experience or time.
Learning curves are a widely used diagnostic tool in machine learning for algorithms that learn from a training dataset incrementally. The model can be evaluated on the training dataset and on a hold out validation dataset after each update during training and plots of the measured performance can created to show learning curves.
Reviewing learning curves of models during training can be used to diagnose problems with learning, such as an underfit or overfit model, as well as whether the training and validation datasets are suitably representative.
https://machinelearningmastery.com/learning-curves-for-diagnosing-machine-learning-model-performance/
Each learning objective will correspond to a #TODO in the student lab notebook -- try to complete that notebook first before reviewing this solution notebook.
Start by importing the necessary libraries for this lab.
End of explanation
"""
import os
import tensorflow.keras
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
from tensorflow import feature_column as fc
from tensorflow.keras import layers
from tensorflow import keras
from keras.optimizers import Adam
#from keras.optimizers import SGD
from sklearn.model_selection import train_test_split
from keras.utils import plot_model
print("TensorFlow version: ",tf.version.VERSION)
"""
Explanation: Note: after executing the cell above, you should see output similar to
tensorflow==2.1.0, which is the installed version of TensorFlow.
End of explanation
"""
if not os.path.isdir("../data"):
os.makedirs("../data")
!gsutil cp gs://cloud-training-demos/feat_eng/housing/housing_pre-proc.csv ../data
"""
Explanation: Many of the Google Machine Learning Courses Programming Exercises use the California Housing Dataset, which contains data drawn from the 1990 U.S. Census. Our lab dataset has been pre-processed so that there are no missing values.
First, let's download the raw .csv data by copying the data from a cloud storage bucket.
End of explanation
"""
housing_df = pd.read_csv('../data/housing_pre-proc.csv', error_bad_lines=False)
housing_df.head()
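# (Added) The explanation below refers to .describe() output; showing the summary
# statistics here so the "count" row (20433 for every column) backs up the
# no-missing-values claim.
housing_df.describe()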
"""
Explanation: Now, let's read in the dataset just copied from the cloud storage bucket and create a Pandas dataframe.
End of explanation
"""
train, test = train_test_split(housing_df, test_size=0.2)
train, val = train_test_split(train, test_size=0.2)
print(len(train), 'train examples')
print(len(val), 'validation examples')
print(len(test), 'test examples')
train.to_csv('../data/housing-train.csv', encoding='utf-8', index=False)
val.to_csv('../data/housing-val.csv', encoding='utf-8', index=False)
test.to_csv('../data/housing-test.csv', encoding='utf-8', index=False)
# A utility method to create a tf.data dataset from a Pandas Dataframe
def df_to_dataset(dataframe, shuffle=True, batch_size=32):
dataframe = dataframe.copy()
labels = dataframe.pop('median_house_value')
ds = tf.data.Dataset.from_tensor_slices((dict(dataframe), labels))
if shuffle:
ds = ds.shuffle(buffer_size=len(dataframe))
ds = ds.batch(batch_size)
return ds
# Next we initialize the training and validation datasets.
batch_size = 32
train_ds = df_to_dataset(train)
val_ds = df_to_dataset(val, shuffle=False, batch_size=batch_size)
# Input Pipeline
numeric_cols = ['longitude', 'latitude', 'housing_median_age', 'total_rooms',
'total_bedrooms', 'population', 'households', 'median_income']
# Next, we scale the numerical feature columns that we assigned to the variable "numeric cols".
# Min-max scaling helper for a numeric feature column
def get_scal(feature):
def minmax(x):
mini = train[feature].min()
maxi = train[feature].max()
return (x - mini)/(maxi-mini)
return(minmax)
feature_columns = []
for header in numeric_cols:
scal_input_fn = get_scal(header)
feature_columns.append(fc.numeric_column(header,
normalizer_fn=scal_input_fn))
"""
Explanation: We can use .describe() to see some summary statistics for the numeric fields in our dataframe. Note, for example, the count row and corresponding columns. The count shows 20433.000000 for all feature columns. Thus, there are no missing values.
Split the dataset for ML
The dataset we loaded was a single CSV file. We will split this into train, validation, and test sets.
End of explanation
"""
# Model create
feature_layer = tf.keras.layers.DenseFeatures(feature_columns, dtype='float64')
model = tf.keras.Sequential([
feature_layer,
layers.Dense(12, input_dim=8, activation='relu'),
layers.Dense(8, activation='relu'),
layers.Dense(1, activation='linear', name='median_house_value')
])
# Model compile
opt = keras.optimizers.Adam(learning_rate=.5)
model.compile(optimizer=opt,
loss='mse',
metrics=['mse'])
# Model Fit
history = model.fit(train_ds,
validation_data=val_ds,
epochs=32)
def plot_curves(history, metrics):
nrows = 1
ncols = 2
fig = plt.figure(figsize=(10, 5))
for idx, key in enumerate(metrics):
ax = fig.add_subplot(nrows, ncols, idx+1)
plt.plot(history.history[key])
plt.plot(history.history['val_{}'.format(key)])
plt.title('model {}'.format(key))
plt.ylabel(key)
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left');
plot_curves(history, ['loss', 'mse'])
print("Learning rate used above: 0.5")
"""
Explanation: Using the Keras Sequential Model
Next, we will run this cell to compile and fit the Keras Sequential model.
Model 1 - hard coding the learning rate to .5
End of explanation
"""
# Model create
feature_layer = tf.keras.layers.DenseFeatures(feature_columns, dtype='float64')
model = tf.keras.Sequential([
feature_layer,
layers.Dense(12, input_dim=8, activation='relu'),
layers.Dense(8, activation='relu'),
layers.Dense(1, activation='linear', name='median_house_value')
])
# Model compile
model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.1),
loss='mse',
metrics=['mse'])
# Model Fit
history = model.fit(train_ds,
validation_data=val_ds,
epochs=32)
def plot_curves(history, metrics):
nrows = 1
ncols = 2
fig = plt.figure(figsize=(10, 5))
for idx, key in enumerate(metrics):
ax = fig.add_subplot(nrows, ncols, idx+1)
plt.plot(history.history[key])
plt.plot(history.history['val_{}'.format(key)])
plt.title('model {}'.format(key))
plt.ylabel(key)
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left');
plot_curves(history, ['loss', 'mse'])
print("Learning rate used above: 0.1")
"""
Explanation: Model 2 - hard coding the learning rate to .1
End of explanation
"""
# Model create
feature_layer = tf.keras.layers.DenseFeatures(feature_columns, dtype='float64')
model = tf.keras.Sequential([
feature_layer,
layers.Dense(12, input_dim=8, activation='relu'),
layers.Dense(8, activation='relu'),
layers.Dense(1, activation='linear', name='median_house_value')
])
# Model compile
opt = keras.optimizers.Adam(learning_rate=0.01)
model.compile(optimizer=opt,
loss='mse',
metrics=['mse'])
# Model Fit
history = model.fit(train_ds,
validation_data=val_ds,
epochs=32) # Increase Epochs to 32
def plot_curves(history, metrics):
nrows = 1
ncols = 2
fig = plt.figure(figsize=(10, 5))
for idx, key in enumerate(metrics):
ax = fig.add_subplot(nrows, ncols, idx+1)
plt.plot(history.history[key])
plt.plot(history.history['val_{}'.format(key)])
plt.title('model {}'.format(key))
plt.ylabel(key)
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left');
plot_curves(history, ['loss', 'mse'])
print("Learning rate used above: 0.01")
"""
Explanation: Model 3 - hard coding the learning rate to .01
End of explanation
"""
# Model create
feature_layer = tf.keras.layers.DenseFeatures(feature_columns, dtype='float64')
model = tf.keras.Sequential([
feature_layer,
layers.Dense(12, input_dim=8, activation='relu'),
layers.Dense(8, activation='relu'),
layers.Dense(1, activation='linear', name='median_house_value')
])
# Model compile
opt = keras.optimizers.Adam(learning_rate=0.001)
model.compile(optimizer=opt,
loss='mse',
metrics=['mse'])
# Model Fit
history = model.fit(train_ds,
validation_data=val_ds,
epochs=32) # Increase Epochs to 32
def plot_curves(history, metrics):
nrows = 1
ncols = 2
fig = plt.figure(figsize=(10, 5))
for idx, key in enumerate(metrics):
ax = fig.add_subplot(nrows, ncols, idx+1)
plt.plot(history.history[key])
plt.plot(history.history['val_{}'.format(key)])
plt.title('model {}'.format(key))
plt.ylabel(key)
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left');
plot_curves(history, ['loss', 'mse'])
print("Learning rate used above: 0.001")
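# (Added sketch) Instead of copy-pasting the model for each learning rate, the same
# comparison can be expressed as a loop. Hypothetical helper; assumes feature_columns,
# train_ds, val_ds and plot_curves defined above are still in scope.
def train_with_learning_rate(lr, epochs=32):
    model = tf.keras.Sequential([
        tf.keras.layers.DenseFeatures(feature_columns, dtype='float64'),
        layers.Dense(12, input_dim=8, activation='relu'),
        layers.Dense(8, activation='relu'),
        layers.Dense(1, activation='linear', name='median_house_value')
    ])
    model.compile(optimizer=keras.optimizers.Adam(learning_rate=lr),
                  loss='mse', metrics=['mse'])
    return model.fit(train_ds, validation_data=val_ds, epochs=epochs)

for lr in [0.5, 0.1, 0.01, 0.001]:
    print("Learning rate:", lr)
    history = train_with_learning_rate(lr)
    plot_curves(history, ['loss', 'mse'])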
"""
Explanation: Model 4 - hard coding the learning rate to .001
End of explanation
"""
|
PythonFreeCourse/Notebooks | week01/7_Logic_Operators.ipynb | mit | print("True and True => " + str(True and True))
print("False and True => " + str(False and True))
print("True and False => " + str(True and False))
print("False and False => " + str(False and False))
"""
Explanation: <img src="images/logo.jpg" style="display: block; margin-left: auto; margin-right: auto;" alt="ืืืื ืฉื ืืืื ืืืืื ืืคืืืชืื. ื ืืฉ ืืฆืืืจ ืืฆืืขื ืฆืืื ืืืืื, ืื ืข ืืื ืืืืชืืืช ืฉื ืฉื ืืงืืจืก: ืืืืืื ืคืืืชืื. ืืกืืืื ืืืืคืืข ืืขื ืืฉื ืืงืืจืก ืืื ืืืื ืืื ืื ืืืืืื ืชืื ืืช ืืขืืจืืช.">
<p style="align: right; direction: rtl; float: right;">ืืืคืจืืืจืื ืืืืืื</p>
<div class="align-center" style="display: flex; text-align: right; direction: rtl;">
<div style="display: flex; width: 10%; float: right; ">
<img src="images/tip.png" style="height: 50px !important;" alt="ืืืค!">
</div>
<div style="width: 90%">
<p style="text-align: right; direction: rtl;">
The material in this notebook is a bit more theoretical. Even if it feels tedious, this material is going to be used a lot.<br>
We are confident that you will be able to understand it if you build on the knowledge you have gathered so far.
</p>
</div>
</div>
<p style="align: right; direction: rtl; float: right;">ืืืื</p>
<p style="text-align: right; direction: rtl; float: right;">
In the previous lesson we learned a bit about <em>boolean expressions</em>: expressions whose result is a truth value, True or False.<br>
We saw that the idea behind such expressions is simple, and we learned about <em>comparison operators</em> such as <code>==</code> or <code>in</code>, which let us phrase boolean expressions however we want.<br>
We learned that boolean expressions help us ask interesting questions, such as whether the user has more than 1,000 NIS in the bank, or whether their aunt is called "Shufersal".
</p>
<p style="text-align: right; direction: rtl; float: right;">
ืืื ืื ืืงืจื ืืฉื ืจืฆื ืืฉืืื ืฉืืืืช ืืืจืืืืช ืืืชืจ?<br>
ืืืืืื, ืื ืืชื ืืื ืืืจืฉืื ืืื ืง ืฉืื ื ืื <em>ืื</em> ืฉืืืื ืื ืืืชืจ ืึพ1,000 ืฉ"ื, ื<em>ืื</em> ืฉืชืืื ืืคืงืืืื ื ืืฉืืช?<br>
ืืืจืื ืืฆืขืจ, ืืืืื ืฉืืฉ ืืจืฉืืชื ื ืืจืืข ืื ื ืืื ืื ืกื ืฉืืืืช ืืืจืืืืช ืฉืืืื.<br>
ืืืืืจืช ืื ืื ื ืืื ืขื ืืืื ืืืฉืื, ืืืคืจืืืจืื ืืืืืื, ืฉืืืคืฉืจื ืื ื ืื ืกื ืฉืืืืช ืืืจืืืืช ืืืชืจ.
</p>
<p style="align: right; direction: rtl; float: right;">ืืืืจื</p>
<p style="text-align: right; direction: rtl; float: right;">
ืืืืืจ, ืืขืืจืชื ื ืืชืืืืกืื "<dfn>ืืืคืจืืืจืื ืืืืืื</dfn>", ืฉืืืจืชื ืืืคืฉืจ ืื ื ืืฉืืื ืฉืืืืช ืื ืืืข ืืงืฉืจืื ืืื ืืืืืืื ืืืืืื ืืื.<br>
ืืฉื ื ืฉืืืฉื ืืืคืจืืืจืื ืืืืืื: <code>and</code> ("ืืื"), <code>or</code> ("ืื") ืึพ<code>not</code> ("ืื").<br>
</p>
<p style="align: right; direction: rtl; float: right;">ืืืืืืืช</p>
<ul style="text-align: right; direction: rtl; float: right; white-space: nowrap;">
<li>Is 2 greater than 1, <mark>and</mark> less than 3?</li>
<li>Is the customer a club member, <mark>and</mark> is the customer over 18?</li>
<li>Will the pizza arrive within half an hour, <mark>or</mark> is there food in the fridge?</li>
<li>Is a fish bone stuck in my throat, <mark>and</mark> are there <mark>no</mark> magicians to be found along the way?</li>
<li>Is the alarm at my house armed, <mark>and</mark> am I <mark>not</mark> at home?</li>
<li>Did the user type in an incorrect PIN code, <mark>or</mark> did they try to withdraw more money than their account balance?</li>
</ul>
<p style="text-align: right; direction: rtl; float: right;">
It is easy to notice that the examples follow a recurring pattern: the logical operators "and" and "or" take a boolean expression on each of their two sides, and return a single boolean value.<br>
The logical operator "not" takes only a single boolean value, and flips it.<br>
For example: when we ask whether 2 is greater than 1 (yes) and whether 2 is less than 3 (yes), the logical operator "and" receives "yes" from both sides, and returns the final answer "yes".
</p>
<img src="images/true_and_true.svg" style="max-width:100%; margin-right: auto; margin-left: auto; text-align: center;" alt="ืืืืืงื ืืื 2 ืืืื ืึพ1 ืืืืืจื True, ืฉืืกืืื ืึพV ืืจืืง. ืืืื, ืืืืืงื ืืื 2 ืงืื ืึพ3 ืืืืืจื True, ืฉืืกืืื ืึพV ืืจืืง. ืืฉืชื ืืืืืงืืช ืืืจื ืืฅ ืืชืื ืฆืืจื ืฉืื ืืชืื 'ืืื' ืืื ืฉื ื V ืืจืืงืื, ืืืชืืื ืืืฆื ืืฅ ืฉืืฆืืืข ืขื V ืืจืืง."/>
<p style="align: right; direction: rtl; float: right;">ืืืคืจืืืจืื ืืืืืื</p>
<p style="align: right; direction: rtl; float: right;">and / ืืื</p>
<p style="text-align: right; direction: rtl; float: right;">
The logical operator <dfn>and</dfn> operates on two boolean expressions, one to its right and one to its left, and returns a single boolean value:<br>
<samp>True</samp> if the expressions on both of its sides are <code>True</code>, or <samp>False</samp> in any other case.<br>
In programming terms, <code>and</code> is considered an "<dfn>operator</dfn>", and the expressions on its sides are called "<dfn>operands</dfn>".
</p>
End of explanation
"""
two_is_greater_than_one = 2 > 1
two_is_smaller_than_three = 2 < 3
two_is_between_one_and_three = two_is_greater_than_one and two_is_smaller_than_three
print(two_is_between_one_and_three)
"""
Explanation: <p style="text-align: right; direction: rtl; float: right;">
Let's check, with Python, whether 2 is greater than 1 and less than 3:
</p>
End of explanation
"""
print(2 > 1 and 2 < 3)
"""
Explanation: <p style="text-align: right; direction: rtl; float: right;">
Or, more concisely:
</p>
End of explanation
"""
print("True or True => " + str(True or True))
print("False or True => " + str(False or True))
print("True or False => " + str(True or False))
print("False or False => " + str(False or False))
"""
Explanation: <p style="align: right; direction: rtl; float: right;">or / ืื</p>
<p style="text-align: right; direction: rtl; float: right;">
The logical operator <dfn>or</dfn> operates on two boolean expressions, one to its right and one to its left, and returns a single boolean value:<br>
<samp>True</samp> if <em>at least</em> one of the expressions, on its right or on its left, is <code>True</code>.<br>
It returns <samp>False</samp> only if the expressions on both of its sides are <code>False</code>.<br>
In programming terms, <code>or</code> is considered an "<dfn>operator</dfn>", and the expressions on its sides are called "<dfn>operands</dfn>".
</p>
End of explanation
"""
a_in_barvaz = 'a' in 'barvaz'
x_in_barvaz = 'x' in 'barvaz'
a_or_x_in_barvaz = a_in_barvaz or x_in_barvaz
print(a_or_x_in_barvaz)
"""
Explanation: <p style="text-align: right; direction: rtl; float: right;">
Let's check with Python whether one of the letters, a or x, appears in the word "barvaz".<br>
</p>
End of explanation
"""
print('a' in 'barvaz' or 'x' in 'barvaz')
"""
Explanation: <p style="text-align: right; direction: rtl; float: right;">
Or, more concisely:
</p>
End of explanation
"""
print("not True => " + str(not True))
print("not False => " + str(not False))
"""
Explanation: <p style="align: right; direction: rtl; float: right;">not / ืื</p>
<p style="text-align: right; direction: rtl; float: right;">
The logical operator <dfn>not</dfn> operates on a single boolean value that appears to its right, and returns the opposite boolean value. That is:<br>
<samp>True</samp> if the boolean value after it is <code>False</code>.<br>
<samp>False</samp> if the boolean value after it is <code>True</code>.<br>
In programming terms, <code>not</code> is considered an "<dfn>operator</dfn>", and the expression to its right is called an "<dfn>operand</dfn>".
</p>
End of explanation
"""
is_q_in_barvaz = 'q' in 'barvaz'
is_q_not_in_baraz = not is_q_in_barvaz
print(is_q_not_in_baraz)
"""
Explanation: <p style="text-align: right; direction: rtl; float: right;">
Let's check with Python whether the word "barvaz" does not contain the letter "q":
</p>
End of explanation
"""
print(not 'q' in 'barvaz')
"""
Explanation: <p style="text-align: right; direction: rtl; float: right;">
Or, more concisely:
</p>
End of explanation
"""
print('q' not in 'barvaz')
"""
Explanation: <p style="text-align: right; direction: rtl; float: right;">
A small bonus trick: Python lets us write the code above in a more readable way, using the <code>not in</code> operator:
</p>
End of explanation
"""
people_in_party = int(input('How many people in your party?: '))
whole_pizzas = int(input('How many pizzas did you order?: '))
slices_of_pizza = 8 * whole_pizzas
"""
Explanation: <p style="align: right; direction: rtl; float: right;">ืืืืื</p>
<p style="text-align: right; direction: rtl; float: right;">
Continuing the lesson, let's assume we have the following variables.<br>
Run the cell that defines them; we will use these variables in a moment.
</p>
End of explanation
"""
print(people_in_party >= 20)
print(slices_of_pizza > 100)
"""
Explanation: <p style="text-align: right; direction: rtl; float: right;">
A happy party is a party that has at least 20 participants and more than 100 slices of pizza.<br>
Until now, we could check whether the party is happy only like this, which prints 2 separate answers:
</p>
End of explanation
"""
print("Is it a happy party?")
print(people_in_party >= 20 and slices_of_pizza > 100)
"""
Explanation: <p style="text-align: right; direction: rtl; float: right;">
It is easy to see that only when <strong>both</strong> the first line prints <samp>True</samp> <strong>and</strong> the second line prints <samp>True</samp> is the party a happy party.<br>
This situation, as we have seen, matches the first logical operator we talked about: <code>and</code> ("ืืื" in Hebrew).<br>
The question we asked is "is the number of guests at the party at least 20, <strong>and</strong> is the number of pizza slices at the party more than 100".<br>
Let's try it in code:
</p>
End of explanation
"""
print(people_in_party <= 5)
print(slices_of_pizza < people_in_party)
"""
Explanation: <p style="text-align: right; direction: rtl; float: right;">
"ืืกืืื ืขืฆืืื" ืืื ืืกืืื ืฉืืฉ ืื ืขื 5 ืืฉืชืชืคืื, ืื ืฉืื ืืื ืืฉืืืฉ ืคืืฆื ืืื ืืฉืชืชืฃ.<br>
ืขื ืขืืฉืื, ืืืื ื ืืืืืื ืืขืฉืืช ืืช ืื ืจืง ืืฆืืจื ืืื, ืฉืืืคืืกื ืื ื 2 ืชืฉืืืืช:
</p>
End of explanation
"""
print("Is it a sad party?")
print(people_in_party <= 5 or slices_of_pizza < people_in_party)
"""
Explanation: <p style="text-align: right; direction: rtl; float: right;">
It is easy to see that when the result of the first line is <samp>True</samp> <strong>or</strong> the result of the second line is <samp>True</samp>, the party is a sad party.<br>
This situation, as we have seen, matches the second logical operator we talked about: <code>or</code> ("ืื" in Hebrew).<br>
The question we asked is "is the number of guests at the party less than or equal to 5, <strong>or</strong> is the number of pizza slices smaller than the number of guests at the party".<br>
Let's try it in code:
</p>
End of explanation
"""
# Run the following lines
age = 17
balaganist = True
have_chartreuse = True
have_fernet = True
have_lime = True
have_sugar = True
degrees_of_cup_in_celsius = 0
is_closed = False
is_problematic = age < 18 or balaganist
print(is_problematic)
have_ingredients = have_chartreuse and have_fernet and have_lime and have_sugar
print(have_ingredients)
cup_is_fine = -273.15 < degrees_of_cup_in_celsius and degrees_of_cup_in_celsius < 4
# A trick! This can also be written as:
# cup_is_fine = -273.15 < degrees_of_cup_in_celsius < 4
print(cup_is_fine)
can_drink_industry_sour_cocktail = (not is_problematic) and have_ingredients and cup_is_fine and not is_closed
print(can_drink_industry_sour_cocktail)
"""
Explanation: <p style="align: right; direction: rtl; float: right;">ืืืืืืช ืืืช</p>
<p style="text-align: right; direction: rtl; float: right;">
<dfn>Truth tables</dfn> are an organized way to describe the result of the boolean logical operators in every possible case.<br>
We will draw such tables for <code>and</code>, <code>or</code> and <code>not</code>.<br>
We recommend that you do not memorize them, but rather understand the logic behind them.
</p>
| First operand | Operator | Second operand | Result |
|:--------------|:--------:|:---------------|-------:|
| True | and | True | True |
| True | and | False | False |
| False | and | True | False |
| False | and | False | False |

| First operand | Operator | Second operand | Result |
|:--------------|:--------:|:---------------|-------:|
| True | or | True | True |
| True | or | False | True |
| False | or | True | True |
| False | or | False | False |

| Operator | Operand | Result |
|:--------:|:--------|-------:|
| not | True | False |
| not | False | True |
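
As a quick sanity check (a small sketch, not part of the original exercise), the rows of these tables can be printed directly in Python:
```python
# a quick check of the truth tables for `and`, `or` and `not`
for first in (True, False):
    print("not", first, "->", not first)
    for second in (True, False):
        print(first, "and", second, "->", first and second)
        print(first, "or", second, "->", first or second)
```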
<p style="align: right; direction: rtl; float: right;">Terminology</p>
<dl style="text-align: right; direction: rtl; float: right; white-space: nowrap;">
<dt>Logical operator</dt><dd>A word that performs a logical operation between two boolean values. Such an operation is also called a "<dfn>logic gate</dfn>".</dd>
<dt>Operand</dt><dd>Plural: operands. The values on which the operation is performed. In our case, the values to which we want to apply the logical operator.</dd>
</dl>
<p style="align: right; direction: rtl; float: right;">Exercise</p>
<p style="align: right; direction: rtl; float: right;">What is true and what is not?</p>
<p style="text-align: right; direction: rtl; float: right;">
For the expressions below, which will be run one line at a time, write down next to each line the result you expect it to produce.<br>
Only after you have written your guesses, run the lines and check whether your answers were correct.<br></p>
End of explanation
"""
parent_name = 'David'
parent_age = 52
child_name = 'Havatzelet'
child_age = 25
average_max_lifespan_in_israel = 82.4
parent_name == "David" and parent_age > 52
'a' == 'b' and 'c' == 'c'
parent_name == 'David' and parent_name == 'david'
parent_name == 'David' or parent_age > 53
'd' in str(parent_age) or 'd' in parent_name
child_age * 2 > parent_age or child_name in parent_name
not (child_name == parent_name) or 2 ** 5 == 1
child_age >= 0 and average_max_lifespan_in_israel > child_age * 2
not ('y' not in parent_name and 'y' not in child_name) or child_age > parent_age or child_age < 0 or parent_age / 1 == parent_age * 1
"""
Explanation: <div class="align-center" style="display: flex; text-align: right; direction: rtl; clear: both;">
<div style="display: flex; width: 10%; float: right; clear: both;">
<img src="images/exercise.svg" style="height: 50px !important;" alt="Exercise">
</div>
<div style="width: 70%">
<p style="text-align: right; direction: rtl; float: right; clear: both;">
What did you get?<br>
Change the values of up to 2 of the variables above so that you get a different result.<br>
Suggest 2 possible answers.
</p>
</div>
</div>
<p style="text-align: right; direction: rtl; float: right;">
An example of a possible answer:
</p>
End of explanation
"""
|
Jackporter415/phys202-2015-work | assignments/assignment08/InterpolationEx01.ipynb | mit | %matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from scipy.interpolate import interp2d
from scipy.interpolate import interp1d
"""
Explanation: Interpolation Exercise 1
End of explanation
"""
with np.load('trajectory.npz') as data:
t = data['t']
x = data['x']
y = data['y']
assert isinstance(x, np.ndarray) and len(x)==40
assert isinstance(y, np.ndarray) and len(y)==40
assert isinstance(t, np.ndarray) and len(t)==40
"""
Explanation: 2D trajectory interpolation
The file trajectory.npz contains 3 Numpy arrays that describe a 2d trajectory of a particle as a function of time:
t which has discrete values of time t[i].
x which has values of the x position at those times: x[i] = x(t[i]).
y which has values of the y position at those times: y[i] = y(t[i]).
Load those arrays into this notebook and save them as variables x, y and t:
End of explanation
"""
newt = np.linspace(t.min(),t.max(),200)
fx = interp1d(t,x,kind = 'cubic')
fy = interp1d(t,y,kind = 'cubic')
newx = fx(newt)
newy = fy(newt)
len(newx)
assert newt[0]==t.min()
assert newt[-1]==t.max()
assert len(newt)==200
assert len(newx)==200
assert len(newy)==200
"""
Explanation: Use these arrays to create interpolated functions $x(t)$ and $y(t)$. Then use those functions to create the following arrays:
newt which has 200 points between ${t_{min},t_{max}}$.
newx which has the interpolated values of $x(t)$ at those times.
newy which has the interpolated values of $y(t)$ at those times.
End of explanation
"""
ax = plt.gca()
plt.plot(newx,newy, color = 'r')
plt.plot(x,y, 'bo')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
plt.title('Trajectory')
plt.xlabel('Distance')
plt.ylabel('Height')
assert True # leave this to grade the trajectory plot
"""
Explanation: Make a parametric plot of ${x(t),y(t)}$ that shows the interpolated values and the original points:
For the interpolated points, use a solid line.
For the original points, use circles of a different color and no line.
Customize your plot to make it effective and beautiful.
End of explanation
"""
|
ethen8181/machine-learning | reinforcement_learning/multi_armed_bandits.ipynb | mit | # code for loading the format for the notebook
import os
# path : store the current path to convert back to it later
path = os.getcwd()
os.chdir(os.path.join('..', 'notebook_format'))
from formats import load_style
load_style(css_style='custom2.css', plot_style=False)
os.chdir(path)
# 1. magic for inline plot
# 2. magic to print version
# 3. magic so that the notebook will reload external python modules
# 4. magic to enable retina (high resolution) plots
# https://gist.github.com/minrk/3301035
%matplotlib inline
%load_ext watermark
%load_ext autoreload
%autoreload 2
%config InlineBackend.figure_format='retina'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import beta
from collections import namedtuple
%watermark -a 'Ethen' -d -t -v -p numpy,pandas,matplotlib,scipy
"""
Explanation: <h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"><li><span><a href="#Multi-Armed-Bandits" data-toc-modified-id="Multi-Armed-Bandits-1"><span class="toc-item-num">1 </span>Multi-Armed Bandits</a></span><ul class="toc-item"><li><span><a href="#Differences-Between-A/B-Testing-and-Bandit-Testing" data-toc-modified-id="Differences-Between-A/B-Testing-and-Bandit-Testing-1.1"><span class="toc-item-num">1.1 </span>Differences Between A/B Testing and Bandit Testing</a></span></li><li><span><a href="#Bandit-Algorithms" data-toc-modified-id="Bandit-Algorithms-1.2"><span class="toc-item-num">1.2 </span>Bandit Algorithms</a></span><ul class="toc-item"><li><span><a href="#Algorithm-1---Epsilon-Greedy" data-toc-modified-id="Algorithm-1---Epsilon-Greedy-1.2.1"><span class="toc-item-num">1.2.1 </span>Algorithm 1 - Epsilon Greedy</a></span></li><li><span><a href="#Algorithm-2---Boltzmann-Exploration-(Softmax)" data-toc-modified-id="Algorithm-2---Boltzmann-Exploration-(Softmax)-1.2.2"><span class="toc-item-num">1.2.2 </span>Algorithm 2 - Boltzmann Exploration (Softmax)</a></span></li><li><span><a href="#Algorithm-3---Upper-Confidence-Bounds-(UCB)" data-toc-modified-id="Algorithm-3---Upper-Confidence-Bounds-(UCB)-1.2.3"><span class="toc-item-num">1.2.3 </span>Algorithm 3 - Upper Confidence Bounds (UCB)</a></span></li></ul></li><li><span><a href="#Experimenting-With-Bandit-Algorithms" data-toc-modified-id="Experimenting-With-Bandit-Algorithms-1.3"><span class="toc-item-num">1.3 </span>Experimenting With Bandit Algorithms</a></span></li></ul></li><li><span><a href="#Bayesian-Bandits" data-toc-modified-id="Bayesian-Bandits-2"><span class="toc-item-num">2 </span>Bayesian Bandits</a></span><ul class="toc-item"><li><span><a href="#Beta-Distribution" data-toc-modified-id="Beta-Distribution-2.1"><span class="toc-item-num">2.1 </span>Beta Distribution</a></span></li><li><span><a href="#Thompson-Sampling" data-toc-modified-id="Thompson-Sampling-2.2"><span class="toc-item-num">2.2 </span>Thompson Sampling</a></span></li><li><span><a href="#Notes-On-Bandit-Testings" data-toc-modified-id="Notes-On-Bandit-Testings-2.3"><span class="toc-item-num">2.3 </span>Notes On Bandit Testings</a></span><ul class="toc-item"><li><span><a href="#Short-term-testing" data-toc-modified-id="Short-term-testing-2.3.1"><span class="toc-item-num">2.3.1 </span>Short-term testing</a></span></li><li><span><a href="#Long-term-testing" data-toc-modified-id="Long-term-testing-2.3.2"><span class="toc-item-num">2.3.2 </span>Long-term testing</a></span></li></ul></li><li><span><a href="#Reference" data-toc-modified-id="Reference-2.4"><span class="toc-item-num">2.4 </span>Reference</a></span></li></ul></li></ul></div>
End of explanation
"""
def generate_bernoulli_bandit_data(n_simulations, K):
"""
Generate simulated data that represents success / trial data
Parameters
----------
n_simulations : int
the total number of turns in a simulation.
K : int
the total number of arms.
Returns
-------
ctr : float 1d ndarray, shape[K,]
the randomly generated empirical click through rate for each arm
rewards : bool 2d ndarray, shape [n_simulations, K]
given the empirical ctr, simulates for each turn of the simulation
whether each arm, if pulled, would obtain the
reward or not (i.e. whether the webpage gets clicked)
"""
ctr = np.random.rand(K)
rewards = np.random.rand(n_simulations, K) < np.tile(ctr, (n_simulations, 1))
return ctr, rewards
K = 2
n_simulations = 5
ctr, rewards = generate_bernoulli_bandit_data(n_simulations, K)
print(ctr)
print(rewards)
"""
Explanation: Multi-Armed Bandits
Imagine this scenario: we're in a casino. There are many different slot machines (known as "one-armed bandits", as they're known for robbing people), each with a lever (an arm, if you will). We think that some slot machines payout more frequently than others do, and our goal is to walk out of the casino with the most money.
The question is, how do we learn which slot machine rewards us with the most money in the shortest amount of time? We could try all the slot machines out to get a sense of the expected return from playing each machine. But remember, each time we play a poor performing machine, we lower our take that we walk out of the casino with that night. In order to maximize how much money we walk out of the casino with, we will have to be efficient with how we collect our data.
Rewriting the scenario above in business language: each time a shopper comes to a webpage, we show them one of the $K$ variations of the webpage. They either click on it or do not, and we log this information about the (binary) reward for each of the $K$ variations. Next, we proceed to the next shopper and have to choose one of the $K$ webpage variations again.
Differences Between A/B Testing and Bandit Testing
In both scenarios above, we would normally determine our "winner" (the slot machine that pays the most, or the webpage variation that gets the most clicks) using the well-known A/B testing approach. The A/B testing approach consists of a period of pure exploration, where we randomly assign equal numbers of users to one of the $K$ variations and run the test until it's valid. After that, it jumps into pure exploitation, where we send 100% of our users to the more successful version of the site.
Two possible problems with the classical A/B testing approach are that:
It jumps discretely from exploration to exploitation, when we might be able to transition more smoothly.
During the exploratory phase (the test), it wastes resources exploring inferior options in order to gather as much data as possible.
Given the exploration - exploitation dilemma stated above, the bandit testing approach tries to account for this. The following graph depicts the difference between the two types of testing methods:
<img src="img/ab_vs_bandit.png" width="70%" height="70%">
If we have three variations that we wish to test, with the A/B testing approach we try out each of the three variations with equal proportions until we are done with our test at week 5, and then select the variation with the highest value.
As for bandit testing, it attempts to use what it knows about each variation from the very beginning, and it continuously updates the probabilities that it will select each variation throughout the optimization process. In the above chart we can see that with each new week, the bandit testing reduces how often it selects the lower performing options and increases how often it selects the highest performing option.
We need to explore in order to figure out what works and what doesn't. On the other hand, if we exploit we take advantage of what we have learned. The bandit testing approach highlights the fact that collecting data also has its cost.
To be specific, bandit testing algorithms will try to minimize what's known as regret, which is the difference between our actual payoff and the payoff we would have collected had we played the optimal (best) options at every opportunity. There are tons of different bandit methods; in the next section we'll look at some of the more common ones.
Bandit Algorithms
Before introducing the algorithms and trying them out through simulations, we'll denote some notations and terminologies to formally define the problem:
Arms are simply the variations that we're testing (the webpages, in our case), and there will be $K$ of them in total.
In a simulation of $t$ turns (i.e. the number of samples in a simulation), we'll maintain empirical means of the reward for each arm (e.g. if after trying out arm A for 10 turns it got 3 clicks, its empirical mean is simply 0.3) that are updated at every turn $t$.
$u_i(t)$ is the empirical mean of arm $i$ after $t$ turns.
$p_i(t)$ is the probability of picking arm $i$ at turn $t$.
Let's look at our simulated data before diving into each algorithm (hopefully the docstrings are self-explanatory).
End of explanation
"""
def epsilon_greedy(counts, epsilon=0.5, decrease_const=1000):
"""
Adaptive epsilon greedy
Parameters
----------
counts : int 2d-array, shape(K, 2), where K = the total number of arms
success and failures for each arm where column 0 represents
success, 1 represents failure
epsilon : float
the initial probability of choosing a random arm;
1 - epsilon is the probability of choosing the current best arm
decrease_const : int
parameter for the adaptive (annealing) epsilon, where the epsilon
parameter will decrease as time goes by.
Returns
-------
(int) the chosen arm
"""
# calculate the empirical means and the total number of simulations that were ran
n_arms = counts.shape[0]
totals = counts.sum(axis=1)
successes = counts[:, 0]
empirical_means = successes / totals
total_counts = counts.sum()
epsilon /= (1 + total_counts / decrease_const)
if np.random.rand() > epsilon:
return np.argmax(empirical_means)
else:
return np.random.randint(0, n_arms)
# counts : stores the counts of success and failures for each arm
# where column 0 represents success, 1 represents failure.
# each arm's count is initialiated as 1 to ensure that each arm is
# played at least once, to prevent "cold start" problem and
# 0 division in the beginning
K = 2
counts = np.ones((K, 2))
print(counts)
epsilon_greedy(counts)
"""
Explanation: Algorithm 1 - Epsilon Greedy
At each round $t = 1, 2, ...$ the Epsilon Greedy algorithm will:
Choose a random arm with the probability of $\epsilon$.
Choose the arm with the current best empirical mean with probability of $1-\epsilon$.
In mathematical notations:
\begin{align}
p_i(t+1)=
\begin{cases}
1 - \epsilon + \epsilon \big/ K & \quad \text{if i = } argmax_{j = 1, ..., K} \ u_j(t) \
\epsilon \big/ K & \quad otherwise
\end{cases}
\end{align}
Or more intuitively:
When a new visitor comes to the site, the algorithm flips a coin that comes up tails with probability $\epsilon$. When it does in fact come up tails, the algorithm is going to explore. The exploration phase randomly chooses amongst all possible arms with equal (uniform) probability and shows the chosen one to the visitor.
On the other hand, the algorithm will exploit the best known solution with the probability of $1- \epsilon$. To exploit, the algorithm looks up the current empirical means and shows the best one to the visitor.
The image below sums up the algorithm pretty well.
<img src="img/epsilon_greedy.png" width="70%" height="70%">
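As a rough illustration (a small sketch that is not part of the original notebook, using made-up empirical means), the selection frequency implied by the formula can be checked empirically: with $K = 2$ and a fixed, non-annealed $\epsilon = 0.5$, the current best arm should be chosen about $1 - \epsilon + \epsilon / K = 0.75$ of the time.
```python
# illustrative check of the epsilon greedy selection probability,
# assuming a fixed epsilon (no annealing) and made-up empirical means
import numpy as np

np.random.seed(0)
K, eps, n_trials = 2, 0.5, 100000
empirical_means = np.array([0.4, 0.1])  # assume arm 0 currently looks best

explore = np.random.rand(n_trials) <= eps
picks = np.where(explore,
                 np.random.randint(0, K, size=n_trials),
                 np.argmax(empirical_means))
print((picks == 0).mean())  # close to 1 - eps + eps / K = 0.75
```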
End of explanation
"""
# show adaptive learning rate
epsilon = 0.5
decrease_const = 1000
# the epsilon value after 10 turns
total_counts = 10
print(epsilon / (1 + total_counts / decrease_const))
# after 10000 turns
total_counts = 10000
print(epsilon / (1 + total_counts / decrease_const))
"""
Explanation: The decrease_const parameter in the function above may look unfamiliar.
For the Epsilon Greedy algorithm, setting the $\epsilon$ can be a bit tricky. If it's too small, exploration will be slow at the beginning, and we will be slow to react to changes. If we happen to sample, say, the second-best arm the first few times, it may take a long time to discover that another arm is actually better. If $\epsilon$ is too big, we'll waste many trials pulling random arms without gaining much.
To accommodate this situation, we will set $\epsilon$ to a higher value in the beginning and anneal (gradually lower) it over time. Intuitively, this simply means that after exploring around for a while, we become more certain about each arm's empirical mean. After that, it's better to exploit.
In the function call above, the $\epsilon$ at turn $t$ will become:
\begin{align}
\epsilon(t) = \epsilon(0) \Big/ (1 + t/T)
\end{align}
Where $T$ is a new parameter that represents a decreasing constant.
Note that there are different ways of annealing a parameter, but the spirit is the same.
End of explanation
"""
def softmax(counts):
"""
adaptive softmax
Parameters
----------
counts : int 2d-array, shape( K, 2 ), where K = the total number of arms
success and failures for each arm where column 0 represents
success, 1 represents failure
Returns
-------
(int) the chosen arm
"""
# calculate the empirical means and the total number of simulations that were ran
totals = counts.sum(axis=1)
successes = counts[:, 0]
empirical_means = successes / totals
total_counts = counts.sum()
# annealing (adaptive learning rate)
tau = 1 / np.log(total_counts + 0.000001)
probs_n = np.exp(empirical_means / tau)
probs_d = probs_n.sum()
probs = probs_n / probs_d
cum_prob = 0.
z = np.random.rand()
for idx, prob in enumerate(probs):
cum_prob += prob
if cum_prob > z:
return idx
counts = np.ones((K, 2))
softmax(counts)
"""
Explanation: Algorithm 2 - Boltzmann Exploration (Softmax)
The Softmax algorithm picks each arm with a probability that is proportional to the exponential of its (temperature-scaled) average reward:
\begin{align}
p_i(t+1)= \frac{ e^{u_i(t) / \tau} }{ \sum_{j=1}^K e^{u_j(t) / \tau} }
\end{align}
Where $\tau$ is a temperature parameter, controlling the randomness of the choice. When $\tau$ = 0, the algorithm acts like pure greedy. As $\tau$ grows to infinity, the algorithm will pick arms uniformly at random.
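To get a feel for the temperature parameter, here is a small sketch (not part of the original notebook, with made-up empirical means) that prints the resulting probabilities for a few values of $\tau$:
```python
# illustrative effect of the temperature tau on the softmax probabilities,
# assuming two arms with made-up empirical means of 0.3 and 0.5
import numpy as np

empirical_means = np.array([0.3, 0.5])
for tau in (0.01, 0.1, 1.0, 10.0):
    probs = np.exp(empirical_means / tau)
    probs /= probs.sum()
    print(tau, probs)
# small tau -> almost all of the probability goes to the better arm (greedy-like)
# large tau -> the probabilities approach uniform
```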
End of explanation
"""
def ucb(counts):
"""
Upper Confidence Bounds
Parameters
----------
counts : int 2d ndarray, shape [K, 2], where K = the total number of arms
success and failures for each arm where column 0 represents
success, 1 represents failure
Returns
-------
(int) the chosen arm
"""
# calculate the empirical means and the total number of simulations that were ran
totals = counts.sum(axis=1)
successes = counts[:, 0]
empirical_means = successes / totals
total_counts = counts.sum()
bonus = np.sqrt(2 * np.log(total_counts) / totals)
return np.argmax(empirical_means + bonus)
counts = np.ones((K, 2))
ucb(counts)
"""
Explanation: Algorithm 3 - Upper Confidence Bounds (UCB)
In the world of statistics, whenever we estimate some unknown parameter (such as the mean of a distribution) using random samples, there is a way to quantify the uncertainty inherent in our estimate. For example, the true mean of a fair six-sided die is 3.5. But if we only roll it once and get a 2, our best estimate of the mean is just 2. Obviously that estimate is not very good, and we can quantify the confidence we have for our estimate. There are confidence bounds which can be written, for example, as: "The mean of this die is 2, with a 95-th percentile lower bound of 1.4 and a 95-th percentile upper bound of 5.2."
The upper confidence bound (UCB) family of algorithms, as its name suggests, selects the arm with the largest upper confidence bound at each turn. The intuition is this: the more times we roll the die, the tighter the confidence bounds, and if we roll the die an infinite number of times then the width of the confidence bound is zero. In short, as the number of rolls increases, the uncertainty decreases, and so does the width of the confidence bound.
Thus, unlike the Epsilon Greedy and Softmax algorithms that only keep track of the empirical means, the UCB algorithm also maintains the number of times that each arm has been played, denoted by $n_i(t)$. Initially, each arm is played once. Afterwards, at round $t$, the algorithm greedily picks the arm $j(t)$ as follows:
\begin{align}
j(t) = argmax_{i = 1, ..., K} \left( u_i + \sqrt{\frac{2 \cdot ln(t)}{n_i}} \right)
\end{align}
We can see that the UCB algorithm will try to learn about arms that we don't know enough about. The main advantages of these types of algorithms are:
Takes the uncertainty of the sample mean estimate into account in a smart way.
No parameters (e.g. epsilon, annealing schedule) to tune.
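As a small sketch (made-up numbers, not from the original notebook), the exploration bonus $\sqrt{2 \ln(t) / n_i}$ is what lets an under-explored arm win the argmax even when its empirical mean is lower:
```python
# illustrative UCB scores for two arms, assuming made-up pull counts and means:
# the under-explored arm gets a large bonus and wins the argmax despite a lower mean
import numpy as np

empirical_means = np.array([0.45, 0.55])
n_pulls = np.array([10, 1000])  # arm 0 barely explored, arm 1 well explored
total_pulls = n_pulls.sum()
bonus = np.sqrt(2 * np.log(total_pulls) / n_pulls)
print(empirical_means + bonus)  # arm 0's score ends up higher, so it gets pulled next
```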
End of explanation
"""
def run_bandit_algo(rewards, ctr, algo, **kwargs):
"""
Run different types of bandit algorithms
Parameters
----------
rewards, ctr :
Return value of the `generate_bernoulli_bandit_data` function
algo : bandit function
[epsilon_greedy, softmax, ucb]
**kwargs :
additional parameters to pass in to the algo
Returns
-------
cum_regret : 1d ndarray, shape [n_simulations,]
The total regret accumulated over the experiment, where the regret
is measured by the maximum ctr - the chosen arm's ctr
opt_arm_percentage : float
The percentage of plays in which the optimal arm is pulled
"""
n_simulations, K = rewards.shape
# counts : success and failures for each arm where column 0 represents
# success, 1 represents failure. Each arm's count is initialiated as 1
# to ensure that each arm is played at least once, to prevent "cold start"
# problem and 0 division in the beginning
counts = np.ones((K, 2), dtype=np.int)
regret = np.zeros(n_simulations)
max_ctr_count = 0
max_ctr = np.max(ctr)
max_ctr_idx = np.argmax(ctr)
for i in range(n_simulations):
# 1. run the algorithm to obtain the arm that got pulled
# 2. update the success / failure according to the generated rewards
# 3. update the expected regret for each turn of the simulation
# 4. if the arm that got pulled is the one with the opt ctr, increment this count
arm = algo(counts, **kwargs)
if rewards[i, arm] == 1:
counts[arm, 0] += 1
else:
counts[arm, 1] += 1
regret[i] = max_ctr - ctr[arm]
if arm == max_ctr_idx:
max_ctr_count += 1
cum_regret = np.cumsum(regret)
opt_arm_percentage = max_ctr_count / n_simulations
return cum_regret, opt_arm_percentage
def run_experiment(K, n_simulations, algorithms):
"""
Run the bandit algorithm simulations with the
specified number of samples per simulation, the number of arms
and the different versions of the algorithms
Parameters
----------
n_simulations : int
the total number of turns in a simulation
K : int
the total number of arms
algorithms : list of functions
the list of bandit algorithms to simulate
Returns
-------
ctr : float 1d-array, shape [K,]
the randomly generated empirical click through rate for each arm
algo_opt_arm_percentage : float list
the percentage of simulations that chose the best arm
algo_cum_regret : float 2d-array, shape [n_simulations, length of the algorithm]
each column stores the cumulative regret for one algorithm
fig : matplotlib figure
the cumulative regret for each bandit algorithm
"""
algo_opt_arm_percentage = []
algo_cum_regret = np.zeros((n_simulations, len(algorithms)))
fig = plt.figure(figsize=(10, 7))
ctr, rewards = generate_bernoulli_bandit_data(n_simulations, K)
for idx, algo in enumerate(algorithms):
cum_regret, opt_arm_percentage = run_bandit_algo(rewards, ctr, algo=algo)
algo_cum_regret[:, idx] = cum_regret
algo_opt_arm_percentage.append(opt_arm_percentage)
plt.semilogy(cum_regret, label=algo.__name__)
plt.title('Simulated Bandit Performance for K = {}'.format(K))
plt.ylabel('Cumulative Expected Regret')
plt.xlabel('Round Index')
plt.legend(loc='lower right')
return ctr, algo_opt_arm_percentage, algo_cum_regret, fig
# change default figure size and font size
plt.rcParams['figure.figsize'] = 8, 6
plt.rcParams['font.size'] = 12
K = 5
n_simulations = 10000
algorithms = [epsilon_greedy, softmax, ucb]
np.random.seed(2345)
ctr, algo_opt_arm_percentage, algo_cum_regret, fig = run_experiment(K, n_simulations, algorithms)
plt.show()
print(ctr)
print(algo_opt_arm_percentage)
"""
Explanation: Experimenting With Bandit Algorithms
In this section, we'll use our simulated data to experiment with our algorithms. To do this we'll also need a metric to calculate how well we are doing. Recall the absolute best we can do is to always pick the webpage (arm) with the largest click through rate (ctr). Denote this best arm's probability by $w_{opt}$. Our score should be relative to how well we would have done had we chosen the best arm from the beginning. This motivates the total regret of a strategy, defined as:
\begin{align}
R_T & = \sum_{t=1}^{T} \left( w_{opt} - w_{I(t)} \right) \nonumber \
& = Tw_{opt} - \sum_{t=1}^{T} \; w_{I(t)}
\end{align}
Where $T$ is the total number of samples in the experiment, $w_{I(t)}$ is the probability of obtaining the reward (getting clicked) of the chosen arm in the $t_{th}$ turn. A total regret of 0 means the strategy is attaining the best possible score. This is likely not possible, as initially our algorithm will often make the wrong choice. Ideally, a strategy's total regret should flatten as it learns the best bandit. (Mathematically, we achieve $w_{I(t)} = w_{opt}$ often)
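As a tiny worked example (the numbers are made up, not taken from the simulation), suppose $w_{opt} = 0.35$ and over 4 turns we happened to pull arms with ctr 0.25, 0.35, 0.25 and 0.35; the total regret is then roughly 0.2:
```python
# toy regret calculation with made-up numbers: each suboptimal pick costs
# w_opt - w_chosen, and the optimal picks cost nothing
w_opt = 0.35
chosen_ctr = [0.25, 0.35, 0.25, 0.35]
total_regret = sum(w_opt - w for w in chosen_ctr)
print(total_regret)  # ~0.2, i.e. the two suboptimal picks cost about 0.1 each
```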
We'll run the experiment and plot the cumulative regret of the three algorithms below:
End of explanation
"""
plt.figure(figsize=(12, 5))
x = np.linspace(0.01, .99, 100)
params = [(2, 5), (1, 1), (5, 5), (20, 4)]
for a, b in params:
y = beta.pdf(x, a, b)
lines = plt.plot(x, y, label="(%.1f,%.1f)" % (a, b), lw=2)
plt.fill_between(x, 0, y, alpha=0.2, color=lines[0].get_color())
plt.autoscale(tight=True)
plt.legend(loc='upper left', title="(a,b)-parameters")
plt.show()
"""
Explanation: Section Conclusion: The cumulative expected regret plot from our experiment shows that all three algorithms have converged (the growth of the cumulative expected regret flattens out to a steady level). The UCB algorithm also seems to be doing better than the other two over this limited horizon; lower is better in this plot, since the y-axis represents regret.
Bayesian Bandits
Next, we'll introduce a Bayesian method called Thompson Sampling. Recall that the problem we want to solve is the following. We came up with $K$ different variations of the webpage (e.g. different layout) and we wish to find the ones with the best click through rate (CTR), e.g. clicking to sign-up for the newsletter. Let's represent each CTR by $\theta_i$ - i.e., $\theta_i$ is the true probability that an individual user will click when they are shown the $i_{th}$ webpage. It is important to note that we don't actually know what $\theta_i$ is - if we did, we could simply choose the $i$ for which $\theta_i$ is largest and move on. We're simply pretending that we know in order to simulate the performance of the algorithm.
Using the Bayesian approach we will construct a prior probability distribution that represents our original belief about the actual value of $\theta_i$, the ctr for the $i_{th}$ webpage. The prior we'll use is the Beta distribution. Here's a quick recap of the distribution:
Beta Distribution
The Beta distribution is very useful in Bayesian statistics. A random variable $X$ has a Beta distribution, with parameters $(\alpha, \beta)$, if its density function is:
\begin{align}
f_X(x | \; \alpha, \beta ) = \frac{ x^{(\alpha - 1)}(1-x)^{ (\beta - 1) } }{B(\alpha, \beta) }
\end{align}
where $B$ is the Beta function (hence the name). The random variable $X$ is only allowed in [0,1], making the Beta distribution a popular distribution for decimal values, probabilities and proportions. The values of $\alpha$ and $\beta$, both positive values, provide great flexibility in the shape of the distribution. Below we plot some Beta distributions with different $\alpha$ and $\beta$ values:
End of explanation
"""
class BayesianBandit:
"""
Thompson Sampling
Parameters
----------
K : int
total number of arms
prior_params : list of float length 2 tuple, default None, (optional)
each element of the list is a tuple, where each tuple
contains the alpha and beta parameter that represents the prior
beta distribution for each arm. If not supplied
it will assume that all arms's prior starts with an uniform distribution
Attributes
----------
trials, success : int 1d ndarray, shape [K,]
stores the trials and success for each arm,
e.g. trial = [ 1, 1 ] and success = [ 0, 1 ] means
that both arm has been pulled once and arm 1 has generated
the reward (clicked)
"""
def __init__(self, K, prior_params=None):
if prior_params:
priors = namedtuple("priors", ["alpha", "beta"])
prior = [priors(*p) for p in prior_params]
self.alphas = np.array([p.alpha for p in prior])
self.betas = np.array([p.beta for p in prior])
else:
self.alphas = np.ones(K)
self.betas = np.ones(K)
self.trials = np.zeros(K, dtype=np.int)
self.success = np.zeros(K, dtype=np.int)
def get_recommendation(self):
"""
for all arms, construct their beta distribution and
draw a random sample from it, then return the arm
with the maximum value random sample
"""
theta = np.random.beta(self.alphas + self.success,
self.betas + self.trials - self.success)
return np.argmax(theta)
def update_result(self, arm, converted):
"""
override the trials and success array, the success array
will only be updated if it has generated a reward
"""
self.trials[arm] += 1
if converted:
self.success[arm] += 1
return self
def experiment(T, ctr, prior_params=None):
"""
run the experiment for Thompson Sampling,
pass in ctr, the fixed click through rate for each arm;
the total number of arms K is inferred from the length of ctr
Parameters
----------
T : int
number of simulation in an experiment
ctr : float sequence, len = K (total number of arms)
the empirical click through rate for each arm
prior_params : list of float length 2 tuple, default None, (optional)
each element of the list is a tuple, where each tuple
contains the alpha and beta parameter that represents the prior
beta distribution for each arm. If not supplied
it will assume that all arms's prior starts with an uniform distribution
Returns
-------
ctr : float sequence, len = K
the supplied or the randomly generated ctr
trials, success : int 2d ndarray, shape [T, K]
trials and success recorded for each turn of the experiment
alphas, betas : float 1d ndarray, shape [K,]
the alpha and beta parameters for each arm
"""
K = len(ctr)
trials = np.zeros((T, K), dtype=np.int)
success = np.zeros((T, K), dtype=np.int)
bayes_bandit = BayesianBandit(K, prior_params)
for t in range(T):
arm = bayes_bandit.get_recommendation()
converted = np.random.rand() < ctr[arm]
bayes_bandit.update_result(arm, converted)
trials[t] = bayes_bandit.trials
success[t] = bayes_bandit.success
return ctr, trials, success, bayes_bandit.alphas, bayes_bandit.betas
def experiment_plot(ctr, trials, success):
"""
Pass in the ctr, trials and success returned
by the `experiment` function and plot
the Cumulative Number of Turns For Each Arm and
the CTR's Convergence Plot side by side
"""
T, K = trials.shape
n = np.arange(T) + 1
fig = plt.figure(figsize=(14, 7))
plt.subplot(121)
for i in range(K):
plt.loglog(n, trials[:, i], label="arm {}".format(i + 1))
plt.legend(loc="upper left")
plt.xlabel("Number of turns")
plt.ylabel("Number of turns/arm")
plt.title("Cumulative Number of Turns For Each Arm")
plt.subplot(122)
for i in range(K):
plt.semilogx(n, np.zeros(T) + ctr[i], label="arm {}'s CTR".format(i + 1))
plt.semilogx(n, (success[:, 0] + success[:, 1]) / n, label="CTR at turn t")
plt.axis([1, T, 0, 1])
plt.legend(loc="upper left")
plt.xlabel("Number of turns")
plt.ylabel("CTR")
plt.title("CTR's Convergence Plot")
return fig
# number of simulation in an experiment
T = 10000
# the empirical click through rate for each arm
ctr = 0.25, 0.35
ctr, trials, success, alphas, betas = experiment(T=T, ctr=ctr)
trials
fig = experiment_plot(ctr, trials, success)
plt.show()
"""
Explanation: There are two important things to note about the Beta distribution:
The first is the presence of the flat distribution above, specified by parameters $(1,1)$. This is the Uniform distribution. Hence the Beta distribution is a generalization of the Uniform distribution.
The second is that there is an interesting connection between the Beta distribution and the Binomial distribution. Suppose we are interested in some unknown proportion or probability $p$. We assign a $\text{Beta}(\alpha, \beta)$ prior to $p$. We observe some data generated by a Binomial process, say $X \sim \text{Binomial}(N, p)$, with $p$ still unknown. Then our posterior is again a Beta distribution, i.e. $p | X \sim \text{Beta}( \alpha + X, \beta + N -X )$. Succinctly, one can relate the two by "a Beta prior with Binomial observations creates a Beta posterior".
In light of the above two paragraphs, if we start with a $\text{Beta}(1,1)$ prior on $p$ (which is a Uniform), observe data $X \sim \text{Binomial}(N, p)$, then our posterior is $\text{Beta}(1 + X, 1 + N - X)$.
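As a small sketch of this conjugate update (the numbers below are made up), we can build the posterior directly with scipy:
```python
# illustrative Beta-Binomial update: flat Beta(1, 1) prior, then observe
# X successes (clicks) out of N trials (impressions); the made-up numbers
# here are X = 7 and N = 20
from scipy.stats import beta

N, X = 20, 7
posterior = beta(1 + X, 1 + N - X)
print(posterior.mean())           # (1 + X) / (2 + N), roughly 0.36
print(posterior.interval(0.95))   # a 95% credible interval for p
```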
Thompson Sampling
We start by assuming priors on the ctr of each webpage. To be explicit about the phrase "assuming the priors", we will assume that we're completely ignorant of these probabilities, so a very natural prior is the flat prior over 0 to 1, $\text{Beta}(\alpha=1,\beta=1)$. The algorithm then proceeds as follows:
For each turn:
Sample a random variable $X_i$ from the prior of arm $i$, for all $i$ ($K$ in total).
Select the arm with largest sample, i.e. select $i = \text{argmax}\; X_i$.
Observe the result of pulled arm $i$, and update your prior with that arm $i$.
Return to 1.
Like all the algorithms we've introduced before, Thompson Sampling suggests that we should not discard losers, but we should pick them at a decreasing rate as we gather confidence that there exist better webpages (arms). This follows because there is always a non-zero chance that a webpage with a lower ctr will get chosen, but the probability of this event decreases as we play more rounds.
End of explanation
"""
def plot_beta_dist(ctr, trials, success, alphas, betas, turns):
"""
Pass in the ctr, trials and success, alphas, betas returned
by the `experiment` function and the number of turns
and plot the beta distribution for all the arms in that turn
"""
subplot_num = len(turns) / 2
x = np.linspace(0.001, .999, 200)
fig = plt.figure(figsize=(14, 7))
for idx, turn in enumerate(turns):
plt.subplot(subplot_num, 2, idx + 1)
for i in range(len(ctr)):
y = beta(alphas[i] + success[turn, i],
betas[i] + trials[turn, i] - success[turn, i]).pdf(x)
line = plt.plot(x, y, lw=2, label="arm {}".format(i + 1))
color = line[0].get_color()
plt.fill_between(x, 0, y, alpha=0.2, color=color)
plt.axvline(x=ctr[i], color=color, linestyle="--", lw=2)
plt.title("Posteriors After {} turns".format(turn))
plt.legend(loc="upper right")
return fig
turns = [1, 100, 1000, 9999]
posterior_fig = plot_beta_dist(ctr, trials, success, alphas, betas, turns)
plt.show()
"""
Explanation: In our simulation, we gave the Bayesian bandit two webpages (arms) - one had a CTR of 0.25, the other had a CTR of 0.35. To start with, both webpages were displayed to the user with roughly equal probability. Over time, evidence accumulated that arm 2 was considerably better than arm 1. At this point the algorithm switched to displaying primarily webpage 1, and the overall CTR of the experiment converged to 0.35 (the optimal CTR).
We can also visualize the Beta distributions for each arm at different turns.
End of explanation
"""
|