Dataset columns: repo_name (string, lengths 6 to 103), path (string, lengths 4 to 209), copies (string, 325 distinct values), size (string, lengths 4 to 7), content (string, lengths 838 to 1.04M), license (string, 15 distinct values).

repo_name | path | copies | size | content | license |
---|---|---|---|---|---|
kastnerkyle/pylearn2 | pylearn2/scripts/datasets/make_mnistplus.py | 5 | 8862 | """
Script to generate the MNIST+ dataset. The purpose of this dataset is to make a
more challenging MNIST-like dataset, with multiple factors of variation. These
factors can serve to evaluate a model's performance at learning invariant
features, or its ability to disentangle factors of variation in a multi-task
classification setting. The dataset is stored under $PYLEARN2_DATA_PATH.
The dataset variants are created as follows. For each MNIST image, we:
1. Perform a random rotation of the image (optional)
2. Rescale the image from 28x28 to 48x48, yielding variable `image`.
3.1 Extract a random patch `textured_patch` from a fixed or random image of the
Brodatz texture dataset.
3.2 Generate mask of MNIST digit outline, by thresholding MNIST digit at 0.1
3.3 Fuse MNIST digit and textured patch as follows:
textured_patch[mask] <= image[mask]; image <= textured_patch;
4. Randomly select position of light source (optional)
5. Perform embossing operation, given fixed lighting position obtained in 4.
"""
import numpy
import pickle
import pylab as pl
from copy import copy
from optparse import OptionParser
from pylearn2.datasets import mnist
from pylearn2.utils import string_utils
import warnings
try:
from PIL import Image
except ImportError:
warnings.warn("Couldn't import Image from PIL, so far make_mnistplus "
"is only supported with PIL")
OUTPUT_SIZE = 48
DOWN_SAMPLE = 1
def to_array(img):
"""
Convert PIL.Image to numpy.ndarray.
    :param img: PIL.Image
"""
return numpy.array(img.getdata()) / 255.
def to_img(arr, os):
"""
Convert numpy.ndarray to PIL.Image
:param arr: numpy.ndarray
:param os: integer, size of output image.
"""
return Image.fromarray(arr.reshape(os, os) * 255.)
def emboss(img, azi=45., ele=18., dep=2):
"""
Perform embossing of image `img`.
:param img: numpy.ndarray, matrix representing image to emboss.
:param azi: azimuth (in degrees)
:param ele: elevation (in degrees)
:param dep: depth, (0-100)
"""
# defining azimuth, elevation, and depth
ele = (ele * 2 * numpy.pi) / 360.
azi = (azi * 2 * numpy.pi) / 360.
a = numpy.asarray(img).astype('float')
# find the gradient
grad = numpy.gradient(a)
# (it is two arrays: grad_x and grad_y)
grad_x, grad_y = grad
# getting the unit incident ray
gd = numpy.cos(ele) # length of projection of ray on ground plane
dx = gd * numpy.cos(azi)
dy = gd * numpy.sin(azi)
dz = numpy.sin(ele)
# adjusting the gradient by the "depth" factor
# (I think this is how GIMP defines it)
grad_x = grad_x * dep / 100.
grad_y = grad_y * dep / 100.
# finding the unit normal vectors for the image
leng = numpy.sqrt(grad_x**2 + grad_y**2 + 1.)
uni_x = grad_x/leng
uni_y = grad_y/leng
uni_z = 1./leng
# take the dot product
a2 = 255 * (dx*uni_x + dy*uni_y + dz*uni_z)
# avoid overflow
a2 = a2.clip(0, 255)
# you must convert back to uint8 /before/ converting to an image
return Image.fromarray(a2.astype('uint8'))
def extract_patch(textid, os, downsample):
"""
Extract a patch of texture #textid of Brodatz dataset.
:param textid: id of texture image to load.
:param os: size of MNIST+ output images.
:param downsample: integer, downsampling factor.
"""
temp = '${PYLEARN2_DATA_PATH}/textures/brodatz/D%i.gif' % textid
fname = string_utils.preprocess(temp)
img_i = Image.open(fname)
img_i = img_i.resize((img_i.size[0]/downsample,
img_i.size[1]/downsample), Image.BILINEAR)
x = numpy.random.randint(0, img_i.size[0] - os)
y = numpy.random.randint(0, img_i.size[1] - os)
patch = img_i.crop((x, y, x+os, y+os))
return patch, (x, y)
def gendata(enable, os, downsample, textid=None, seed=2313, verbose=False):
"""
Generate the MNIST+ dataset.
:param enable: dictionary of flags with keys ['texture', 'azimuth',
'rotation', 'elevation'] to enable/disable a given factor of variation.
:param textid: if enable['texture'], id number of the Brodatz texture to
load. If textid is None, we load a random texture for each MNIST image.
:param os: output size (width and height) of MNIST+ images.
:param downsample: factor by which to downsample texture.
:param seed: integer for seeding RNG.
:param verbose: bool
"""
rng = numpy.random.RandomState(seed)
data = mnist.MNIST('train')
test = mnist.MNIST('test')
data.X = numpy.vstack((data.X, test.X))
data.y = numpy.hstack((data.y, test.y))
del test
output = {}
output['data'] = numpy.zeros((len(data.X), os*os))
output['label'] = numpy.zeros(len(data.y))
if enable['azimuth']:
output['azimuth'] = numpy.zeros(len(data.y))
if enable['elevation']:
output['elevation'] = numpy.zeros(len(data.y))
if enable['rotation']:
output['rotation'] = numpy.zeros(len(data.y))
if enable['texture']:
output['texture_id'] = numpy.zeros(len(data.y))
output['texture_pos'] = numpy.zeros((len(data.y), 2))
for i in xrange(len(data.X)):
# get MNIST image
frgd_img = to_img(data.X[i], 28)
frgd_img = frgd_img.convert('L')
if enable['rotation']:
rot = rng.randint(0, 360)
output['rotation'][i] = rot
frgd_img = frgd_img.rotate(rot, Image.BILINEAR)
frgd_img = frgd_img.resize((os, os), Image.BILINEAR)
if enable['texture']:
if textid is None:
# extract patch from texture database. Note that texture #14
# does not exist.
textid = 14
while textid == 14:
textid = rng.randint(1, 113)
patch_img, (px, py) = extract_patch(textid, os, downsample)
patch_arr = to_array(patch_img)
# store output details
output['texture_id'][i] = textid
output['texture_pos'][i] = (px, py)
# generate binary mask for digit outline
frgd_arr = to_array(frgd_img)
mask_arr = frgd_arr > 0.1
# copy contents of masked-MNIST image into background texture
blend_arr = copy(patch_arr)
blend_arr[mask_arr] = frgd_arr[mask_arr]
            # this now becomes the image to emboss
frgd_img = to_img(blend_arr, os)
azi = 45
if enable['azimuth']:
azi = rng.randint(0, 360)
output['azimuth'][i] = azi
ele = 18.
if enable['elevation']:
ele = rng.randint(0, 60)
output['elevation'][i] = ele
mboss_img = emboss(frgd_img, azi=azi, ele=ele)
mboss_arr = to_array(mboss_img)
output['data'][i] = mboss_arr
output['label'][i] = data.y[i]
if verbose:
pl.imshow(mboss_arr.reshape(os, os))
pl.gray()
pl.show()
fname = 'mnistplus'
if enable['azimuth']:
fname += "_azi"
if enable['rotation']:
fname += "_rot"
if enable['texture']:
fname += "_tex"
fp = open(fname+'.pkl','w')
pickle.dump(output, fp, protocol=pickle.HIGHEST_PROTOCOL)
fp.close()
if __name__ == '__main__':
parser = OptionParser()
parser.add_option('-v', action='store_true', dest='verbose')
parser.add_option('--azimuth', action='store_true', dest='azimuth',
help='Enable random azimuth for light-source used in embossing.')
parser.add_option('--elevation', action='store_true', dest='elevation',
help='Enable random elevation for light-source used in embossing.')
parser.add_option('--rotation', action='store_true', dest='rotation',
help='Randomly rotate MNIST digit prior to embossing.')
parser.add_option('--texture', action='store_true', dest='texture',
help='Perform joint embossing of fused {MNIST + Texture} image.')
parser.add_option('--textid', action='store', type='int', dest='textid',
help='If specified, use a single texture ID for all MNIST images.',
default=None)
parser.add_option('--output_size', action='store', type='int', dest='os',
help='Integer specifying size of (square) output images.',
default=OUTPUT_SIZE)
parser.add_option('--downsample', action='store', type='int',
dest='downsample', default=DOWN_SAMPLE,
help='Downsampling factor for Brodatz textures.')
(opts, args) = parser.parse_args()
enable = {'texture': opts.texture,
'azimuth': opts.azimuth,
'rotation': opts.rotation,
'elevation': opts.elevation}
gendata(enable=enable, os=opts.os, downsample=opts.downsample,
verbose=opts.verbose, textid=opts.textid)
| bsd-3-clause |
nicproulx/mne-python | tutorials/plot_brainstorm_auditory.py | 3 | 16597 | # -*- coding: utf-8 -*-
"""
====================================
Brainstorm auditory tutorial dataset
====================================
Here we compute the evoked from raw for the auditory Brainstorm
tutorial dataset. For comparison, see [1]_ and:
http://neuroimage.usc.edu/brainstorm/Tutorials/Auditory
Experiment:
- One subject, 2 acquisition runs 6 minutes each.
- Each run contains 200 regular beeps and 40 easy deviant beeps.
    - Random ISI: between 0.7s and 1.7s, uniformly distributed.
- Button pressed when detecting a deviant with the right index finger.
The specifications of this dataset were discussed initially on the
`FieldTrip bug tracker <http://bugzilla.fcdonders.nl/show_bug.cgi?id=2300>`_.
References
----------
.. [1] Tadel F, Baillet S, Mosher JC, Pantazis D, Leahy RM.
Brainstorm: A User-Friendly Application for MEG/EEG Analysis.
Computational Intelligence and Neuroscience, vol. 2011, Article ID
879716, 13 pages, 2011. doi:10.1155/2011/879716
"""
# Authors: Mainak Jas <mainak.jas@telecom-paristech.fr>
# Eric Larson <larson.eric.d@gmail.com>
# Jaakko Leppakangas <jaeilepp@student.jyu.fi>
#
# License: BSD (3-clause)
import os.path as op
import pandas as pd
import numpy as np
import mne
from mne import combine_evoked
from mne.minimum_norm import apply_inverse
from mne.datasets.brainstorm import bst_auditory
from mne.io import read_raw_ctf
from mne.filter import notch_filter, filter_data
print(__doc__)
###############################################################################
# To reduce memory consumption and running time, some of the steps are
# precomputed. To run everything from scratch change this to False. With
# ``use_precomputed = False`` running time of this script can be several
# minutes even on a fast computer.
use_precomputed = True
###############################################################################
# The data was collected with a CTF 275 system at 2400 Hz and low-pass
# filtered at 600 Hz. Here the data and empty room data files are read to
# construct instances of :class:`mne.io.Raw`.
data_path = bst_auditory.data_path()
subject = 'bst_auditory'
subjects_dir = op.join(data_path, 'subjects')
raw_fname1 = op.join(data_path, 'MEG', 'bst_auditory',
'S01_AEF_20131218_01.ds')
raw_fname2 = op.join(data_path, 'MEG', 'bst_auditory',
'S01_AEF_20131218_02.ds')
erm_fname = op.join(data_path, 'MEG', 'bst_auditory',
'S01_Noise_20131218_01.ds')
###############################################################################
# In the memory saving mode we use ``preload=False`` and use the memory
# efficient IO which loads the data on demand. However, filtering and some
# other functions require the data to be preloaded in the memory.
preload = not use_precomputed
raw = read_raw_ctf(raw_fname1, preload=preload)
n_times_run1 = raw.n_times
mne.io.concatenate_raws([raw, read_raw_ctf(raw_fname2, preload=preload)])
raw_erm = read_raw_ctf(erm_fname, preload=preload)
###############################################################################
# The data channel array consisted of 274 MEG axial gradiometers, 26 MEG reference
# sensors and 2 EEG electrodes (Cz and Pz).
# In addition:
#
# - 1 stim channel for marking presentation times for the stimuli
# - 1 audio channel for the sent signal
# - 1 response channel for recording the button presses
# - 1 ECG bipolar
# - 2 EOG bipolar (vertical and horizontal)
# - 12 head tracking channels
# - 20 unused channels
#
# The head tracking channels and the unused channels are marked as misc
# channels. Here we define the EOG and ECG channels.
raw.set_channel_types({'HEOG': 'eog', 'VEOG': 'eog', 'ECG': 'ecg'})
if not use_precomputed:
# Leave out the two EEG channels for easier computation of forward.
raw.pick_types(meg=True, eeg=False, stim=True, misc=True, eog=True,
ecg=True)
###############################################################################
# For noise reduction, a set of bad segments have been identified and stored
# in csv files. The bad segments are later used to reject epochs that overlap
# with them.
# The file for the second run also contains some saccades. The saccades are
# removed by using SSP. We use pandas to read the data from the csv files. You
# can also view the files with your favorite text editor.
annotations_df = pd.DataFrame()
offset = n_times_run1
for idx in [1, 2]:
csv_fname = op.join(data_path, 'MEG', 'bst_auditory',
'events_bad_0%s.csv' % idx)
df = pd.read_csv(csv_fname, header=None,
names=['onset', 'duration', 'id', 'label'])
print('Events from run {0}:'.format(idx))
print(df)
df['onset'] += offset * (idx - 1)
annotations_df = pd.concat([annotations_df, df], axis=0)
saccades_events = df[df['label'] == 'saccade'].values[:, :3].astype(int)
# Conversion from samples to times:
onsets = annotations_df['onset'].values / raw.info['sfreq']
durations = annotations_df['duration'].values / raw.info['sfreq']
descriptions = annotations_df['label'].values
annotations = mne.Annotations(onsets, durations, descriptions)
raw.annotations = annotations
del onsets, durations, descriptions
###############################################################################
# Here we compute the saccade and EOG projectors for magnetometers and add
# them to the raw data. The projectors are added to both runs.
saccade_epochs = mne.Epochs(raw, saccades_events, 1, 0., 0.5, preload=True,
reject_by_annotation=False)
projs_saccade = mne.compute_proj_epochs(saccade_epochs, n_mag=1, n_eeg=0,
desc_prefix='saccade')
if use_precomputed:
proj_fname = op.join(data_path, 'MEG', 'bst_auditory',
'bst_auditory-eog-proj.fif')
projs_eog = mne.read_proj(proj_fname)[0]
else:
projs_eog, _ = mne.preprocessing.compute_proj_eog(raw.load_data(),
n_mag=1, n_eeg=0)
raw.add_proj(projs_saccade)
raw.add_proj(projs_eog)
del saccade_epochs, saccades_events, projs_eog, projs_saccade # To save memory
###############################################################################
# Visually inspect the effects of projections. Click on 'proj' button at the
# bottom right corner to toggle the projectors on/off. EOG events can be
# plotted by adding the event list as a keyword argument. As the bad segments
# and saccades were added as annotations to the raw data, they are plotted as
# well.
raw.plot(block=True)
###############################################################################
# A typical preprocessing step is the removal of the power line artifact (50 Hz
# or 60 Hz). Here we notch filter the data at 60, 120 and 180 Hz to remove the
# original 60 Hz artifact and its harmonics. The power spectra are plotted
# before and after the filtering to show the effect. The drop after 600 Hz
# appears because the data was low-pass filtered during acquisition. In
# memory-saving mode we do the filtering at the evoked stage, which is not
# something you would usually do.
if not use_precomputed:
meg_picks = mne.pick_types(raw.info, meg=True, eeg=False)
raw.plot_psd(tmax=np.inf, picks=meg_picks)
notches = np.arange(60, 181, 60)
raw.notch_filter(notches)
raw.plot_psd(tmax=np.inf, picks=meg_picks)
###############################################################################
# We also low-pass filter the data at 100 Hz to remove high-frequency components.
if not use_precomputed:
raw.filter(None, 100., h_trans_bandwidth=0.5, filter_length='10s',
phase='zero-double')
###############################################################################
# Epoching and averaging.
# First some parameters are defined and events extracted from the stimulus
# channel (UPPT001). The rejection thresholds are defined as peak-to-peak
# values and are in T / m for gradiometers, T for magnetometers and
# V for EOG and EEG channels.
tmin, tmax = -0.1, 0.5
event_id = dict(standard=1, deviant=2)
reject = dict(mag=4e-12, eog=250e-6)
# find events
events = mne.find_events(raw, stim_channel='UPPT001')
###############################################################################
# The event timing is adjusted by comparing the trigger times on detected
# sound onsets on channel UADC001-4408.
sound_data = raw[raw.ch_names.index('UADC001-4408')][0][0]
onsets = np.where(np.abs(sound_data) > 2. * np.std(sound_data))[0]
min_diff = int(0.5 * raw.info['sfreq'])
diffs = np.concatenate([[min_diff + 1], np.diff(onsets)])
onsets = onsets[diffs > min_diff]
assert len(onsets) == len(events)
diffs = 1000. * (events[:, 0] - onsets) / raw.info['sfreq']
print('Trigger delay removed (μ ± σ): %0.1f ± %0.1f ms'
% (np.mean(diffs), np.std(diffs)))
events[:, 0] = onsets
del sound_data, diffs
###############################################################################
# We mark a set of bad channels that seem noisier than others. This can also
# be done interactively with ``raw.plot`` by clicking the channel name
# (or the line). The marked channels are added as bad when the browser window
# is closed.
raw.info['bads'] = ['MLO52-4408', 'MRT51-4408', 'MLO42-4408', 'MLO43-4408']
###############################################################################
# The epochs (trials) are created for MEG channels. First we find the picks
# for MEG and EOG channels. Then the epochs are constructed using these picks.
# The epochs overlapping with annotated bad segments are also rejected by
# default. To turn off rejection by bad segments (as was done earlier with
# saccades) you can use keyword ``reject_by_annotation=False``.
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
exclude='bads')
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject, preload=False,
proj=True)
###############################################################################
# We only use the first 40 good epochs from each run. Since we first drop the
# bad epochs, the indices of the epochs are no longer the same as in the
# original epochs collection. Investigation of the event timings reveals that
# the first epoch from the second run corresponds to index 182.
epochs.drop_bad()
epochs_standard = mne.concatenate_epochs([epochs['standard'][range(40)],
epochs['standard'][182:222]])
epochs_standard.load_data() # Resampling to save memory.
epochs_standard.resample(600, npad='auto')
epochs_deviant = epochs['deviant'].load_data()
epochs_deviant.resample(600, npad='auto')
del epochs, picks
###############################################################################
# The averages for each condition are computed.
evoked_std = epochs_standard.average()
evoked_dev = epochs_deviant.average()
del epochs_standard, epochs_deviant
###############################################################################
# A typical preprocessing step is the removal of the power line artifact (50 Hz
# or 60 Hz). Here we notch filter the data at 60, 120 and 180 Hz to remove the
# original 60 Hz artifact and its harmonics. Normally this would be done on the
# raw data (with :func:`mne.io.Raw.filter`), but to reduce the memory
# consumption of this tutorial, we do it at the evoked stage.
if use_precomputed:
sfreq = evoked_std.info['sfreq']
notches = [60, 120, 180]
for evoked in (evoked_std, evoked_dev):
evoked.data[:] = notch_filter(evoked.data, sfreq, notches)
evoked.data[:] = filter_data(evoked.data, sfreq, l_freq=None,
h_freq=100.)
###############################################################################
# Here we plot the ERF of standard and deviant conditions. In both conditions
# we can see the P50 and N100 responses. The mismatch negativity is visible
# only in the deviant condition around 100-200 ms. P200 is also visible around
# 170 ms in both conditions but much stronger in the standard condition. P300
# is visible in the deviant condition only (decision making in preparation of
# the button press). You can view the topographies over a certain time span by
# clicking and holding the left mouse button to paint an area.
evoked_std.plot(window_title='Standard', gfp=True)
evoked_dev.plot(window_title='Deviant', gfp=True)
###############################################################################
# Show activations as topography figures.
times = np.arange(0.05, 0.301, 0.025)
evoked_std.plot_topomap(times=times, title='Standard')
evoked_dev.plot_topomap(times=times, title='Deviant')
###############################################################################
# We can see the MMN effect more clearly by looking at the difference between
# the two conditions. P50 and N100 are no longer visible, but MMN/P200 and
# P300 are emphasised.
evoked_difference = combine_evoked([evoked_dev, -evoked_std], weights='equal')
evoked_difference.plot(window_title='Difference', gfp=True)
###############################################################################
# Source estimation.
# We compute the noise covariance matrix from the empty room measurement
# and use it for the other runs.
reject = dict(mag=4e-12)
cov = mne.compute_raw_covariance(raw_erm, reject=reject)
cov.plot(raw_erm.info)
del raw_erm
###############################################################################
# The transformation is read from a file. For more information about coregistering
# the data, see :ref:`ch_interactive_analysis` or
# :func:`mne.gui.coregistration`.
trans_fname = op.join(data_path, 'MEG', 'bst_auditory',
'bst_auditory-trans.fif')
trans = mne.read_trans(trans_fname)
###############################################################################
# To save time and memory, the forward solution is read from a file. Set
# ``use_precomputed=False`` in the beginning of this script to build the
# forward solution from scratch. The head surfaces for constructing a BEM
# solution are read from a file. Since the data only contains MEG channels, we
# only need the inner skull surface for making the forward solution. For more
# information: :ref:`CHDBBCEJ`, :func:`mne.setup_source_space`,
# :ref:`create_bem_model`, :func:`mne.bem.make_watershed_bem`.
if use_precomputed:
fwd_fname = op.join(data_path, 'MEG', 'bst_auditory',
'bst_auditory-meg-oct-6-fwd.fif')
fwd = mne.read_forward_solution(fwd_fname)
else:
src = mne.setup_source_space(subject, spacing='ico4',
subjects_dir=subjects_dir, overwrite=True)
model = mne.make_bem_model(subject=subject, ico=4, conductivity=[0.3],
subjects_dir=subjects_dir)
bem = mne.make_bem_solution(model)
fwd = mne.make_forward_solution(evoked_std.info, trans=trans, src=src,
bem=bem)
inv = mne.minimum_norm.make_inverse_operator(evoked_std.info, fwd, cov)
snr = 3.0
lambda2 = 1.0 / snr ** 2
del fwd
###############################################################################
# The sources are computed using the dSPM method and plotted on an inflated
# brain surface. For interactive controls over the image, use the keyword
# ``time_viewer=True``.
# Standard condition.
stc_standard = mne.minimum_norm.apply_inverse(evoked_std, inv, lambda2, 'dSPM')
brain = stc_standard.plot(subjects_dir=subjects_dir, subject=subject,
surface='inflated', time_viewer=False, hemi='lh',
initial_time=0.1, time_unit='s')
del stc_standard, brain
###############################################################################
# Deviant condition.
stc_deviant = mne.minimum_norm.apply_inverse(evoked_dev, inv, lambda2, 'dSPM')
brain = stc_deviant.plot(subjects_dir=subjects_dir, subject=subject,
surface='inflated', time_viewer=False, hemi='lh',
initial_time=0.1, time_unit='s')
del stc_deviant, brain
###############################################################################
# Difference.
stc_difference = apply_inverse(evoked_difference, inv, lambda2, 'dSPM')
brain = stc_difference.plot(subjects_dir=subjects_dir, subject=subject,
surface='inflated', time_viewer=False, hemi='lh',
initial_time=0.15, time_unit='s')
| bsd-3-clause |
alvarouc/polyssifier | polyssifier/poly_utils.py | 1 | 10338 | from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import (LogisticRegression,
LinearRegression,
BayesianRidge,
Ridge, Lasso,
ElasticNet, Lars, LassoLars,
OrthogonalMatchingPursuit,
PassiveAggressiveRegressor)
from sklearn.naive_bayes import GaussianNB
from sklearn.neural_network import MLPClassifier as MLP
from sklearn.gaussian_process import GaussianProcessRegressor
import collections
import numpy as np
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.gaussian_process.kernels import RBF
class MyVoter(object):
"""
Voter Classifier
Receives fitted classifiers and runs majority voting
"""
def __init__(self, estimators):
'''
estimators: List of fitted classifiers
'''
self.estimators_ = estimators
def predict(self, X):
predictions = np.asarray(
[clf.predict(X) for clf in self.estimators_]).T
maj = np.apply_along_axis(
lambda x: np.argmax(np.bincount(x)), axis=1,
arr=predictions.astype('int'))
return maj
class MyRegressionAverager(object):
"""
Regression averager
Receives fitted regressors and averages the predictions of the regressors.
"""
def __init__(self, estimators):
'''
estimators: List of fitted regressors
'''
self.estimators_ = estimators
def predict(self, X):
predictions = np.asarray(
[reg.predict(X) for reg in self.estimators_]).T
avg = np.average(predictions, axis=1)
return avg
class MyRegressionMedianer(object):
"""
    Regression medianer
    Receives fitted regressors and takes the median of the regressors' predictions.
"""
def __init__(self, estimators):
'''
estimators: List of fitted regressors
'''
self.estimators_ = estimators
def predict(self, X):
predictions = np.asarray(
[reg.predict(X) for reg in self.estimators_]).T
avg = np.median(predictions, axis=1)
return avg
def build_classifiers(exclude, scale, feature_selection, nCols):
'''
Input:
- exclude: list of names of classifiers to exclude from the analysis
- scale: True or False. Scale data before fitting classifier
- feature_selection: True or False. Run feature selection before
fitting classifier
- nCols: Number of columns in input dataset to classifiers
Output:
Dictionary with classifier name as keys.
- 'clf': Classifier object
- 'parameters': Dictionary with parameters of 'clf' as keys
'''
classifiers = collections.OrderedDict()
if 'Multilayer Perceptron' not in exclude:
classifiers['Multilayer Perceptron'] = {
'clf': MLP(),
'parameters': {'hidden_layer_sizes': [(100, 50), (50, 25)],
'max_iter': [500]}
}
if 'Nearest Neighbors' not in exclude:
classifiers['Nearest Neighbors'] = {
'clf': KNeighborsClassifier(),
'parameters': {'n_neighbors': [1, 5, 10, 20]}}
if 'SVM' not in exclude:
classifiers['SVM'] = {
'clf': SVC(C=1, probability=True, cache_size=10000,
class_weight='balanced'),
'parameters': {'kernel': ['rbf', 'poly'],
'C': [0.01, 0.1, 1]}}
if 'Linear SVM' not in exclude:
classifiers['Linear SVM'] = {
'clf': LinearSVC(dual=False, class_weight='balanced'),
'parameters': {'C': [0.01, 0.1, 1],
'penalty': ['l1', 'l2']}}
if 'Decision Tree' not in exclude:
classifiers['Decision Tree'] = {
'clf': DecisionTreeClassifier(max_depth=None,
max_features='auto'),
'parameters': {}}
if 'Random Forest' not in exclude:
classifiers['Random Forest'] = {
'clf': RandomForestClassifier(max_depth=None,
n_estimators=10,
max_features='auto'),
'parameters': {'n_estimators': list(range(5, 20))}}
if 'Logistic Regression' not in exclude:
classifiers['Logistic Regression'] = {
'clf': LogisticRegression(fit_intercept=True, solver='lbfgs',
penalty='l2'),
'parameters': {'C': [0.001, 0.1, 1]}}
if 'Naive Bayes' not in exclude:
classifiers['Naive Bayes'] = {
'clf': GaussianNB(),
'parameters': {}}
# classifiers['Voting'] = {}
def name(x):
"""
:param x: The name of the classifier
:return: The class of the final estimator in lower case form
"""
return x['clf']._final_estimator.__class__.__name__.lower()
for key, val in classifiers.items():
if not scale and not feature_selection:
break
steps = []
if scale:
steps.append(StandardScaler())
if feature_selection:
steps.append(SelectKBest(f_regression, k='all'))
steps.append(classifiers[key]['clf'])
classifiers[key]['clf'] = make_pipeline(*steps)
        # Reorganize parameter list for grid search
new_dict = {}
for keyp in classifiers[key]['parameters']:
new_dict[name(classifiers[key]) + '__' +
keyp] = classifiers[key]['parameters'][keyp]
classifiers[key]['parameters'] = new_dict
if nCols > 5 and feature_selection:
classifiers[key]['parameters']['selectkbest__k'] = np.linspace(
np.round(nCols / 5), nCols, 5).astype('int').tolist()
return classifiers
def build_regressors(exclude, scale, feature_selection, nCols):
'''
This method builds an ordered dictionary of regressors, where the key is the name of the
regressor and the value of each key contains a standard dictionary with two keys itself. The first key called
'reg' points to the regression object, which is created by scikit learn. The second key called 'parameters'
points to another regular map containing the parameters which are associated with the particular regression model.
These parameters are used by grid search in polyssifier.py when finding the best model. If parameters are not
defined then grid search is not performed on that particular regression model, so the model's default parameters
are used instead to find the best model for the particular data.
'''
regressors = collections.OrderedDict()
if 'Linear Regression' not in exclude:
regressors['Linear Regression'] = {
'reg': LinearRegression(),
'parameters': {} # Best to leave default parameters
}
if 'Bayesian Ridge' not in exclude:
regressors['Bayesian Ridge'] = {
'reg': BayesianRidge(),
'parameters': {} # Investigate if alpha and lambda parameters should be changed
}
if 'PassiveAggressiveRegressor' not in exclude:
regressors['PassiveAggressiveRegressor'] = {
'reg': PassiveAggressiveRegressor(),
'parameters': {'C': [0.5, 1.0, 1.5]
}
}
if 'GaussianProcessRegressor' not in exclude:
regressors['GaussianProcessRegressor'] = {
'reg': GaussianProcessRegressor(),
'parameters': {
'alpha': [0.01, 0.1, 1.0, 10.0],
'kernel': [RBF(x) for x in [0.01, 1.0, 100.0, 1000.0]],
}
}
if 'Ridge' not in exclude:
regressors['Ridge'] = {
'reg': Ridge(),
'parameters': {
'alpha': [0.25, 0.50, 0.75, 1.00]
}
}
if 'Lasso' not in exclude:
regressors['Lasso'] = {
'reg': Lasso(),
'parameters': {
'alpha': [0.25, 0.50, 0.75, 1.00]
}
}
if 'Lars' not in exclude:
regressors['Lars'] = {
'reg': Lars(),
'parameters': {} # Best to leave the default parameters
}
if 'LassoLars' not in exclude:
regressors['LassoLars'] = {
'reg': LassoLars(),
'parameters': {'alpha': [0.25, 0.50, 0.75, 1.00, 10.0]}
}
if 'OrthogonalMatchingPursuit' not in exclude:
regressors['OrthogonalMatchingPursuit'] = {
'reg': OrthogonalMatchingPursuit(),
'parameters': {} # Best to leave default parameters
}
if 'ElasticNet' not in exclude:
regressors['ElasticNet'] = {
'reg': ElasticNet(),
'parameters': {'alpha': [0.25, 0.50, 0.75, 1.00],
'l1_ratio': [0.25, 0.50, 0.75, 1.00]}
}
def name(x):
"""
:param x: The name of the regressor
:return: The class of the final regression estimator in lower case form
"""
return x['reg']._final_estimator.__class__.__name__.lower()
for key, val in regressors.items():
if not scale and not feature_selection:
break
steps = []
if scale:
steps.append(StandardScaler())
if feature_selection:
steps.append(SelectKBest(f_regression, k='all'))
steps.append(regressors[key]['reg'])
regressors[key]['reg'] = make_pipeline(*steps)
        # Reorganize parameter list for grid search
new_dict = {}
for keyp in regressors[key]['parameters']:
new_dict[name(regressors[key]) + '__' +
keyp] = regressors[key]['parameters'][keyp]
regressors[key]['parameters'] = new_dict
if nCols > 5 and feature_selection:
regressors[key]['parameters']['selectkbest__k'] = np.linspace(
np.round(nCols / 5), nCols, 5).astype('int').tolist()
return regressors
| gpl-2.0 |
UMWRG/HydraPlatform | HydraServer/python/HydraServer/plugins/timeseries_functions.py | 2 | 5406 | # (c) Copyright 2013, 2014, University of Manchester
#
# HydraPlatform is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HydraPlatform is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HydraPlatform. If not, see <http://www.gnu.org/licenses/>
#
from spyne.decorator import rpc
from spyne.model.primitive import Integer, Unicode, AnyDict
from HydraServer.soap_server.hydra_base import HydraService
from HydraServer.lib.data import get_dataset
from HydraLib.HydraException import HydraError
from HydraServer.util import get_val
import logging
import numpy
import json
log = logging.getLogger(__name__)
op_map = {
'add' : lambda x, y: numpy.add(x, y),
'subtract' : lambda x, y: numpy.subtract(x, y),
'multiply' : lambda x, y: numpy.multiply(x, y),
'divide' : lambda x, y: numpy.divide(x, y),
'avg' : lambda x : numpy.mean(x),
'stddev' : lambda x : numpy.std(x),
}
class Service(HydraService):
__service_name__ = "TimeseriesService"
@rpc(Integer(min_occurs=2, max_occurs='unbounded'), _returns=Unicode)
def subtract_datasets(ctx, dataset_ids):
"""
Subtract the value of dataset[1] from the value of dataset[0].
        Then subtract dataset[2] from the result, and so on.
Rules: 1: The datasets must be of the same type
2: The datasets must be numerical
3: If timeseries, the timesteps must match.
The result is a new value, NOT a new dataset. It is up
to the client to create a new datasets with the resulting value
if they wish to do so.
"""
return _perform_op_on_datasets('subtract', dataset_ids, **ctx.in_header.__dict__)
@rpc(Integer(min_occurs=2, max_occurs='unbounded'), _returns=Unicode)
def add_datasets(ctx, dataset_ids):
"""
Add the value of dataset[0] to the value of dataset[1] etc.
Rules: 1: The datasets must be of the same type
2: The datasets must be numerical
3: If timeseries, the timesteps must match.
The result is a new value, NOT a new dataset. It is up
to the client to create a new datasets with the resulting value
if they wish to do so.
"""
return _perform_op_on_datasets('add', dataset_ids, **ctx.in_header.__dict__)
@rpc(Integer(min_occurs=2, max_occurs='unbounded'), _returns=Unicode)
def multiply_datasets(ctx, dataset_ids):
"""
Multiply the value of dataset[0] by the value of dataset[1] and the result
by the value of dataset[2] etc.
Rules: 1: The datasets must be of the same type
2: The datasets must be numerical
3: If timeseries, the timesteps must match.
The result is a new value, NOT a new dataset. It is up
to the client to create a new datasets with the resulting value
if they wish to do so.
"""
return _perform_op_on_datasets('multiply', dataset_ids, **ctx.in_header.__dict__)
@rpc(Integer(min_occurs=2, max_occurs='unbounded'), _returns=Unicode)
def divide_datasets(ctx, dataset_ids):
"""
Divide the value of dataset[0] by the value of dataset[1], the
result of which is divided by the value of dataset[2] etc.
Rules: 1: The datasets must be of the same type
2: The datasets must be numerical
3: If timeseries, the timesteps must match.
The result is a new value, NOT a new dataset. It is up
to the client to create a new datasets with the resulting value
if they wish to do so.
"""
return _perform_op_on_datasets('divide', dataset_ids, **ctx.in_header.__dict__)
def _perform_op_on_datasets(op, dataset_ids, **kwargs):
datasets = []
for dataset_id in dataset_ids:
datasets.append(get_dataset(dataset_id, **kwargs))
data_type = None
vals = []
for d in datasets:
if data_type is None:
data_type = d.data_type
if data_type == 'descriptor':
raise HydraError("Data must be numerical")
else:
            if data_type != d.data_type:
raise HydraError("Data types do not match.")
dataset_val = get_val(d)
if data_type == 'timeseries':
dataset_val = dataset_val.astype('float')
vals.append(dataset_val)
_op = op_map[op]
op_result = vals[0]
for v in vals[1:]:
try:
op_result = _op(op_result, v)
except:
raise HydraError("Unable to perform operation %s on values %s and %s"
%(op, op_result, v))
if data_type == 'timeseries':
return op_result.to_json(date_format='iso', date_unit='ns')
elif data_type == 'array':
return json.dumps(list(op_result))
else:
return json.dumps(str(op_result))
| gpl-3.0 |
kastnerkyle/pylearn2 | pylearn2/scripts/datasets/step_through_small_norb.py | 49 | 3123 | #! /usr/bin/env python
"""
A script for sequentially stepping through SmallNORB, viewing each image and
its label.
Intended as a demonstration of how to iterate through NORB images,
and as a way of testing SmallNORB's StereoViewConverter.
If you just want an image viewer, consider
pylearn2/scripts/show_binocular_grayscale_images.py,
which is not specific to SmallNORB.
"""
__author__ = "Matthew Koichi Grimes"
__copyright__ = "Copyright 2010-2014, Universite de Montreal"
__credits__ = __author__
__license__ = "3-clause BSD"
__maintainer__ = __author__
__email__ = "mkg alum mit edu (@..)"
import argparse, pickle, sys
from matplotlib import pyplot
from pylearn2.datasets.norb import SmallNORB
from pylearn2.utils import safe_zip
def main():
def parse_args():
parser = argparse.ArgumentParser(
description="Step-through visualizer for SmallNORB dataset")
parser.add_argument("--which_set",
default='train',
required=True,
help=("'train', 'test', or the path to a "
"SmallNORB .pkl file"))
return parser.parse_args()
def load_norb(args):
if args.which_set in ('test', 'train'):
return SmallNORB(args.which_set, True)
else:
norb_file = open(args.which_set)
return pickle.load(norb_file)
args = parse_args()
norb = load_norb(args)
topo_space = norb.view_converter.topo_space # does not include label space
vec_space = norb.get_data_specs()[0].components[0]
figure, axes = pyplot.subplots(1, 2, squeeze=True)
figure.suptitle("Press space to step through, or 'q' to quit.")
def draw_and_increment(iterator):
"""
Draws the image pair currently pointed at by the iterator,
then increments the iterator.
"""
def draw(batch_pair):
for axis, image_batch in safe_zip(axes, batch_pair):
assert image_batch.shape[0] == 1
grayscale_image = image_batch[0, :, :, 0]
axis.imshow(grayscale_image, cmap='gray')
figure.canvas.draw()
def get_values_and_increment(iterator):
try:
vec_stereo_pair, labels = norb_iter.next()
except StopIteration:
return (None, None)
topo_stereo_pair = vec_space.np_format_as(vec_stereo_pair,
topo_space)
return topo_stereo_pair, labels
batch_pair, labels = get_values_and_increment(norb_iter)
draw(batch_pair)
norb_iter = norb.iterator(mode='sequential',
batch_size=1,
data_specs=norb.get_data_specs())
def on_key_press(event):
if event.key == ' ':
draw_and_increment(norb_iter)
if event.key == 'q':
sys.exit(0)
figure.canvas.mpl_connect('key_press_event', on_key_press)
draw_and_increment(norb_iter)
pyplot.show()
if __name__ == "__main__":
main()
| bsd-3-clause |
OpenMined/PySyft | benchmarks/macro_executor.py | 1 | 3935 | # stdlib
from datetime import date
import json
import os
from pathlib import Path
import subprocess
from time import time
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
# third party
import pyarrow.parquet as pq
# syft absolute
import syft as sy
from syft.core.adp.data_subject_list import DataSubjectList
from syft.core.node.common.node_service.user_manager.user_messages import (
UpdateUserMessage,
)
from syft.util import download_file
from syft.util import get_root_data_path
benchmark_report: dict = {}
today = date.today()
date = today.strftime("%B %d, %Y")
benchmark_report["date"] = date
def get_git_revision_short_hash() -> str:
return (
subprocess.check_output(["git", "rev-parse", "--short", "HEAD"])
.decode("ascii")
.strip()
)
benchmark_report["git_revision_hash"] = get_git_revision_short_hash()
def download_spicy_bird_benchmark(
sizes: Optional[List[str]] = None,
) -> Tuple[Dict[str, Path], List[str]]:
sizes = sizes if sizes else ["100K", "250K", "500K", "750K", "1M", "1B"]
file_suffix = "_rows_dataset_sample.parquet"
BASE_URL = "https://raw.githubusercontent.com/madhavajay/datasets/main/spicy_bird/"
folder_name = "spicy_bird"
dataset_path = get_root_data_path() / folder_name
paths = []
for size in sizes:
filename = f"{size}{file_suffix}"
full_path = dataset_path / filename
url = f"{BASE_URL}{filename}"
if not os.path.exists(full_path):
print(url)
path = download_file(url=url, full_path=full_path)
else:
path = Path(full_path)
paths.append(path)
return dict(zip(sizes, paths)), sizes
key_size = "1B"
files, ordered_sizes = download_spicy_bird_benchmark(sizes=[key_size])
data_file = files[key_size]
benchmark_report["data_row_size"] = key_size
t0 = time()
df = pq.read_table(data_file)
end_time = time()
tf = round(time() - t0, 4)
print(f"Time taken to read parquet file: {round(tf, 2)} seconds")
benchmark_report["read_parquet"] = tf
t0 = time()
impressions = df["impressions"].to_numpy()
data_subjects = DataSubjectList.from_series(df["user_id"])
tf = round(time() - t0, 4)
benchmark_report["data_subject_list_creation"] = tf
print(f"Time taken to create inputs for Syft Tensor: {round(tf,2)} seconds")
t0 = time()
tweets_data = sy.Tensor(impressions).annotate_with_dp_metadata(
lower_bound=70, upper_bound=2000, data_subjects=data_subjects
)
tf = round(time() - t0, 4)
print(f"Time taken to make Private Syft Tensor: {round(tf,2)} seconds")
benchmark_report["make_private_syft_tensor"] = tf
# login to domain
domain_node = sy.login(email="info@openmined.org", password="changethis", port=9082)
# Upgrade the admin's budget
content = {"user_id": 1, "budget": 9_999_999}
domain_node._perform_grid_request(grid_msg=UpdateUserMessage, content=content)
dataset_name = "1B Tweets dataset"
t0 = time()
domain_node.load_dataset(
assets={"1B Tweets dataset": tweets_data},
name=dataset_name,
description=" Tweets- 1B rows",
)
tf = round(time() - t0, 3)
print(f"Time taken to load {dataset_name} dataset: {tf} seconds")
benchmark_report["load_dataset"] = tf
data = domain_node.datasets[-1]["1B Tweets dataset"]
print(data)
sum_result = data.sum()
try:
t0 = time()
sum_result.block
tf = round(time() - t0, 3)
except Exception as e:
print(e)
print(f"Time taken to get sum: {tf} seconds")
benchmark_report["get_sum"] = tf
# Sum result publish
published_result = sum_result.publish(sigma=1e6)
t0 = time()
published_result.block
tf = round(time() - t0, 3)
print(f"Time taken to publish: {tf} seconds")
benchmark_report["publish"] = tf
print(benchmark_report)
benchmark_report_json = json.dumps(benchmark_report, indent=4)
print(benchmark_report_json)
with open("macro_benchmark.json", "w") as outfile:
outfile.write(benchmark_report_json)
| apache-2.0 |
shangwuhencc/scikit-learn | sklearn/gaussian_process/tests/test_gaussian_process.py | 265 | 6813 | """
Testing for Gaussian Process module (sklearn.gaussian_process)
"""
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Licence: BSD 3 clause
from nose.tools import raises
from nose.tools import assert_true
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
from sklearn.datasets import make_regression
from sklearn.utils.testing import assert_greater
f = lambda x: x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
def test_1d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a one-dimensional Gaussian Process model.
# Check random start optimization.
# Test the interpolating property.
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=random_start, verbose=False).fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
y2_pred, MSE2 = gp.predict(X2, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)
and np.allclose(MSE2, 0., atol=10))
def test_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the interpolating property.
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = g(X).ravel()
thetaL = [1e-4] * 2
thetaU = [1e-1] * 2
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=thetaL,
thetaU=thetaU,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
eps = np.finfo(gp.theta_.dtype).eps
assert_true(np.all(gp.theta_ >= thetaL - eps)) # Lower bounds of hyperparameters
assert_true(np.all(gp.theta_ <= thetaU + eps)) # Upper bounds of hyperparameters
def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the GP interpolation for 2D output
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
f = lambda x: np.vstack((g(x), g(x))).T
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = f(X)
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=[1e-4] * 2,
thetaU=[1e-1] * 2,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
@raises(ValueError)
def test_wrong_number_of_outputs():
gp = GaussianProcess()
gp.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
def test_more_builtin_correlation_models(random_start=1):
# Repeat test_1d and test_2d for several built-in correlation
# models specified as strings.
all_corr = ['absolute_exponential', 'squared_exponential', 'cubic',
'linear']
for corr in all_corr:
test_1d(regr='constant', corr=corr, random_start=random_start)
test_2d(regr='constant', corr=corr, random_start=random_start)
test_2d_2d(regr='constant', corr=corr, random_start=random_start)
def test_ordinary_kriging():
# Repeat test_1d and test_2d with given regression weights (beta0) for
# different regression models (Ordinary Kriging).
test_1d(regr='linear', beta0=[0., 0.5])
test_1d(regr='quadratic', beta0=[0., 0.5, 0.5])
test_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
test_2d_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
def test_no_normalize():
gp = GaussianProcess(normalize=False).fit(X, y)
y_pred = gp.predict(X)
assert_true(np.allclose(y_pred, y))
def test_random_starts():
# Test that an increasing number of random-starts of GP fitting only
# increases the reduced likelihood function of the optimal theta.
n_samples, n_features = 50, 3
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)
best_likelihood = -np.inf
for random_start in range(1, 5):
gp = GaussianProcess(regr="constant", corr="squared_exponential",
theta0=[1e-0] * n_features,
thetaL=[1e-4] * n_features,
thetaU=[1e+1] * n_features,
random_start=random_start, random_state=0,
verbose=False).fit(X, y)
rlf = gp.reduced_likelihood_function()[0]
assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps)
best_likelihood = rlf
def test_mse_solving():
# test the MSE estimate to be sane.
# non-regression test for ignoring off-diagonals of feature covariance,
# testing with nugget that renders covariance useless, only
# using the mean function, with low effective rank of data
gp = GaussianProcess(corr='absolute_exponential', theta0=1e-4,
thetaL=1e-12, thetaU=1e-2, nugget=1e-2,
optimizer='Welch', regr="linear", random_state=0)
X, y = make_regression(n_informative=3, n_features=60, noise=50,
random_state=0, effective_rank=1)
gp.fit(X, y)
assert_greater(1000, gp.predict(X, eval_MSE=True)[1].mean())
| bsd-3-clause |
lakshayg/tensorflow | tensorflow/contrib/slim/python/slim/data/dataset_data_provider.py | 53 | 4253 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A DataProvider that provides data from a Dataset.
DatasetDataProviders provide data from datasets. The provider can be configured
to use multiple readers simultaneously or read via a single reader.
Additionally, the data being read can be optionally shuffled.
For example, to read data using a single thread without shuffling:
pascal_voc_data_provider = DatasetDataProvider(
slim.datasets.pascal_voc.get_split('train'),
shuffle=False)
images, labels = pascal_voc_data_provider.get(['images', 'labels'])
To read data using multiple readers simultaneously with shuffling:
pascal_voc_data_provider = DatasetDataProvider(
slim.datasets.pascal_voc.Dataset(),
num_readers=10,
shuffle=True)
images, labels = pascal_voc_data_provider.get(['images', 'labels'])
Equivalently, one may request different fields of the same sample separately:
[images] = pascal_voc_data_provider.get(['images'])
[labels] = pascal_voc_data_provider.get(['labels'])
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.slim.python.slim.data import data_provider
from tensorflow.contrib.slim.python.slim.data import parallel_reader
class DatasetDataProvider(data_provider.DataProvider):
def __init__(self,
dataset,
num_readers=1,
reader_kwargs=None,
shuffle=True,
num_epochs=None,
common_queue_capacity=256,
common_queue_min=128,
record_key='record_key',
seed=None,
scope=None):
"""Creates a DatasetDataProvider.
Note: if `num_epochs` is not `None`, local counter `epochs` will be created
by relevant function. Use `local_variables_initializer()` to initialize
local variables.
Args:
dataset: An instance of the Dataset class.
num_readers: The number of parallel readers to use.
reader_kwargs: An optional dict of kwargs for the reader.
shuffle: Whether to shuffle the data sources and common queue when
reading.
num_epochs: The number of times each data source is read. If left as None,
the data will be cycled through indefinitely.
common_queue_capacity: The capacity of the common queue.
common_queue_min: The minimum number of elements in the common queue after
a dequeue.
record_key: The item name to use for the dataset record keys in the
provided tensors.
seed: The seed to use if shuffling.
scope: Optional name scope for the ops.
Raises:
ValueError: If `record_key` matches one of the items in the dataset.
"""
key, data = parallel_reader.parallel_read(
dataset.data_sources,
reader_class=dataset.reader,
num_epochs=num_epochs,
num_readers=num_readers,
reader_kwargs=reader_kwargs,
shuffle=shuffle,
capacity=common_queue_capacity,
min_after_dequeue=common_queue_min,
seed=seed,
scope=scope)
items = dataset.decoder.list_items()
tensors = dataset.decoder.decode(data, items)
items_to_tensors = dict(zip(items, tensors))
if record_key in items_to_tensors:
raise ValueError('The item name used for `record_key` cannot also be '
'used for a dataset item: %s', record_key)
items_to_tensors[record_key] = key
super(DatasetDataProvider, self).__init__(
items_to_tensors=items_to_tensors,
num_samples=dataset.num_samples)
| apache-2.0 |
wavycloud/pyboto3 | pyboto3/glue.py | 1 | 692979 | "'''\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 WavyCloud\n\nPermission is hereby granted, free (...TRUNCATED) | mit |
shangwuhencc/scikit-learn | examples/linear_model/plot_ransac.py | 249 | 1673 | "\"\"\"\n===========================================\nRobust linear model estimation using RANSAC\n=(...TRUNCATED) | bsd-3-clause |
End of preview.
Downloads last month: 15
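As a usage sketch, the snippet below shows one way to load and inspect rows of a dataset with the schema listed at the top of this preview, using the `datasets` library. The repository id and the split name are assumptions (placeholders), since neither appears in this preview.

```python
# Minimal usage sketch. The repository id below is a placeholder and "train" is
# an assumed split name; substitute the values shown on the dataset page.
from datasets import load_dataset

# Stream the rows so the large "content" fields (up to ~1.04M characters in this
# preview) are fetched lazily instead of being downloaded all at once.
ds = load_dataset("<namespace>/<dataset-name>", split="train", streaming=True)

for i, row in enumerate(ds):
    # Each row mirrors the columns of the preview table above.
    print(row["repo_name"], row["path"], row["size"], row["license"])
    print(row["content"][:200])  # first 200 characters of the stored source file
    if i == 2:  # inspect only the first three rows
        break
```

Streaming is optional; dropping `streaming=True` downloads and caches the full split locally instead.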