repo_name | path | copies | size | content | license
---|---|---|---|---|---|
CCI-Tools/cate-core | cate/ops/utility.py | 1 | 12435 | # The MIT License (MIT)
# Copyright (c) 2016, 2017 by the ESA CCI Toolbox development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module provides general utility operations that wrap specific ``xarray`` functions.
The intention is to make available the ``xarray`` API as a set of general, domain-independent
utility functions.
All operations in this module are tagged with the ``"utility"`` tag.
"""
import pandas as pd
import xarray as xr
from datetime import timezone
from cate.core.ds import NetworkError, DataAccessError
from cate.core.op import op, op_input, op_return
from cate.core.types import DatasetLike, PointLike, TimeLike, DictLike, Arbitrary, Literal, ValidationError
from cate.util.monitor import Monitor
@op(tags=['utility'])
@op_input('ds_1', data_type=DatasetLike)
@op_input('ds_2', data_type=DatasetLike)
@op_input('ds_3', data_type=DatasetLike)
@op_input('ds_4', data_type=DatasetLike)
@op_input('join', value_set=["outer", "inner", "left", "right", "exact"])
@op_input('compat', value_set=["identical", "equals", "broadcast_equals", "no_conflicts"])
def merge(ds_1: DatasetLike.TYPE,
ds_2: DatasetLike.TYPE,
ds_3: DatasetLike.TYPE = None,
ds_4: DatasetLike.TYPE = None,
join: str = 'outer',
compat: str = 'no_conflicts') -> xr.Dataset:
"""
Merge up to four datasets to produce a new dataset with combined variables from each input dataset.
This is a wrapper for the ``xarray.merge()`` function.
For documentation refer to xarray documentation at
http://xarray.pydata.org/en/stable/generated/xarray.Dataset.merge.html#xarray.Dataset.merge
The *compat* argument indicates how to compare variables of the same name for potential conflicts:
* "broadcast_equals": all values must be equal when variables are broadcast
against each other to ensure common dimensions.
* "equals": all values and dimensions must be the same.
* "identical": all values, dimensions and attributes must be the same.
* "no_conflicts": only values which are not null in both datasets must be equal.
The returned dataset then contains the combination of all non-null values.
:param ds_1: The first input dataset.
:param ds_2: The second input dataset.
:param ds_3: An optional 3rd input dataset.
:param ds_4: An optional 4th input dataset.
:param join: How to combine objects with different indexes.
:param compat: How to compare variables of the same name for potential conflicts.
:return: A new dataset with combined variables from each input dataset.
"""
ds_1 = DatasetLike.convert(ds_1)
ds_2 = DatasetLike.convert(ds_2)
ds_3 = DatasetLike.convert(ds_3)
ds_4 = DatasetLike.convert(ds_4)
datasets = []
for ds in (ds_1, ds_2, ds_3, ds_4):
if ds is not None:
included = False
for ds2 in datasets:
if ds is ds2:
included = True
if not included:
datasets.append(ds)
if len(datasets) == 0:
raise ValidationError('At least two different datasets must be given')
elif len(datasets) == 1:
return datasets[0]
else:
return xr.merge(datasets, compat=compat, join=join)
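# Hypothetical usage sketch (not part of the original module): combine two
# in-memory datasets, keeping only indexes common to both and requiring
# non-conflicting values. The variable names here are illustrative only.
#
#     sst = xr.Dataset({'sst': ('time', [290.1, 291.5])})
#     cloud = xr.Dataset({'cc': ('time', [0.2, 0.4])})
#     combined = merge(ds_1=sst, ds_2=cloud, join='inner', compat='no_conflicts')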
@op(tags=['utility'])
@op_input('ds', data_type=DatasetLike)
@op_input('point', data_type=PointLike, units='degree')
@op_input('time', data_type=TimeLike)
@op_input('indexers', data_type=DictLike)
@op_input('method', value_set=['nearest', 'ffill', 'bfill'])
def sel(ds: DatasetLike.TYPE,
point: PointLike.TYPE = None,
time: TimeLike.TYPE = None,
indexers: DictLike.TYPE = None,
method: str = 'nearest') -> xr.Dataset:
"""
Return a new dataset with each array indexed by tick labels along the specified dimension(s).
This is a wrapper for the ``xarray.sel()`` function.
For documentation refer to xarray documentation at
http://xarray.pydata.org/en/stable/generated/xarray.Dataset.sel.html#xarray.Dataset.sel
:param ds: The dataset from which to select.
:param point: Optional geographic point given by longitude and latitude
:param time: Optional time
:param indexers: Keyword arguments with names matching dimensions and values given by scalars,
slices or arrays of tick labels. For dimensions with multi-index, the indexer may also be
a dict-like object with keys matching index level names.
:param method: Method to use for inexact matches:
* None: only exact matches
* ``pad`` / ``ffill``: propagate last valid index value forward
* ``backfill`` / ``bfill``: propagate next valid index value backward
* ``nearest`` (default): use nearest valid index value
:return: A new Dataset with the same contents as this dataset, except each variable and dimension
is indexed by the appropriate indexers. In general, each variable's data will be a view of the
variable's data in this dataset.
"""
ds = DatasetLike.convert(ds)
point = PointLike.convert(point)
time = TimeLike.convert(time)
indexers = DictLike.convert(indexers)
indexers = dict(indexers or {})
if point is not None:
indexers.setdefault('lon', point.x)
indexers.setdefault('lat', point.y)
if time is not None:
indexers.setdefault('time', time)
# Filter out non-existent coordinates
indexers = {name: value for name, value in indexers.items() if name in ds.coords}
return ds.sel(method=method, **indexers)
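# Hypothetical usage sketch (not part of the original module): pick the grid
# cell nearest to 10.5 deg E, 34.2 deg N on a given day, assuming PointLike
# accepts a (lon, lat) pair and the dataset has 'lon', 'lat' and 'time'
# coordinates.
#
#     subset = sel(ds, point=(10.5, 34.2), time='2014-09-07', method='nearest')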
@op(tags=['utility'])
def from_data_frame(df: pd.DataFrame) -> xr.Dataset:
"""
Convert the given dataframe to an xarray dataset.
This is a wrapper for the ``xarray.from_dataframe()`` function.
For documentation refer to xarray documentation at
http://xarray.pydata.org/en/stable/generated/xarray.Dataset.from_dataframe.html#xarray.Dataset.from_dataframe
:param df: Dataframe to convert
:return: A dataset created from the given dataframe
"""
return xr.Dataset.from_dataframe(df)
@op(tags=['utility'])
@op_input('value', data_type=Arbitrary)
@op_return(data_type=Arbitrary)
def identity(value: Arbitrary.TYPE) -> Arbitrary.TYPE:
"""
Return the given value.
This operation can be useful to create constant resources to be used as input for other operations.
:param value: An arbitrary (Python) value.
"""
return value
@op(tags=['utility'])
@op_input('value', data_type=Literal)
@op_return(data_type=Arbitrary)
def literal(value: Literal.TYPE) -> Arbitrary.TYPE:
"""
Return the given value.
This operation can be useful to create constant resources to be used as input for other operations.
:param value: An arbitrary (Python) literal.
"""
return Literal.convert(value)
@op(tags=['utility'])
def dummy_ds(lon_dim: int = 360,
lat_dim: int = 180,
time_dim: int = 5) -> xr.Dataset:
"""
Create a dummy dataset.
:param lon_dim: Number of grid cells in longitude direction
:param lat_dim: Number of grid cells in latitude direction
:param time_dim: Number of time steps
:return: a dummy dataset
"""
import numpy as np
temperature = 15 + 8 * np.random.randn(time_dim, lat_dim, lon_dim)
precipitation = 10 * np.random.rand(time_dim, lat_dim, lon_dim)
lon_delta = 360. / lon_dim
lat_delta = 180. / lat_dim
lon = np.arange(-180. + 0.5 * lon_delta, 180., lon_delta)
lat = np.arange(-90. + 0.5 * lat_delta, 90., lat_delta)
time = pd.date_range('2014-09-06', periods=time_dim)
return xr.Dataset({'temperature': (['time', 'lat', 'lon'], temperature),
'precipitation': (['time', 'lat', 'lon'], precipitation)},
coords={'lon': lon,
'lat': lat,
'time': time,
'reference_time': pd.Timestamp('2014-09-05', tzinfo=timezone.utc)})
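# Hypothetical usage sketch (not part of the original module): a small grid
# for quick experiments; the 'temperature' variable is laid out as
# (time, lat, lon).
#
#     ds = dummy_ds(lon_dim=10, lat_dim=10, time_dim=3)
#     assert ds.temperature.shape == (3, 10, 10)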
_ERROR_TYPES = {
'Value': ValueError,
'OS': OSError,
'Memory': MemoryError,
'Network': NetworkError,
'Data Access': DataAccessError,
'Validation': ValidationError,
}
@op(tags=['utility'])
@op_input('step_duration', units='seconds')
@op_input('error_type', value_set=['Value', 'OS', 'Memory', 'Network', 'Data Access', 'Validation'])
def no_op(num_steps: int = 20,
step_duration: float = 0.5,
fail_before: bool = False,
fail_after: bool = False,
error_type: str = 'Value',
monitor: Monitor = Monitor.NONE) -> bool:
"""
An operation that does nothing but spend a configurable amount of time.
It may be useful for testing purposes.
:param num_steps: Number of steps to iterate.
:param step_duration: How much time to spend in each step in seconds.
:param fail_before: If the operation should fail before spending time doing nothing (raise a ValidationError).
:param fail_after: If the operation should fail after spending time doing nothing (raise a ValueError).
:param error_type: The type of error to raise.
:param monitor: A progress monitor.
:return: Always True
"""
import time
with monitor.starting('Computing nothing', num_steps):
if fail_before:
error_class = _ERROR_TYPES[error_type]
raise error_class(f'This is a test: intentionally failed with a {error_type} error'
f' before doing nothing {num_steps} times.')
for i in range(num_steps):
time.sleep(step_duration)
monitor.progress(1.0, 'Step %s of %s doing nothing' % (i + 1, num_steps))
if fail_after:
error_class = _ERROR_TYPES[error_type]
raise error_class(f'Intentionally failed with a {error_type} error'
f' after doing nothing {num_steps} times.')
return True
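# Hypothetical usage sketch (not part of the original module): a short run
# that spends roughly one second and reports progress to the default monitor.
#
#     assert no_op(num_steps=2, step_duration=0.5)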
@op(tags=['utility', 'internal'])
@op_input('method', value_set=['backfill', 'bfill', 'pad', 'ffill'])
def pandas_fillna(df: pd.DataFrame,
value: float = None,
method: str = None,
limit: int = None,
**kwargs) -> pd.DataFrame:
"""
Return a new dataframe with NaN values filled according to the given value
or method.
This is a wrapper for the ``pandas.fillna()`` function. For additional
keyword arguments and information refer to pandas documentation at
http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.fillna.html
:param df: The dataframe to fill
:param value: Value to fill
:param method: Method according to which to fill NaN. ffill/pad will
propagate the last valid observation to the next valid observation.
backfill/bfill will propagate the next valid observation back to the last
valid observation.
:param limit: Maximum number of NaN values to forward/backward fill.
:return: A dataframe with nan values filled with the given value or according to the given method.
"""
# The following code is needed, because Pandas treats any kw given in kwargs as being set, even if just None.
kwargs = dict(kwargs)
if value:
kwargs.update(value=value)
if method:
kwargs.update(method=method)
if limit:
kwargs.update(limit=limit)
return df.fillna(**kwargs)
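# Hypothetical usage sketch (not part of the original module): forward-fill
# gaps in a time series, filling at most two consecutive NaN values.
#
#     df = pd.DataFrame({'t2m': [280.1, None, None, None, 281.0]})
#     filled = pandas_fillna(df, method='ffill', limit=2)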
| mit |
marionleborgne/nupic.research | htmresearch/support/junit_testing.py | 9 | 8727 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
helpStr = """
Methods to run unit tests.
"""
import itertools
import numpy
import os
import plotly.plotly as py
from plotly.graph_objs import Box, Figure, Histogram, Layout
from prettytable import PrettyTable
from scipy.stats import skew
from htmresearch.frameworks.nlp.classification_model import ClassificationModel
from htmresearch.frameworks.nlp.model_factory import (
createModel, getNetworkConfig)
from htmresearch.support.csv_helper import readDataAndReshuffle
# There should be one "htm" model for each htm config entry.
nlpModelTypes = [
"CioDocumentFingerprint",
"CioWordFingerprint",
"htm",
"htm",
"htm",
"Keywords"]
htmConfigs = {
("HTM_sensor_knn", "data/network_configs/sensor_knn.json"),
("HTM_sensor_simple_tp_knn", "data/network_configs/sensor_simple_tp_knn.json"),
("HTM_sensor_tm_knn", "data/network_configs/sensor_tm_knn.json"),
}
# Some values of k we know work well.
kValues = { "Keywords": 21 }
def setupExperiment(args):
"""
Create model according to args, train on training data, save model,
restore model.
@return newModel (ClassificationModel) The restored NLP model.
@return dataSet (list) Each item is a list representing a data sample, with
the text string, list of label indices, and the sample ID.
"""
dataSet, labelRefs, _, _ = readDataAndReshuffle(args)
args.numLabels = len(labelRefs)
# Create a model, train it, save it, reload it
model = instantiateModel(args)
model = trainModel(model, dataSet, labelRefs, args.verbosity)
model.save(args.modelDir)
newModel = ClassificationModel.load(args.modelDir)
return newModel, dataSet
def instantiateModel(args):
"""
Set some specific arguments and return an instance of the model we will use.
"""
args.networkConfig = getNetworkConfig(args.networkConfigPath)
args.k = kValues.get(args.modelName, 1)
return createModel(**vars(args))
def trainModel(model, trainingData, labelRefs, verbosity=0):
"""
Train the given model on trainingData. Return the trained model instance.
"""
modelName = repr(model).split()[0].split(".")[-1]
print
print "===================Training {} on sample text================".format(
modelName)
if verbosity > 0:
printTemplate = PrettyTable(["ID", "Document", "Label"])
printTemplate.align = "l"
printTemplate.header_style = "upper"
for (document, labels, docId) in trainingData:
if verbosity > 0:
docStr = unicode(document, errors='ignore')[0:100]
printTemplate.add_row([docId, docStr, labelRefs[labels[0]]])
model.trainDocument(document, labels, docId)
if verbosity > 0:
print printTemplate
return model
def testModel(model, testData, categorySize, verbosity=0):
"""
Test the given model on testData, print out and return results metrics.
For each data sample in testData the model infers the similarity to each other
sample; distances are number of bits apart. We then find the "ranks" of true
positive (TP) documents -- those that are in the same category as the test
document. Ideally these ranks will be low, and a perfect result would be ranks
0 through categorySize - 1.
The stats we use to describe these ranks are mean and skewness -- about 0 for
normally distributed data, and a skewness value > 0 means the mass of the
distribution is concentrated on the left, with a longer right tail. For example,
[10, 11, 12, 13, 14, 15] --> mean=12.5, skew=0.0
[0, 1, 2, 3, 4, 72] --> mean=13.7, skew=1.8
@param categorySize (int) Number of documents per category; these unit tests
use datasets with an exact number of docs in each category.
@return (numpy array) Rank positions of TPs for all test instances.
@return avgRanks (numpy array) Average rank positions of TPs -- length is the
categorySize.
@return avgStats (numpy array) Average stats of the TP ranks -- length is the
categorySize.
"""
modelName = repr(model).split()[0].split(".")[-1]
print
print "===================Testing {} on sample text==================".format(
modelName)
if verbosity > 0:
print
printTemplate = PrettyTable(["ID", "Document", "TP", "Ranks (Mean, Skew)"])
printTemplate.align = "l"
printTemplate.header_style = "upper"
allRanks = []
summedRanks = numpy.zeros(categorySize)
totalTPs = 0
for (document, labels, docId) in testData:
_, sortedIds, sortedDistances = model.inferDocument(
document, returnDetailedResults=True, sortResults=True)
# Compute TPs for this document
expectedCategory = docId / 100
truePositives = 0
for i in xrange(categorySize):
if (i < len(sortedIds)) and sortedIds[i]/100 == expectedCategory:
truePositives += 1
totalTPs += truePositives
if (verbosity >= 1) and (truePositives < categorySize):
print "\nIncorrect inference result:"
print "docId=",docId,"document=",document
print "sortedIds=",sortedIds
print "truePositives = ",truePositives
# Compute the rank metrics for this document
if len(sortedIds) > 0:
ranks = numpy.array(
[i for i, index in enumerate(sortedIds) if index/100 == expectedCategory])
allRanks.extend(ranks)
summedRanks += ranks
ranksMean = round(ranks.mean(), 2)
ranksSkew = round(skew(ranks), 2)
if verbosity > 0:
docStr = unicode(document, errors='ignore')[0:100]
printTemplate.add_row(
[docId, docStr, truePositives, (ranksMean, ranksSkew)])
lengthOfTest = float(len(testData))
avgRanks = summedRanks/lengthOfTest
avgStats = (round(avgRanks.mean(), 2), round(skew(avgRanks), 2))
if verbosity > 0:
print printTemplate
print
print "Averages across all test documents:"
print "TPs =", totalTPs/lengthOfTest
print "Rank metrics (mean, skew) = ({}, {})".format(avgStats[0], avgStats[1])
return numpy.array(allRanks), avgRanks, avgStats
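# Hypothetical sanity check (not part of the original module) of the rank
# statistics quoted in the testModel docstring, using the numpy/scipy helpers
# imported above:
#     numpy.array([0, 1, 2, 3, 4, 72]).mean() -> ~13.7
#     skew(numpy.array([0, 1, 2, 3, 4, 72])) -> ~1.8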
def printRankResults(testName, avgRanks, avgStats):
""" Print the ranking metric results."""
printTemplate = "{0:<32}|{1:<10}"
print
print
print "Averaged rank metrics for {}:".format(testName)
print printTemplate.format("Avg. ranks per doc", avgRanks)
print printTemplate.format("Avg. mean and skew", avgStats)
def plotResults(ranksArrays, ranks, maxRank, testName="JUnit Test"):
""" Plot a histogram of the ranks.
@param ranksArrays (dict) Keys: model names. Values: List of TP ranks.
@param ranks (dict) Keys: model names. Values: Averaged TP ranks.
@param maxRank (int) Highest rank of TP possible.
@return (str) Plot URLs.
"""
py.sign_in(os.environ["PLOTLY_USERNAME"], os.environ["PLOTLY_API_KEY"])
colors = ["rgba(93, 164, 214, 0.5)", "rgba(255, 144, 14, 0.5)",
"rgba(44, 160, 101, 0.5)", "rgba(255, 65, 54, 0.5)",
"rgba(207, 114, 255, 0.5)", "rgba(193, 42, 72, 0.5)"]
histogramTraces = []
for i, (modelName, allRanks) in enumerate(ranksArrays.iteritems()):
# Display distribution stats in legend
mean = round(allRanks.mean(), 2)
sk = round(skew(allRanks), 2)
# Setup histogram for this model
histogramTraces.append(Histogram(
y=allRanks,
name="{}: ({}, {})".format(modelName, mean, sk),
autobiny=False,
ybins=dict(
start=0.0,
end=maxRank,
size=1.0,
),
marker=dict(
color=colors[i],
),
opacity=0.7,
))
histogramLayout = Layout(
title="{} - Where are the True Positives?".format(testName),
xaxis=dict(
title="Count",
),
yaxis=dict(
title="Rank of TPs",
range=[maxRank, 0],
),
barmode="overlay",
showlegend=True,
)
histogramFig = Figure(data=histogramTraces, layout=histogramLayout)
histogramURL = py.plot(histogramFig)
return histogramURL
| agpl-3.0 |
lilleswing/deepchem | examples/uv/UV_datasets.py | 8 | 4703 | """
UV dataset loader.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import shutil
import time
import numpy as np
import deepchem as dc
from uv_features import uv_descriptors
def remove_missing_entries(dataset):
"""Remove missing entries.
Some of the datasets have missing entries that sneak in as zero'd out
feature vectors. Get rid of them.
"""
for i, (X, y, w, ids) in enumerate(dataset.itershards()):
available_rows = X.any(axis=1)
print("Shard %d has %d missing entries."
% (i, np.count_nonzero(~available_rows)))
X = X[available_rows]
y = y[available_rows]
w = w[available_rows]
ids = ids[available_rows]
dataset.set_shard(i, X, y, w, ids)
def get_transformers(train_dataset):
"""Get transformers applied to datasets."""
transformers = []
#transformers = [
# dc.trans.LogTransformer(transform_X=True),
# dc.trans.NormalizationTransformer(transform_y=True,
# dataset=train_dataset)]
return transformers
def remove_UV_negative_entries(dataset):
"""Remove negative entries from UV dataset.
Negative entries are malformed for UV dataset. Remove them.
"""
for i, (X, y, w, ids) in enumerate(dataset.itershards()):
malformed = np.where(y <= 0)
y[malformed] = 0
w[malformed] = 0
dataset.set_shard(i, X, y, w, ids)
def gen_uv(UV_tasks, raw_train_dir, train_dir, valid_dir, test_dir,
shard_size=10000):
"""Load UV datasets."""
train_files = ("UV_training_disguised_combined_full.csv.gz")
valid_files = ("UV_test1_disguised_combined_full.csv.gz")
test_files = ("UV_test2_disguised_combined_full.csv.gz")
# Featurize UV dataset
print("About to featurize UV dataset.")
featurizer = dc.feat.UserDefinedFeaturizer(uv_descriptors)
loader = dc.data.UserCSVLoader(
tasks=UV_tasks, id_field="Molecule", featurizer=featurizer)
train_datasets, valid_datasets, test_datasets = [], [], []
print("Featurizing train datasets")
train_dataset = loader.featurize(train_files, shard_size=shard_size)
print("Featurizing valid datasets")
valid_dataset = loader.featurize(valid_files, shard_size=shard_size)
print("Featurizing test datasets")
test_dataset = loader.featurize(test_files, shard_size=shard_size)
print("Remove missing entries from datasets.")
remove_missing_entries(train_dataset)
remove_missing_entries(valid_dataset)
remove_missing_entries(test_dataset)
print("Remove malformed datapoints from UV dataset.")
remove_UV_negative_entries(train_dataset)
remove_UV_negative_entries(valid_dataset)
remove_UV_negative_entries(test_dataset)
print("Transforming datasets with transformers.")
transformers = get_transformers(train_dataset)
raw_train_dataset = train_dataset
for transformer in transformers:
print("Performing transformations with %s"
% transformer.__class__.__name__)
print("Transforming dataset")
train_dataset = transformer.transform(train_dataset)
valid_dataset = transformer.transform(valid_dataset)
test_dataset = transformer.transform(test_dataset)
print("Shuffling order of train dataset.")
train_dataset.sparse_shuffle()
print("Moving directories")
raw_train_dataset.move(raw_train_dir)
train_dataset.move(train_dir)
valid_dataset.move(valid_dir)
test_dataset.move(test_dir)
return (raw_train_dataset, train_dataset, valid_dataset, test_dataset)
def load_uv(shard_size):
"""Loads uv datasets. Generates if not stored already."""
UV_tasks = (['logTIC'] +
['w__%d' % i for i in range(210, 401)])
current_dir = os.path.dirname(os.path.realpath(__file__))
raw_train_dir = os.path.join(current_dir, "raw_train_dir")
train_dir = os.path.join(current_dir, "train_dir")
valid_dir = os.path.join(current_dir, "valid_dir")
test_dir = os.path.join(current_dir, "test_dir")
if (os.path.exists(raw_train_dir) and
os.path.exists(train_dir) and
os.path.exists(valid_dir) and
os.path.exists(test_dir)):
print("Reloading existing datasets")
raw_train_dataset = dc.data.DiskDataset(raw_train_dir)
train_dataset = dc.data.DiskDataset(train_dir)
valid_dataset = dc.data.DiskDataset(valid_dir)
test_dataset = dc.data.DiskDataset(test_dir)
else:
print("Featurizing datasets")
(raw_train_dataset, train_dataset, valid_dataset, test_dataset) = \
gen_uv(UV_tasks, raw_train_dir, train_dir, valid_dir, test_dir,
shard_size=shard_size)
transformers = get_transformers(raw_train_dataset)
return UV_tasks, (train_dataset, valid_dataset, test_dataset), transformers
| mit |
Athemis/lot | lot.py | 1 | 43683 | #!/usr/bin/env python
try:
import libtcodpy as libtcod
except ImportError:
raise ImportError('----- libtcod.py could not be loaded. -----')
import math
import textwrap
import shelve
try:
import numpy as np
except ImportError:
raise ImportError('----- NumPy must be installed. -----')
# actual size of the window
SCREEN_WIDTH = 80
SCREEN_HEIGHT = 50
# size of the map
MAP_WIDTH = 80
MAP_HEIGHT = 43
LEVEL_SCREEN_WIDTH = 40
CHARACTER_SCREEN_WIDTH = 30
# Experience and level-ups
LEVEL_UP_BASE = 200
LEVEL_UP_FACTOR = 150
# Size and number of rooms
ROOM_MAX_SIZE = 10
ROOM_MIN_SIZE = 6
MAX_ROOMS = 30
INVENTORY_WIDTH = 50
HEAL_AMOUNT = 40
LIGHTNING_DAMAGE = 40
LIGHTNING_RANGE = 5
CONFUSE_NUM_TURNS = 10
CONFUSE_RANGE = 8
FIREBALL_DAMAGE = 25
FIREBALL_RADIUS = 3
FOV_ALGO = 0 # default FOV algorithm
FOV_LIGHT_WALLS = True
TORCH_RADIUS = 10
SQUARED_TORCH_RADIUS = TORCH_RADIUS * TORCH_RADIUS
dx = 0.0
dy = 0.0
di = 0.0
fov_recompute = None
fov_noise = None
fov_torchx = 0.0
color_dark_wall = libtcod.Color(40, 40, 40)
color_light_wall = libtcod.Color(60, 60, 60)
color_dark_ground = libtcod.Color(25, 25, 25)
color_light_ground = libtcod.Color(255, 230, 100)
LIMIT_FPS = 20 # 20 frames-per-second limit
# Number of frames to wait after moving/attacking
PLAYER_SPEED = 2
DEFAULT_SPEED = 8
DEFAULT_ATTACK_SPEED = 20
# Sizes and coordinates relevant for the GUI
BAR_WIDTH = 20
PANEL_HEIGHT = 7
PANEL_Y = SCREEN_HEIGHT - PANEL_HEIGHT
MSG_X = BAR_WIDTH + 2
MSG_WIDTH = SCREEN_WIDTH - BAR_WIDTH - 2
MSG_HEIGHT = PANEL_HEIGHT - 1
class Rect:
# A rectangle on the map
def __init__(self, x, y, w, h):
self.x1 = x
self.y1 = y
self.x2 = x + w
self.y2 = y + h
def center(self):
center_x = int(round((self.x1 + self.x2) / 2))
center_y = int(round((self.y1 + self.y2) / 2))
return (center_x, center_y)
def intersect(self, other):
# Returns True if this rectangle intersects with another one
return (self.x1 <= other.x2 and self.x2 >= other.x1 and
self.y1 <= other.y2 and self.y2 >= other.y1)
class Tile:
# A tile of the map and its properties
def __init__(self, blocked, block_sight=None):
self.blocked = blocked
self.explored = False
# By default, a blocked tile also blocks sight
if block_sight is None:
block_sight = blocked
self.block_sight = block_sight
class Object:
# This is a generic object: player, monster, item, ...
# It is always represented by a character on screen
def __init__(self, x, y, char, name, color, blocks=False,
always_visible=False, fighter=None, ai=None,
item=None, speed=DEFAULT_SPEED):
self.x = int(x)
self.y = int(y)
self.char = char
self.name = name
self.color = color
self.blocks = blocks
self.always_visible = always_visible
self.fighter = fighter
if self.fighter:
self.fighter.owner = self
self.ai = ai
if self.ai:
self.ai.owner = self
self.item = item
if self.item:
self.item.owner = self
self.speed = speed
self.wait = 0
def distance_to(self, other):
# Return the distance to another object
dx = other.x - self.x
dy = other.y - self.y
return math.sqrt(dx ** 2 + dy ** 2)
def distance(self, x, y):
# Return the distance to some coordinates
return math.sqrt((x - self.x) ** 2 + (y - self.y) ** 2)
def move(self, dx, dy):
# Move by given amount
try:
if not is_blocked(self.x + dx, self.y + dy):
self.x += dx
self.y += dy
except:
self.x = self.x
self.y = self.y
self.wait = self.speed
def move_towards(self, target_x, target_y):
# vector from this object to the target and distance
dx = target_x - self.x
dy = target_y - self.y
distance = math.sqrt(dx ** 2 + dy ** 2)
# Normalize it to length 1 (preserving direction), then round it and
# convert to integer so movement is restricted to the map grid
dx = int(round(dx / distance))
dy = int(round(dy / distance))
self.move(dx, dy)
def draw(self):
if (libtcod.map_is_in_fov(fov_map, self.x, self.y) or
(self.always_visible and map[self.x, self.y].explored)):
# Set the color and draw the character
libtcod.console_put_char(con, self.x, self.y,
self.char, libtcod.BKGND_NONE)
libtcod.console_set_char_foreground(con, self.x,
self.y, self.color)
def send_to_back(self):
# Make this object be drawn first, so all other objects appear above
# if they are in the same tile.
global objects
objects.remove(self)
objects.insert(0, self)
def clear(self):
# Erase the character that represents this object
libtcod.console_put_char(con, self.x, self.y, ' ', libtcod.BKGND_NONE)
class Item:
# An item that can be picked up and used
def __init__(self, use_function=None):
self.use_function = use_function
def pick_up(self):
# Add to the player's inventory and remove from the map
if len(inventory) >= 26:
message('Your inventory is full, \
cannot pick up {}.'.format(self.owner.name), libtcod.red)
else:
inventory.append(self.owner)
objects.remove(self.owner)
message('You picked up {}!'.format(self.owner.name), libtcod.green)
def use(self):
# Just call the use_function if it is defined
if self.use_function is None:
message('The {} cannot be used.'.format(self.owner.name))
else:
if self.use_function() != 'cancelled':
inventory.remove(self.owner) # Destroy after use unless it was
# cancelled for some reason
def drop(self):
# Add to the map and remove from the player's inventory
objects.append(self.owner)
inventory.remove(self.owner)
self.owner.x = player.x
self.owner.y = player.y
message('You dropped a {}.'.format(self.owner.name), libtcod.yellow)
class Fighter:
# Combat reated properties and methods (monster, player, NPC)
def __init__(self, hp, defense, power, xp, death_function=None,
attack_speed=DEFAULT_ATTACK_SPEED):
self.max_hp = hp
self.hp = hp
self.defense = defense
self.power = power
self.xp = xp
self.death_function = death_function
self.attack_speed = attack_speed
def take_damage(self, damage):
# Apply damage if possible
if damage > 0:
self.hp -= damage
if self.hp <= 0:
function = self.death_function
if function is not None:
function(self.owner)
if self.owner != player: # Yield experience to the player
player.fighter.xp += self.xp
def heal(self, amount):
# Heal by the given amount, without going over the maximum
self.hp += amount
if self.hp > self.max_hp:
self.hp = self.max_hp
def attack(self, target):
# A simple formula for attack damage
damage = self.power - target.fighter.defense
if damage > 0:
# Make the target take some damage
message('{} attacks {} for {} \
hit points.'.format(self.owner.name.capitalize(),
target.name, str(damage)))
target.fighter.take_damage(damage)
else:
message('{} attacks {} but it \
has no effect!'.format(self.owner.name.capitalize(),
target.name))
self.owner.wait = self.attack_speed
class BasicMonster:
# AI for a basic monster
def take_turn(self):
# A basic monster that takes its turn.
# If you can see it, it can see you
monster = self.owner
if libtcod.map_is_in_fov(fov_map, monster.x, monster.y):
# Move towards player if far away
if monster.distance_to(player) >= 2:
monster.move_towards(player.x, player.y)
# Close enough. Attack if the player is alive
elif player.fighter.hp > 0:
monster.fighter.attack(player)
class ConfusedMonster:
# AI for a temporarily confused monster (reverts to
# previous AI after a while).
def __init__(self, old_ai, num_turns=CONFUSE_NUM_TURNS):
self.old_ai = old_ai
self.num_turns = num_turns
def take_turn(self):
if self.num_turns > 0: # Still confused
# Move in a random direction
self.owner.move(libtcod.random_get_int(0, -1, 1),
libtcod.random_get_int(0, -1, 1))
else: # Restore the previous AI (this one will be deleted because
# it's not referenced anymore)
self.owner.ai = self.old_ai
message('The {} is no longer confused!'.format(self.owner.name),
libtcod.red)
def player_death(player):
# The game ended!
global game_state
message('You died!', libtcod.red)
game_state = 'dead'
# For added effect, transform the player into a corpse
player.char = b'%'
player.color = libtcod.dark_red
def monster_death(monster):
# Transform it into a nasty corpse. It does not block, cannot be attacked
# and does not move
message('The {} is dead! \
You gain {} experience.'.format(monster.name.capitalize(),
str(monster.fighter.xp)))
monster.char = b'%'
monster.color = libtcod.dark_red
monster.blocks = False
monster.fighter = None
monster.ai = None
monster.name = 'remains of {}'.format(monster.name)
monster.send_to_back()
def closest_monster(max_range):
# Find the closest enemy up to a maximum range and in the player's FOV
closest_enemy = None
closest_dist = max_range + 1 # Start with (slightly more than) max. range
for object in objects:
if (
object.fighter and not
object == player and
libtcod.map_is_in_fov(fov_map, object.x, object.y)
):
# Calculate the distance between this object and the player
dist = player.distance_to(object)
if dist < closest_dist: # Its closer, so remember it
closest_enemy = object
closest_dist = dist
return closest_enemy
def cast_heal():
#heal the player
if player.fighter.hp == player.fighter.max_hp:
message('You are already at full health.', libtcod.red)
return 'cancelled'
message('Your wounds start to feel better!', libtcod.light_violet)
player.fighter.heal(HEAL_AMOUNT)
def cast_lightning():
# Find the closest enemy (inside a maximum range) and damage it
monster = closest_monster(LIGHTNING_RANGE)
if monster is None: # No enemy found within maximum range
message('No enemy is close enough to strike.', libtcod.red)
return 'cancelled'
# Zap it!
message('A lightning bolt strikes the {} with a loud thunder! \
The damage is {} hit points'.format(monster.name,
str(LIGHTNING_DAMAGE)),
libtcod.light_blue)
monster.fighter.take_damage(LIGHTNING_DAMAGE)
def cast_confuse():
# Ask the player for a target to confuse
message('Left-click an enemy to confuse it, \
or right-click to cancel.', libtcod.light_cyan)
monster = target_monster(CONFUSE_RANGE)
if monster is None:
return 'cancelled'
# Replace the monster's AI with a "confused" one;
# after some turns it will restore the old AI
old_ai = monster.ai
monster.ai = ConfusedMonster(old_ai)
monster.ai.owner = monster # Tell the new component who owns it
message('The eyes of the {} look vacant, \
as he starts to stumble around!'.format(monster.name),
libtcod.light_green)
def cast_fireball():
# Ask the player for a target tile to throw a fireball at
message('Left-click a target tile for the fireball, \
or right-click to cancel.', libtcod.light_cyan)
(x, y) = target_tile()
if x is None:
return 'cancelled'
message('The fireball explodes, \
burning everything within {} tiles!'.format(FIREBALL_RADIUS),
libtcod.orange)
for obj in objects: # Damage every fighter in range, including the player
if obj.distance(x, y) <= FIREBALL_RADIUS and obj.fighter:
message('The {} gets \
burned for {} hit points.'.format(obj.name,
FIREBALL_DAMAGE),
libtcod.orange)
obj.fighter.take_damage(FIREBALL_DAMAGE)
def target_tile(max_range=None):
# Return the position of a tile left-clicked in the player's FOV
# (optionally in a range), or (None, None) if right-clicked
global key
global mouse
while True:
# Render the screen. This erases the inventory and shows the names of
# objects under the mouse.
render_all()
libtcod.console_flush()
# Get mouse position and click status
libtcod.sys_check_for_event((libtcod.EVENT_KEY_PRESS |
libtcod.EVENT_MOUSE), key, mouse)
(x, y) = (mouse.cx, mouse.cy)
# print('{}:{}'.format(str(x), str(y)))
# Accept the target if the player clicked in FOV and in case a range is
# specified, if it's in that range
if (mouse.lbutton_pressed and libtcod.map_is_in_fov(fov_map, x, y) and
(max_range is None or player.distance(x, y) <= max_range)):
return (x, y)
if mouse.rbutton_pressed or key.vk == libtcod.KEY_ESCAPE:
return (None, None) # Cancel if right-clicked or Escape is pressed
def target_monster(max_range=None):
# Returns a clicked monster inside FOV up to a range,
# or None if right-clicked
while True:
(x, y) = target_tile(max_range)
if x is None: # Player cancelled
return None
# Return the first clicked monster, otherwise continue looping
for obj in objects:
if obj.x == x and obj.y == y and obj.fighter and obj != player:
return obj
def create_room(room):
global map
# Go through the tiles in the rectangle and make them passable
for x in range(room.x1 + 1, room.x2):
for y in range(room.y1 + 1, room.y2):
map[x, y].blocked = False
map[x, y].block_sight = False
def create_h_tunnel(x1, x2, y):
# Horizontal tunnel
global map
for x in range(min(x1, x2), max(x1, x2) + 1):
map[x, y].blocked = False
map[x, y].block_sight = False
def create_v_tunnel(y1, y2, x):
# Vertical tunnel
global map
for y in range(min(y1, y2), max(y1, y2) + 1):
map[x, y].blocked = False
map[x, y].block_sight = False
def is_blocked(x, y):
# First test the map tile
if map[x, y].blocked:
return True
# Now check for any blocking object
for object in objects:
if object.blocks and object.x == x and object.y == y:
return True
return False
def random_choice_index(chances):
# Choose one option from list of chances, returning its index
# The dice will land on some number between 1 and the sum of the chances
dice = libtcod.random_get_int(0, 1, sum(chances))
# Go through all chances, keeping the sum so far
running_sum = 0
choice = 0
for w in chances:
running_sum += w
# See if this dice landed in the part that corresponds to this choice
if dice <= running_sum:
return choice
choice += 1
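# Hypothetical worked example (not part of the original module): for
# chances = [80, 20] the dice falls in 1..100; values 1-80 map to index 0 and
# 81-100 to index 1, so the first option is returned about 80% of the time.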
def random_choice(chances_dict):
# Choose one option from dictionary of chances, returning its key
chances = chances_dict.values()
strings = chances_dict.keys()
return list(strings)[random_choice_index(list(chances))]
def from_dungeon_level(table):
# Returns a value that depends on level. The table specifies which value
# occurs after each level, default is 0
for (value, level) in reversed(table):
if dungeon_level >= level:
return value
return 0
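# Hypothetical worked example (not part of the original module): with the
# table [[2, 1], [3, 4], [5, 6]] used below for max_monsters, this returns
# 2 on dungeon levels 1-3, 3 on levels 4-5 and 5 from level 6 upwards, since
# the reversed table is scanned for the first level threshold reached.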
def place_objects(room):
# This is where we decide the chance of each monster or item appearing
# Maximum number of monsters per room
max_monsters = from_dungeon_level([[2, 1], [3, 4], [5, 6]])
# Chance of each monster
monster_chances = {}
monster_chances['orc'] = 80
monster_chances['troll'] = from_dungeon_level([[15, 3], [30, 5], [60, 7]])
# Maximum number of items per room
max_items = from_dungeon_level([[1, 1], [2, 4]])
# Chance of each item (by default they have a chance of 0 at level 1,
# which then goes up)
item_chances = {}
item_chances['heal'] = 35
item_chances['lightning'] = from_dungeon_level([[25, 4]])
item_chances['fireball'] = from_dungeon_level([[25, 6]])
item_chances['confuse'] = from_dungeon_level([[10, 2]])
# Choose a random number of monsters
num_monsters = libtcod.random_get_int(0, 0, max_monsters)
for i in range(num_monsters):
# Choose random spot for this monster
x = libtcod.random_get_int(0, room.x1 + 1, room.x2 - 1)
y = libtcod.random_get_int(0, room.y1 + 1, room.y2 - 1)
# Only place it if the tile is unblocked
if not is_blocked(x, y):
choice = random_choice(monster_chances)
if choice == 'orc':
# create an orc
fighter_component = Fighter(hp=20,
defense=0,
power=4,
xp=35,
death_function=monster_death)
ai_component = BasicMonster()
monster = Object(x, y, 'o', 'orc', libtcod.desaturated_green,
blocks=True, fighter=fighter_component,
ai=ai_component)
elif choice == 'troll':
# create a troll
fighter_component = Fighter(hp=30,
defense=2,
power=8,
xp=100,
death_function=monster_death)
ai_component = BasicMonster()
monster = Object(x, y, 'T', 'troll', libtcod.darker_green,
blocks=True, fighter=fighter_component,
ai=ai_component)
objects.append(monster)
# Choose random number of items
num_items = libtcod.random_get_int(0, 0, max_items)
for i in range(num_items):
# Choose random spot for this item
x = libtcod.random_get_int(0, room.x1 + 1, room.x2 - 1)
y = libtcod.random_get_int(0, room.y1 + 1, room.y2 - 1)
# Only place it if the tile is not blocked
if not is_blocked(x, y):
choice = random_choice(item_chances)
if choice == 'heal':
# Create a healing potion (70% chance)
item_component = Item(use_function=cast_heal)
item = Object(x, y, '!', 'healing potion',
libtcod.violet,
item=item_component)
elif choice == 'lightning':
# Create a lightning bolt scroll (10% chance)
item_component = Item(use_function=cast_lightning)
item = Object(x, y, '#', 'scroll of lightning bolt',
libtcod.yellow,
item=item_component)
elif choice == 'fireball':
# Create a fireball scroll (10% chance)
item_component = Item(use_function=cast_fireball)
item = Object(x, y, '#', 'scroll of fireball',
libtcod.light_orange,
item=item_component)
elif choice == 'confuse':
# Create a confuse scroll (10% chance)
item_component = Item(use_function=cast_confuse)
item = Object(x, y, '#', 'scroll of confusion',
libtcod.light_yellow,
item=item_component)
objects.append(item)
item.send_to_back()
item.always_visible = True
def make_map():
global map, objects, stairs
# The list of objects, starting with just the player
objects = [player]
# Fill the map with unblocked tiles
map = np.zeros((MAP_WIDTH, MAP_HEIGHT), object)
for y in range(MAP_HEIGHT):
for x in range(MAP_WIDTH):
map[x, y] = Tile(True)
rooms = []
num_rooms = 0
for r in range(MAX_ROOMS):
# Random width and height
w = libtcod.random_get_int(0, ROOM_MIN_SIZE, ROOM_MAX_SIZE)
h = libtcod.random_get_int(0, ROOM_MIN_SIZE, ROOM_MAX_SIZE)
# Random position without going out of the map boundaries
x = libtcod.random_get_int(0, 0, MAP_WIDTH - w - 1)
y = libtcod.random_get_int(0, 0, MAP_HEIGHT - h - 1)
new_room = Rect(x, y, w, h)
failed = False
for other_room in rooms:
if new_room.intersect(other_room):
failed = True
break
if not failed:
create_room(new_room)
# add some contents to this room, such as monsters
place_objects(new_room)
(new_x, new_y) = new_room.center()
if num_rooms == 0:
# if this is the first room, the player starts here
player.x = new_x
player.y = new_y
else:
# All rooms after the first
# connect it to the previous room with a tunnel
# center coordinates of previous room
(prev_x, prev_y) = rooms[num_rooms - 1].center()
# Flip a coin (random number that is either 0 or 1)
if libtcod.random_get_int(0, 0, 1) == 1:
# first move horizontally, then vertical
create_h_tunnel(prev_x, new_x, prev_y)
create_v_tunnel(prev_y, new_y, new_x)
else:
# first move vertically, then horizontally
create_v_tunnel(prev_y, new_y, prev_x)
create_h_tunnel(prev_x, new_x, new_y)
# Finally, append the new room to the list
rooms.append(new_room)
num_rooms += 1
# Create stairs at the center of the last room
stairs = Object(new_x, new_y, '<', 'stairs', libtcod.white,
always_visible=True)
objects.append(stairs)
def next_level():
# Advance to the next level
global dungeon_level
message('You take a moment to rest and recover your strength.',
libtcod.light_violet)
# Heal the player by 50 %
player.fighter.heal(int(round(player.fighter.max_hp / 2)))
message('After a rare moment of peace, you descend \
deeper into the heart of the dungeon...', libtcod.red)
dungeon_level += 1
make_map()
initialize_fov()
def render_all():
global fov_map, color_dark_wall, color_light_wall
global color_dark_ground, color_light_ground
global fov_recompute, fov_torchx
if fov_recompute:
#recompute FOV if needed (the player moved or something)
fov_recompute = False
libtcod.map_compute_fov(fov_map, player.x, player.y, TORCH_RADIUS,
FOV_LIGHT_WALLS, FOV_ALGO)
#torch flickers (using noise generator)
fov_torchx += 0.2
tdx = [fov_torchx + 20.0]
dx = libtcod.noise_get(fov_noise, tdx) * 1.5
tdx[0] += 30.0
dy = libtcod.noise_get(fov_noise, tdx) * 1.5
di = 0.2 * libtcod.noise_get(fov_noise, [fov_torchx])
# Iterate through rendering queue
for y in range(MAP_HEIGHT):
for x in range(MAP_WIDTH):
visible = libtcod.map_is_in_fov(fov_map, x, y)
wall = map[x, y].block_sight # check if tile is a wall
if not visible:
# if it's not visible right now, the player can only
# see it if it's explored
if map[x, y].explored:
# It's out of the player's FOV
if wall:
libtcod.console_set_char_background(con, x, y,
color_dark_wall,
libtcod.BKGND_SET)
else:
libtcod.console_set_char_background(con, x, y,
color_dark_ground,
libtcod.BKGND_SET)
else:
# It's visible
if wall:
base = color_dark_wall
light = color_light_wall
else:
base = color_dark_ground
light = color_light_ground
#Let the torch actually flicker
r = float(x - player.x + dx) * (x - player.x + dx) + \
(y - player.y + dy) * (y - player.y + dy)
if r < SQUARED_TORCH_RADIUS:
l = (SQUARED_TORCH_RADIUS - r) / SQUARED_TORCH_RADIUS + di
if l < 0.0:
l = 0.0
elif l > 1.0:
l = 1.0
# alter base colors to simulate flickering torch
base = libtcod.color_lerp(base, light, l)
# actually draw the visible tile
libtcod.console_set_char_background(con, x, y, base,
libtcod.BKGND_SET)
#since it's visible, it's explored
map[x, y].explored = True
# Draw all objects in the list
for object in objects:
if object != player:
object.draw()
player.draw()
# Blit the contents of con to the root console
libtcod.console_blit(con, 0, 0, MAP_WIDTH, MAP_HEIGHT, 0, 0, 0)
# Prepare to render the GUI panel
libtcod.console_set_default_background(panel, libtcod.black)
libtcod.console_clear(panel)
# Print the game messages
y = 1
for (line, color) in game_msgs:
libtcod.console_set_default_foreground(panel, color)
libtcod.console_set_alignment(panel, libtcod.LEFT)
libtcod.console_print(panel, MSG_X, y, line)
y += 1
# Show the player's stats
render_bar(1, 1, BAR_WIDTH, 'HP', player.fighter.hp, player.fighter.max_hp,
libtcod.light_red, libtcod.darker_red)
libtcod.console_set_alignment(panel, libtcod.LEFT)
libtcod.console_print(panel, 1, 3,
'Dungeon level {}'.format(str(dungeon_level)))
# Display names of objects under the mouse
libtcod.console_set_default_foreground(panel, libtcod.light_grey)
libtcod.console_print(panel, 1, 0, get_names_under_mouse())
# Blit the contents of "panel" to the root console
libtcod.console_blit(panel, 0, 0, SCREEN_WIDTH, PANEL_HEIGHT, 0, 0,
PANEL_Y)
def menu(header, options, width):
global key
global mouse
if len(options) > 26:
raise ValueError('Cannot have a menu with more than 26 options!')
# Calculate total height for the header (after auto-wrap)
# and one line per option
header_height = libtcod.console_get_height_rect(con, 0, 0, width,
SCREEN_HEIGHT, header)
if header == '':
header_height = 0
height = len(options) + header_height
# Create an off-screen console that represents the menu's window
window = libtcod.console_new(width, height)
# Print the header, with auto-wrap
libtcod.console_set_default_foreground(window, libtcod.white)
libtcod.console_set_alignment(window, libtcod.LEFT)
libtcod.console_set_default_background(window, libtcod.BKGND_NONE)
libtcod.console_print_rect(window, 0, 0, width, height, header)
# Print all the options
y = header_height
letter_index = ord('a')
for option_text in options:
text = '({}) {}'.format(chr(letter_index), option_text)
libtcod.console_print(window, 0, y, text)
y += 1
letter_index += 1
# Blit the contents of "window" to the root console
x = int(round(SCREEN_WIDTH / 2 - width / 2))
y = int(round(SCREEN_HEIGHT / 2 - height / 2))
libtcod.console_blit(window, 0, 0, width, height, 0, x, y, 1.0, 0.7)
# Present the root console to the player and wait for a key-press
libtcod.console_flush()
libtcod.sys_wait_for_event(libtcod.EVENT_KEY_PRESS, key, mouse, False)
if key.vk == libtcod.KEY_ENTER and key.lalt:
#(special case) Alt+Enter: toggle fullscreen
libtcod.console_set_fullscreen(not libtcod.console_is_fullscreen())
# Convert the ASCII code to an index; if it corresponds to an
# option, return it
index = key.c - ord('a')
if index >= 0 and index < len(options):
return index
return None
def msgbox(text, width=50):
menu(text, [], width) # Use menu() as a sort of "message box"
def inventory_menu(header):
# Show a menu with each item of the inventory as an option
if len(inventory) == 0:
options = ['Inventory is empty.']
else:
options = [item.name for item in inventory]
index = menu(header, options, INVENTORY_WIDTH)
# If an item was chosen, return it
if index is None or len(inventory) == 0:
return None
return inventory[index].item
def render_bar(x, y, total_width, name, value, maximum, bar_color, back_color):
# Render a bar (HP, experience, etc). First calculate the width of the bar
bar_width = int(float(value) / maximum * total_width)
# Render the background first
libtcod.console_set_default_background(panel, back_color)
libtcod.console_rect(panel, x, y, total_width, 1, False, libtcod.BKGND_SET)
# Now render the bar on top
libtcod.console_set_default_background(panel, bar_color)
if bar_width > 0:
libtcod.console_rect(panel, x, y, bar_width, 1, False,
libtcod.BKGND_SET)
# Add centered text with values
libtcod.console_set_default_foreground(panel, libtcod.white)
libtcod.console_set_alignment(panel, libtcod.CENTER)
bar_text = '{}: {}/{}'. format(name, str(value), str(maximum))
libtcod.console_print(panel, int(x + total_width / 2), y, bar_text)
def message(new_msg, color=libtcod.white):
# Split the message if necessary, among multiple lines
new_msg_lines = textwrap.wrap(new_msg, MSG_WIDTH)
for line in new_msg_lines:
# if the buffer is full, remove first line to make room for the new one
if len(game_msgs) == MSG_HEIGHT:
del game_msgs[0]
# Add the new line as a tuple, with the text and color
game_msgs.append((line, color))
def get_names_under_mouse():
# Return a string with the names of all objects under the mouse
global key
global mouse
libtcod.sys_check_for_event(libtcod.EVENT_MOUSE, key, mouse)
(x, y) = (mouse.cx, mouse.cy)
# Create a list with the names of all objects at the mouse's
# coordinates and in FOV
names = [obj.name for obj in objects
if (obj.x == x and
obj.y == y and
libtcod.map_is_in_fov(fov_map, obj.x, obj.y))]
names = ', '.join(names) # join the names, separated by commas
return names.capitalize()
def handle_keys():
global fov_recompute
global key
global mouse
libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS, key, mouse)
if key.vk == libtcod.KEY_ENTER and key.lalt:
# Alt+Enter: Fullscreen
libtcod.console_set_fullscreen(not libtcod.console_is_fullscreen())
return
elif key.vk == libtcod.KEY_ESCAPE:
return 'exit' # exit game
if game_state == 'playing':
if player.wait > 0: # Don't take a turn yet if still waiting
player.wait -= 1
return
if libtcod.console_is_key_pressed(libtcod.KEY_UP):
player_move_or_attack(0, -1)
fov_recompute = True
elif libtcod.console_is_key_pressed(libtcod.KEY_DOWN):
player_move_or_attack(0, 1)
fov_recompute = True
elif libtcod.console_is_key_pressed(libtcod.KEY_LEFT):
player_move_or_attack(-1, 0)
fov_recompute = True
elif libtcod.console_is_key_pressed(libtcod.KEY_RIGHT):
player_move_or_attack(1, 0)
fov_recompute = True
# Diagonal movement using the numpad keys
elif libtcod.console_is_key_pressed(libtcod.KEY_KP7):
player_move_or_attack(-1, -1)
fov_recompute = True
elif libtcod.console_is_key_pressed(libtcod.KEY_KP9):
player_move_or_attack(1, -1)
fov_recompute = True
elif libtcod.console_is_key_pressed(libtcod.KEY_KP1):
player_move_or_attack(-1, 1)
fov_recompute = True
elif libtcod.console_is_key_pressed(libtcod.KEY_KP3):
player_move_or_attack(1, 1)
fov_recompute = True
else:
key_char = chr(key.c)
if key_char == 'g':
# Pick up an item
for object in objects: # Look for an item in the player's tile
if (
object.x == player.x and object.y == player.y and
object.item
):
object.item.pick_up()
break
if key_char == 'i':
# Show the inventory
chosen_item = inventory_menu('Press the key next to an \
item to use it, or any other \
to cancel.\n')
if chosen_item is not None:
chosen_item.use()
if key_char == 'd':
# Show the inventory; if an item is selected, drop it
chosen_item = inventory_menu('Press the key next to an \
item to drop it, or any other \
to cancel.\n')
if chosen_item is not None:
chosen_item.drop()
if key_char == '<':
# Go down stairs, if the player is on them
if stairs.x == player.x and stairs.y == player.y:
next_level()
if key_char == 'c':
# Show character information
level_up_xp = LEVEL_UP_BASE + player.level * LEVEL_UP_FACTOR
msgbox('Character Information\n\nLevel: {}\n\
Experience: {}\nExperience to level up: {}\n\n\
Maximum HP: {}\nAttack: {}\n\
Defense: {}'.format(str(player.level),
str(player.fighter.xp),
str(level_up_xp),
str(player.fighter.max_hp),
str(player.fighter.power),
str(player.fighter.defense)),
CHARACTER_SCREEN_WIDTH)
return 'didnt-take-turn'
def player_move_or_attack(dx, dy):
global fov_recompute
# The coordinates the player is moving to
x = player.x + dx
y = player.y + dy
# Try to find an attackable object there
target = None
for object in objects:
if object.fighter and object.x == x and object.y == y:
target = object
break
# Attack if target found, move otherwise
if target is not None:
player.fighter.attack(target)
else:
player.move(dx, dy)
fov_recompute = True
def check_level_up():
# See if the player's experience is enough to level-up
level_up_xp = LEVEL_UP_BASE + player.level * LEVEL_UP_FACTOR
if player.fighter.xp >= level_up_xp:
# It is! Level up
player.level += 1
player.fighter.xp -= level_up_xp
message('Your battle skills grow stronger! \
You reached level {}!'.format(str(player.level)),
libtcod.yellow)
choice = None
while choice is None: # Keep asking until a choice is made
choice = menu('Level up! Choose a stat to raise:\n',
['Constitution \
(+20 HP, from {})'.format(str(player.fighter.hp)),
'Strength (+1 attack, \
from {})'.format(str(player.fighter.power)),
'Agility (+1 defense, \
from {})'.format(str(player.fighter.defense))],
LEVEL_SCREEN_WIDTH)
if choice == 0:
player.fighter.max_hp += 20
player.fighter.hp += 20
elif choice == 1:
player.fighter.power += 1
elif choice == 2:
player.fighter.defense += 1
def initialize_fov():
global fov_recompute, fov_map, fov_noise
fov_recompute = True
libtcod.console_clear(con) # Unexplored areas start black (which
# is the default background color)
#create the FOV map, according to the generated map
fov_noise = libtcod.noise_new(1, 1.0, 1.0)
fov_map = libtcod.map_new(MAP_WIDTH, MAP_HEIGHT)
for y in range(MAP_HEIGHT):
for x in range(MAP_WIDTH):
libtcod.map_set_properties(fov_map, x, y,
not map[x, y].block_sight,
not map[x, y].blocked)
def new_game():
global player, inventory, game_msgs, game_state, key, mouse, dungeon_level
# Create object representing the player
fighter_component = Fighter(hp=100, defense=1, power=4, xp=0,
death_function=player_death)
player = Object(0, 0, b'@', 'player', libtcod.white, blocks=True,
fighter=fighter_component, speed=PLAYER_SPEED)
player.level = 1
# Generate map
dungeon_level = 1
make_map()
initialize_fov()
game_state = 'playing'
inventory = []
# Create the list of game messages and their colors, starts empty
game_msgs = []
# a warm welcoming message!
message('Welcome stranger! Prepare to perish in the \
Tombs of the Ancient Kings.', libtcod.red)
def play_game():
player_action = None
while not libtcod.console_is_window_closed():
# Render the screen
render_all()
libtcod.console_flush()
check_level_up()
# Erase all objects at their old locations, before they move
for object in objects:
object.clear()
# Handle keys and exit if needed
player_action = handle_keys()
if player_action == 'exit':
save_game() # Save the current game before exit
break
# Let the monsters take their turn
if game_state == 'playing':
for object in objects:
if object.ai:
# Don't take a turn yet if still waiting
if object.wait > 0:
object.wait -= 1
else:
object.ai.take_turn()
def save_game():
# Open a new empty shelve (possibly overwriting an old one)
# to write the game data
file = shelve.open('savegame', 'n')
file['map'] = map
file['objects'] = objects
# Index of player in objects list
file['player_index'] = objects.index(player)
file['inventory'] = inventory
file['game_msgs'] = game_msgs
file['game_state'] = game_state
file['stairs_index'] = objects.index(stairs)
file['dungeon_level'] = dungeon_level
file.close()
def load_game():
# Open the previously saved shelve and load the game data
global map, objects, player, inventory, game_msgs
global game_state, stairs, dungeon_level
file = shelve.open('savegame', 'r')
map = file['map']
objects = file['objects']
# Get the index of the player in objects list and access it
player = objects[file['player_index']]
inventory = file['inventory']
game_msgs = file['game_msgs']
game_state = file['game_state']
stairs = objects[file['stairs_index']]
dungeon_level = file['dungeon_level']
file.close()
initialize_fov()
def main_menu():
img = libtcod.image_load(b'img/backgrounds/menu_background.png')
while not libtcod.console_is_window_closed():
# Show the background image at twice the regular console resolution
libtcod.image_blit_2x(img, 0, 0, 0)
# Show the game's title
libtcod.console_set_default_foreground(0, libtcod.light_yellow)
libtcod.console_set_alignment(0, libtcod.CENTER)
libtcod.console_print(0, int(round(SCREEN_WIDTH / 2)),
int(round(SCREEN_HEIGHT / 2 - 4)),
'THE LEGEND OF THARSA')
libtcod.console_print(0, int(round(SCREEN_WIDTH / 2)),
int(round(SCREEN_HEIGHT - 2)),
'By Athemis')
# Show the options and wait for the player's choice
choice = menu('', ['Play a new game', 'Continue last game', 'Quit'],
24)
if choice == 0: # New game
new_game()
play_game()
elif choice == 1: # Load last game
try:
load_game()
except:
msgbox('\n No saved game to load. \n', 24)
continue
play_game()
elif choice == 2: # Quit
break
##############################
# Initialization & Main Loop
##############################
libtcod.console_set_custom_font(b'img/fonts/arial10x10.png',
(libtcod.FONT_TYPE_GREYSCALE |
libtcod.FONT_LAYOUT_TCOD))
libtcod.console_init_root(SCREEN_WIDTH, SCREEN_HEIGHT,
b'LoT - The Legend of Tharsa', False)
libtcod.sys_set_fps(LIMIT_FPS)
con = libtcod.console_new(MAP_WIDTH, MAP_HEIGHT)
panel = libtcod.console_new(SCREEN_WIDTH, PANEL_HEIGHT)
key = libtcod.Key()
mouse = libtcod.Mouse()
main_menu()
| mit |
fx2003/tensorflow-study | TensorFlow实战/models/attention_ocr/python/data_provider_test.py | 18 | 2448 | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for data_provider."""
import numpy as np
import tensorflow as tf
from tensorflow.contrib.slim import queues
import datasets
import data_provider
class DataProviderTest(tf.test.TestCase):
def setUp(self):
tf.test.TestCase.setUp(self)
def test_preprocessed_image_values_are_in_range(self):
image_shape = (5, 4, 3)
fake_image = np.random.randint(low=0, high=255, size=image_shape)
image_tf = data_provider.preprocess_image(fake_image)
with self.test_session() as sess:
image_np = sess.run(image_tf)
self.assertEqual(image_np.shape, image_shape)
min_value, max_value = np.min(image_np), np.max(image_np)
self.assertTrue((-1.28 < min_value) and (min_value < 1.27))
self.assertTrue((-1.28 < max_value) and (max_value < 1.27))
def test_provided_data_has_correct_shape(self):
batch_size = 4
data = data_provider.get_data(
dataset=datasets.fsns_test.get_test_split(),
batch_size=batch_size,
augment=True,
central_crop_size=None)
with self.test_session() as sess, queues.QueueRunners(sess):
images_np, labels_np = sess.run([data.images, data.labels_one_hot])
self.assertEqual(images_np.shape, (batch_size, 150, 600, 3))
self.assertEqual(labels_np.shape, (batch_size, 37, 134))
def test_optionally_applies_central_crop(self):
batch_size = 4
data = data_provider.get_data(
dataset=datasets.fsns_test.get_test_split(),
batch_size=batch_size,
augment=True,
central_crop_size=(500, 100))
with self.test_session() as sess, queues.QueueRunners(sess):
images_np = sess.run(data.images)
self.assertEqual(images_np.shape, (batch_size, 100, 500, 3))
if __name__ == '__main__':
tf.test.main()
| mit |
eadgarchen/tensorflow | tensorflow/python/keras/datasets/reuters/__init__.py | 71 | 1061 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reuters newswire topic classification dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras._impl.keras.datasets.reuters import get_word_index
from tensorflow.python.keras._impl.keras.datasets.reuters import load_data
del absolute_import
del division
del print_function
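# Typical usage of the re-exported helpers (a minimal sketch; `num_words` and
# the unpacking follow the standard Keras signature for this dataset):
#
#   (x_train, y_train), (x_test, y_test) = load_data(num_words=10000)
#   word_index = get_word_index()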
| apache-2.0 |
TinghuiWang/pyActLearn | examples/CASAS_Single_Test/b1_sda_raw.py | 1 | 8304 | import os
import pickle
import logging
import argparse
import numpy as np
import tensorflow as tf
from datetime import datetime
from pyActLearn.CASAS.data import CASASData
from pyActLearn.CASAS.fuel import CASASFuel
from pyActLearn.learning.nn.sda import SDA
from pyActLearn.performance.record import LearningResult
from pyActLearn.performance import get_confusion_matrix
logger = logging.getLogger(__file__)
def training_and_test(token, train_data, test_data, num_classes, result, model, log_dir):
"""Train and test
Args:
token (:obj:`str`): token representing this run
train_data (:obj:`tuple` of :obj:`numpy.array`): Tuple of training feature and label
test_data (:obj:`tuple` of :obj:`numpy.array`): Tuple of testing feature and label
num_classes (:obj:`int`): Number of classes
result (:obj:`pyActLearn.performance.record.LearningResult`): LearningResult object to hold learning result
"""
train_y = np.zeros((train_data[1].shape[0], num_classes))
test_y = np.zeros((test_data[1].shape[0], num_classes))
for i in range(train_data[1].shape[0]):
train_y[i, train_data[1].flatten()[i]] = 1
for i in range(test_data[1].shape[0]):
test_y[i, test_data[1].flatten()[i]] = 1
model.fit(train_data[0], train_y, pretrain_iter_num=8000,
tuning_iter_num=8000, tuning_criterion='monitor_based',
summaries_dir=log_dir, test_x=test_data[0], test_y=test_y,
summary_interval=100)
# Test
predicted_y = model.predict(test_data[0])
predicted_proba = model.predict_proba(test_data[0])
# Evaluate the Test and Store Result
confusion_matrix = get_confusion_matrix(num_classes=num_classes,
label=test_data[1].flatten(), predicted=predicted_y)
variable_file = os.path.join(log_dir, token + '_save.ckpt')
saver.save(model.sess, variable_file)
result.add_record(variable_file, key=token, confusion_matrix=confusion_matrix)
return predicted_y, predicted_proba
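# The one-hot loops in training_and_test could equivalently be written with a
# numpy identity matrix; the helper below is a hypothetical sketch and is not
# used elsewhere in this script.
def _one_hot_sketch(labels, num_classes):
    """Return a (len(labels), num_classes) one-hot matrix for integer labels."""
    return np.eye(num_classes)[np.asarray(labels).flatten()]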
def load_and_test(token, test_data, num_classes, result, model):
"""Load and test
Args:
token (:obj:`str`): token representing this run
test_data (:obj:`tuple` of :obj:`numpy.array`): Tuple of testing feature and label
num_classes (:obj:`int`): Number of classes
result (:obj:`pyActLearn.performance.record.LearningResult`): LearningResult object to hold learning result
"""
saver.restore(model.sess, result.get_record_by_key(token)['model'])
# Test
predicted_y = model.predict(test_data[0])
predicted_proba = model.predict_proba(test_data[0])
return predicted_y, predicted_proba
if __name__ == '__main__':
args_ok = False
parser = argparse.ArgumentParser(description='Run Stacked Autoencoder on single resident CASAS datasets.')
parser.add_argument('-d', '--dataset', help='Directory to original datasets')
parser.add_argument('-o', '--output', help='Output folder')
parser.add_argument('--week', type=int, metavar='N', help='Train on week N-1 and run on week N')
parser.add_argument('--h5py', help='HDF5 dataset folder')
args = parser.parse_args()
# Default parameters
log_filename = os.path.basename(__file__).split('.')[0] + \
'-%s.log' % datetime.now().strftime('%y%m%d_%H:%M:%S')
# Setup output directory
output_dir = args.output
if output_dir is not None:
output_dir = os.path.abspath(os.path.expanduser(output_dir))
if os.path.exists(output_dir):
# Found output_dir, check if it is a directory
if not os.path.isdir(output_dir):
exit('Output directory %s is found, but not a directory. Abort.' % output_dir)
else:
# Create directory
os.makedirs(output_dir)
else:
output_dir = '.'
log_filename = os.path.join(output_dir, log_filename)
# Setup Logging as early as possible
logging.basicConfig(level=logging.DEBUG,
format='[%(asctime)s] %(name)s:%(levelname)s:%(message)s',
handlers=[logging.FileHandler(log_filename),
logging.StreamHandler()])
# If dataset is specified, update h5py
casas_data_dir = args.dataset
if casas_data_dir is not None:
casas_data_dir = os.path.abspath(os.path.expanduser(casas_data_dir))
if not os.path.isdir(casas_data_dir):
exit('CASAS dataset at %s does not exist. Abort.' % casas_data_dir)
# Find h5py dataset first
h5py_dir = args.h5py
if h5py_dir is not None:
h5py_dir = os.path.abspath(os.path.expanduser(h5py_dir))
else:
# Default location
h5py_dir = os.path.join(output_dir, 'h5py')
if os.path.exists(h5py_dir):
if not os.path.isdir(h5py_dir):
exit('h5py dataset location %s is not a directory. Abort.' % h5py_dir)
if not CASASFuel.files_exist(h5py_dir):
        # Finished checking and creating all needed directories - now load the dataset
if casas_data_dir is not None:
casas_data = CASASData(path=casas_data_dir)
casas_data.summary()
            # This SDA run uses raw features with per-sensor normalization
casas_data.populate_feature(method='raw', normalized=True, per_sensor=True)
casas_data.export_hdf5(h5py_dir)
casas_fuel = CASASFuel(dir_name=h5py_dir)
# Prepare learning result
result_pkl_file = os.path.join(output_dir, 'result.pkl')
result = None
if os.path.isfile(result_pkl_file):
f = open(result_pkl_file, 'rb')
result = pickle.load(f)
f.close()
if result.data != h5py_dir:
logger.error('Result pickle file found for different dataset %s' % result.data)
exit('Cannot save learning result at %s' % result_pkl_file)
else:
        result = LearningResult(name='SDA', data=h5py_dir, mode='by_week')
num_classes = casas_fuel.get_output_dims()
# Open Fuel and get all splits
split_list = casas_fuel.get_set_list()
# If week is specified
if args.week is not None:
if 0 < args.week < len(split_list):
split_list = [split_list[args.week - 1], split_list[args.week]]
# Start training
train_names = ('week 24', 'week 23', 'week 22', 'week 21')
test_names = ('week 25', 'week 26', 'week 27', 'week 28')
test_name = 'single_test'
train_set = casas_fuel.get_dataset(train_names, load_in_memory=True)
(train_set_data) = train_set.data_sources
test_set = casas_fuel.get_dataset(test_names, load_in_memory=True)
(test_set_data) = test_set.data_sources
# Prepare Back Annotation
fp_back_annotated = open(os.path.join(output_dir, 'back_annotated.txt'), 'w')
fp_back_probability = open(os.path.join(output_dir, 'back_annotated_proba.txt'), 'w')
output_log_dir = os.path.join(output_dir, 'log')
if not os.path.isdir(output_log_dir):
os.makedirs(output_log_dir)
sda = SDA(casas_fuel.get_input_dims(), casas_fuel.get_output_dims(), [200, 200, 200])
saver = tf.train.Saver(max_to_keep=len(split_list))
session = tf.Session()
sda.sess = session
log_dir = output_log_dir
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
    # run SDA
logger.info('Training on %s, Testing on %s' % (str(train_names), str(test_names)))
if result.get_record_by_key(test_name) is None:
prediction, prediction_proba = training_and_test(test_name, train_set_data, test_set_data, num_classes,
result, model=sda, log_dir=log_dir)
else:
prediction, prediction_proba = load_and_test(test_name, test_set_data, num_classes, result, model=sda)
casas_fuel.back_annotate(fp_back_annotated, prediction=prediction, split_name=test_names)
casas_fuel.back_annotate_with_proba(fp_back_probability, prediction_proba, split_name=test_names)
train_name = test_name
train_set_data = test_set_data
fp_back_annotated.close()
fp_back_probability.close()
f = open(result_pkl_file, 'wb')
pickle.dump(obj=result, file=f, protocol=pickle.HIGHEST_PROTOCOL)
f.close()
result.export_to_xlsx(os.path.join(output_dir, 'result.xlsx'))
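# Example invocation (paths are placeholders):
#   python b1_sda_raw.py -d ~/datasets/casas/b1 -o ./output --h5py ./output/h5py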
| bsd-3-clause |
jdanbrown/pydatalab | datalab/bigquery/_dataset.py | 6 | 8970 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
"""Implements Dataset, and related Dataset BigQuery APIs."""
from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import object
import datalab.context
import datalab.utils
from . import _api
from . import _table
from . import _utils
from . import _view
class Dataset(object):
"""Represents a list of BigQuery tables in a dataset."""
def __init__(self, name, context=None):
"""Initializes an instance of a Dataset.
Args:
name: the name of the dataset, as a string or (project_id, dataset_id) tuple.
context: an optional Context object providing project_id and credentials. If a specific
project id or credentials are unspecified, the default ones configured at the global
level are used.
Raises:
Exception if the name is invalid.
"""
if context is None:
context = datalab.context.Context.default()
self._context = context
self._api = _api.Api(context)
self._name_parts = _utils.parse_dataset_name(name, self._api.project_id)
self._full_name = '%s:%s' % self._name_parts
self._info = None
try:
self._info = self._get_info()
except datalab.utils.RequestException:
pass
@property
def name(self):
"""The DatasetName named tuple (project_id, dataset_id) for the dataset."""
return self._name_parts
@property
def description(self):
"""The description of the dataset, if any.
Raises:
Exception if the dataset exists but the metadata for the dataset could not be retrieved.
"""
self._get_info()
return self._info['description'] if self._info else None
@property
def friendly_name(self):
"""The friendly name of the dataset, if any.
Raises:
Exception if the dataset exists but the metadata for the dataset could not be retrieved.
"""
self._get_info()
return self._info['friendlyName'] if self._info else None
def _get_info(self):
try:
if self._info is None:
self._info = self._api.datasets_get(self._name_parts)
return self._info
except datalab.utils.RequestException as e:
if e.status == 404:
return None
raise e
except Exception as e:
raise e
def exists(self):
""" Checks if the dataset exists.
Returns:
True if the dataset exists; False otherwise.
Raises:
Exception if the dataset exists but the metadata for the dataset could not be retrieved.
"""
self._get_info()
return self._info is not None
def delete(self, delete_contents=False):
"""Issues a request to delete the dataset.
Args:
delete_contents: if True, any tables and views in the dataset will be deleted. If False
and the dataset is non-empty an exception will be raised.
Returns:
None on success.
Raises:
Exception if the delete fails (including if table was nonexistent).
"""
if not self.exists():
raise Exception('Cannot delete non-existent dataset %s' % self._full_name)
try:
self._api.datasets_delete(self._name_parts, delete_contents=delete_contents)
except Exception as e:
raise e
self._info = None
return None
def create(self, friendly_name=None, description=None):
"""Creates the Dataset with the specified friendly name and description.
Args:
friendly_name: (optional) the friendly name for the dataset if it is being created.
description: (optional) a description for the dataset if it is being created.
Returns:
The Dataset.
Raises:
Exception if the Dataset could not be created.
"""
if not self.exists():
try:
response = self._api.datasets_insert(self._name_parts,
friendly_name=friendly_name,
description=description)
except Exception as e:
raise e
if 'selfLink' not in response:
raise Exception("Could not create dataset %s" % self._full_name)
return self
def update(self, friendly_name=None, description=None):
""" Selectively updates Dataset information.
Args:
friendly_name: if not None, the new friendly name.
description: if not None, the new description.
Returns:
"""
self._get_info()
if self._info:
if friendly_name:
self._info['friendlyName'] = friendly_name
if description:
self._info['description'] = description
try:
self._api.datasets_update(self._name_parts, self._info)
except Exception as e:
raise e
finally:
self._info = None # need a refresh
def _retrieve_items(self, page_token, item_type):
try:
list_info = self._api.tables_list(self._name_parts, page_token=page_token)
except Exception as e:
raise e
tables = list_info.get('tables', [])
contents = []
if len(tables):
try:
for info in tables:
if info['type'] != item_type:
continue
if info['type'] == 'TABLE':
item = _table.Table((info['tableReference']['projectId'],
info['tableReference']['datasetId'],
info['tableReference']['tableId']), self._context)
else:
item = _view.View((info['tableReference']['projectId'],
info['tableReference']['datasetId'],
info['tableReference']['tableId']), self._context)
contents.append(item)
except KeyError:
raise Exception('Unexpected item list response')
page_token = list_info.get('nextPageToken', None)
return contents, page_token
def _retrieve_tables(self, page_token, _):
return self._retrieve_items(page_token=page_token, item_type='TABLE')
def _retrieve_views(self, page_token, _):
return self._retrieve_items(page_token=page_token, item_type='VIEW')
def tables(self):
""" Returns an iterator for iterating through the Tables in the dataset. """
return iter(datalab.utils.Iterator(self._retrieve_tables))
def views(self):
""" Returns an iterator for iterating through the Views in the dataset. """
return iter(datalab.utils.Iterator(self._retrieve_views))
def __iter__(self):
""" Returns an iterator for iterating through the Tables in the dataset. """
return self.tables()
def __str__(self):
"""Returns a string representation of the dataset using its specified name.
Returns:
The string representation of this object.
"""
return self._full_name
def __repr__(self):
"""Returns a representation for the dataset for showing in the notebook.
"""
return 'Dataset %s' % self._full_name
class Datasets(object):
""" Iterator class for enumerating the datasets in a project. """
def __init__(self, project_id=None, context=None):
""" Initialize the Datasets object.
Args:
project_id: the ID of the project whose datasets you want to list. If None defaults
to the project in the context.
context: an optional Context object providing project_id and credentials. If a specific
project id or credentials are unspecified, the default ones configured at the global
level are used.
"""
if context is None:
context = datalab.context.Context.default()
self._context = context
self._api = _api.Api(context)
self._project_id = project_id if project_id else self._api.project_id
def _retrieve_datasets(self, page_token, count):
try:
list_info = self._api.datasets_list(self._project_id, max_results=count,
page_token=page_token)
except Exception as e:
raise e
datasets = list_info.get('datasets', [])
if len(datasets):
try:
datasets = [Dataset((info['datasetReference']['projectId'],
info['datasetReference']['datasetId']), self._context)
for info in datasets]
except KeyError:
raise Exception('Unexpected response from server.')
page_token = list_info.get('nextPageToken', None)
return datasets, page_token
def __iter__(self):
""" Returns an iterator for iterating through the Datasets in the project.
"""
return iter(datalab.utils.Iterator(self._retrieve_datasets))
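# Usage sketch (illustrative only; project and dataset names are placeholders):
#
#   ds = Dataset('my-project:my_dataset')
#   if not ds.exists():
#       ds.create(friendly_name='My dataset', description='Scratch tables')
#   for table in ds.tables():
#       print(table)
#
#   for dataset in Datasets(project_id='my-project'):
#       print(dataset)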
| apache-2.0 |
elkingtonmcb/h2o-2 | py/testdir_hosts/test_parse_summary_zip_s3n_fvec.py | 9 | 2393 | import unittest, time, sys, random
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_browse as h2b, h2o_import as h2i
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init(1)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_parse_summary_zip_s3n_fvec(self):
csvFilelist = [
("test_set.zip", 300), # 110.9MB
("train_set.zip", 600), # 362.9MB
]
(importResult, importPattern) = h2i.import_only(bucket='h2o-datasets', path="allstate", schema='s3n')
        print "\nTrying StoreView after the s3n import"
h2o_cmd.runStoreView(timeoutSecs=120)
trial = 0
for (csvFilename, timeoutSecs) in csvFilelist:
trialStart = time.time()
csvPathname = csvFilename
# PARSE****************************************
csvPathname = "allstate/" + csvFilename
hex_key = csvFilename + "_" + str(trial) + ".hex"
start = time.time()
parseResult = h2i.import_parse(bucket='h2o-datasets', path=csvPathname, schema='s3n', hex_key=hex_key,
timeoutSecs=timeoutSecs, retryDelaySecs=10, pollTimeoutSecs=120)
elapsed = time.time() - start
print "parse end on ", parseResult['destination_key'], 'took', elapsed, 'seconds',\
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
# INSPECT******************************************
# We should be able to see the parse result?
start = time.time()
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'], timeoutSecs=360)
print "Inspect:", parseResult['destination_key'], "took", time.time() - start, "seconds"
h2o_cmd.infoFromInspect(inspect, csvPathname)
summaryResult = h2o_cmd.runSummary(key=hex_key, timeoutSecs=360)
h2o_cmd.infoFromSummary(summaryResult)
# STOREVIEW***************************************
print "\nTrying StoreView after the parse"
h2o_cmd.runStoreView(timeoutSecs=120)
print "Trial #", trial, "completed in", time.time() - trialStart, "seconds."
trial += 1
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
mlperf/training_results_v0.5 | v0.5.0/google/research_v3.32/gnmt-tpuv3-32/code/gnmt/model/t2t/tensor2tensor/data_generators/translate_encs.py | 3 | 3661 | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data generators for translation data-sets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.data_generators import text_problems
from tensor2tensor.data_generators import translate
from tensor2tensor.utils import registry
import tensorflow as tf
FLAGS = tf.flags.FLAGS
# End-of-sentence marker.
EOS = text_encoder.EOS_ID
_ENCS_TRAIN_DATASETS = [
[("https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/"
"11234/1-1458/data-plaintext-format.tar"),
("tsv", 3, 2, "data.plaintext-format/*train.gz")],
[
"http://data.statmt.org/wmt18/translation-task/training-parallel-nc-v13.tgz", # pylint: disable=line-too-long
("training-parallel-nc-v13/news-commentary-v13.cs-en.en",
"training-parallel-nc-v13/news-commentary-v13.cs-en.cs")
],
[
"http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz",
("commoncrawl.cs-en.en", "commoncrawl.cs-en.cs")
],
[
"http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz",
("training/europarl-v7.cs-en.en", "training/europarl-v7.cs-en.cs")
],
]
_ENCS_TEST_DATASETS = [
[
"http://data.statmt.org/wmt17/translation-task/dev.tgz",
("dev/newstest2013.en", "dev/newstest2013.cs")
],
]
@registry.register_problem
class TranslateEncsWmt32k(translate.TranslateProblem):
"""Problem spec for WMT English-Czech translation."""
@property
def approx_vocab_size(self):
return 2**15 # 32768
def source_data_files(self, dataset_split):
train = dataset_split == problem.DatasetSplit.TRAIN
return _ENCS_TRAIN_DATASETS if train else _ENCS_TEST_DATASETS
def vocab_data_files(self):
datasets = self.source_data_files(problem.DatasetSplit.TRAIN)
vocab_datasets = []
if datasets[0][0].endswith("data-plaintext-format.tar"):
vocab_datasets.append([
datasets[0][0], [
"%s-compiled-train.lang1" % self.name,
"%s-compiled-train.lang2" % self.name
]
])
datasets = datasets[1:]
vocab_datasets += [[item[0], [item[1][0], item[1][1]]] for item in datasets]
return vocab_datasets
@registry.register_problem
class TranslateEncsWmtCharacters(translate.TranslateProblem):
"""Problem spec for WMT En-Cs character-based translation."""
@property
def vocab_type(self):
return text_problems.VocabType.CHARACTER
def generate_samples(self, data_dir, tmp_dir, dataset_split):
train = dataset_split == problem.DatasetSplit.TRAIN
datasets = _ENCS_TRAIN_DATASETS if train else _ENCS_TEST_DATASETS
tag = "train" if train else "dev"
data_path = translate.compile_data(tmp_dir, datasets,
"wmt_encs_chr_%s" % tag)
return text_problems.text2text_txt_iterator(data_path + ".lang1",
data_path + ".lang2")
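# The registry is expected to expose these problems under their snake_case
# class names, i.e. "translate_encs_wmt32k" and "translate_encs_wmt_characters"
# (e.g. --problem=translate_encs_wmt32k for t2t-datagen / t2t-trainer); this
# follows tensor2tensor's naming convention rather than anything declared
# explicitly in this file.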
| apache-2.0 |
fx2003/tensorflow-study | TensorFlow实战/models/inception/inception/image_processing.py | 14 | 20499 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Read and preprocess image data.
Image processing occurs on a single image at a time. Image are read and
preprocessed in parallel across multiple threads. The resulting images
are concatenated together to form a single batch for training or evaluation.
-- Provide processed image data for a network:
inputs: Construct batches of evaluation examples of images.
distorted_inputs: Construct batches of training examples of images.
batch_inputs: Construct batches of training or evaluation examples of images.
-- Data processing:
parse_example_proto: Parses an Example proto containing a training example
of an image.
-- Image decoding:
decode_jpeg: Decode a JPEG encoded string into a 3-D float32 Tensor.
-- Image preprocessing:
image_preprocessing: Decode and preprocess one image for evaluation or training
distort_image: Distort one image for training a network.
eval_image: Prepare one image for evaluation.
distort_color: Distort the color in one image for training.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('batch_size', 32,
"""Number of images to process in a batch.""")
tf.app.flags.DEFINE_integer('image_size', 299,
"""Provide square images of this size.""")
tf.app.flags.DEFINE_integer('num_preprocess_threads', 4,
"""Number of preprocessing threads per tower. """
"""Please make this a multiple of 4.""")
tf.app.flags.DEFINE_integer('num_readers', 4,
"""Number of parallel readers during train.""")
# Images are preprocessed asynchronously using multiple threads specified by
# --num_preprocess_threads and the resulting processed images are stored in a
# random shuffling queue. The shuffling queue dequeues --batch_size images
# for processing on a given Inception tower. A larger shuffling queue guarantees
# better mixing across examples within a batch and results in slightly higher
# predictive performance in a trained model. Empirically,
# --input_queue_memory_factor=16 works well. A value of 16 implies a queue size
# of 1024*16 images. Assuming RGB 299x299 images, this implies a queue size of
# 16GB. If the machine is memory limited, then decrease this factor to
# decrease the CPU memory footprint, accordingly.
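# Worked sizing example (approximate, float32 RGB 299x299 images):
#   bytes per image ~ 299 * 299 * 3 * 4 ~ 1.07 MB
#   queue capacity  ~ examples_per_shard (1024) * input_queue_memory_factor (16)
#                   = 16384 images ~ 17.6 GB of host memory
# so halving --input_queue_memory_factor roughly halves that footprint.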
tf.app.flags.DEFINE_integer('input_queue_memory_factor', 16,
"""Size of the queue of preprocessed images. """
"""Default is ideal but try smaller values, e.g. """
"""4, 2 or 1, if host memory is constrained. See """
"""comments in code for more details.""")
def inputs(dataset, batch_size=None, num_preprocess_threads=None):
"""Generate batches of ImageNet images for evaluation.
Use this function as the inputs for evaluating a network.
Note that some (minimal) image preprocessing occurs during evaluation
including central cropping and resizing of the image to fit the network.
Args:
dataset: instance of Dataset class specifying the dataset.
batch_size: integer, number of examples in batch
num_preprocess_threads: integer, total number of preprocessing threads but
None defaults to FLAGS.num_preprocess_threads.
Returns:
images: Images. 4D tensor of size [batch_size, FLAGS.image_size,
image_size, 3].
labels: 1-D integer Tensor of [FLAGS.batch_size].
"""
if not batch_size:
batch_size = FLAGS.batch_size
# Force all input processing onto CPU in order to reserve the GPU for
# the forward inference and back-propagation.
with tf.device('/cpu:0'):
images, labels = batch_inputs(
dataset, batch_size, train=False,
num_preprocess_threads=num_preprocess_threads,
num_readers=1)
return images, labels
def distorted_inputs(dataset, batch_size=None, num_preprocess_threads=None):
"""Generate batches of distorted versions of ImageNet images.
Use this function as the inputs for training a network.
Distorting images provides a useful technique for augmenting the data
set during training in order to make the network invariant to aspects
  of the image that do not affect the label.
Args:
dataset: instance of Dataset class specifying the dataset.
batch_size: integer, number of examples in batch
num_preprocess_threads: integer, total number of preprocessing threads but
None defaults to FLAGS.num_preprocess_threads.
Returns:
images: Images. 4D tensor of size [batch_size, FLAGS.image_size,
FLAGS.image_size, 3].
labels: 1-D integer Tensor of [batch_size].
"""
if not batch_size:
batch_size = FLAGS.batch_size
# Force all input processing onto CPU in order to reserve the GPU for
# the forward inference and back-propagation.
with tf.device('/cpu:0'):
images, labels = batch_inputs(
dataset, batch_size, train=True,
num_preprocess_threads=num_preprocess_threads,
num_readers=FLAGS.num_readers)
return images, labels
def decode_jpeg(image_buffer, scope=None):
"""Decode a JPEG string into one 3-D float image Tensor.
Args:
image_buffer: scalar string Tensor.
scope: Optional scope for name_scope.
Returns:
3-D float Tensor with values ranging from [0, 1).
"""
with tf.name_scope(values=[image_buffer], name=scope,
default_name='decode_jpeg'):
# Decode the string as an RGB JPEG.
# Note that the resulting image contains an unknown height and width
# that is set dynamically by decode_jpeg. In other words, the height
# and width of image is unknown at compile-time.
image = tf.image.decode_jpeg(image_buffer, channels=3)
# After this point, all image pixels reside in [0,1)
# until the very end, when they're rescaled to (-1, 1). The various
# adjust_* ops all require this range for dtype float.
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
return image
def distort_color(image, thread_id=0, scope=None):
"""Distort the color of the image.
Each color distortion is non-commutative and thus ordering of the color ops
matters. Ideally we would randomly permute the ordering of the color ops.
  Rather than adding that level of complication, we select a distinct ordering
of color ops for each preprocessing thread.
Args:
image: Tensor containing single image.
thread_id: preprocessing thread ID.
scope: Optional scope for name_scope.
Returns:
color-distorted image
"""
with tf.name_scope(values=[image], name=scope, default_name='distort_color'):
color_ordering = thread_id % 2
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
elif color_ordering == 1:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
# The random_* ops do not necessarily clamp.
image = tf.clip_by_value(image, 0.0, 1.0)
return image
def distort_image(image, height, width, bbox, thread_id=0, scope=None):
"""Distort one image for training a network.
Distorting images provides a useful technique for augmenting the data
set during training in order to make the network invariant to aspects
  of the image that do not affect the label.
Args:
image: 3-D float Tensor of image
height: integer
width: integer
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged
as [ymin, xmin, ymax, xmax].
thread_id: integer indicating the preprocessing thread.
scope: Optional scope for name_scope.
Returns:
3-D float Tensor of distorted image used for training.
"""
with tf.name_scope(values=[image, height, width, bbox], name=scope,
default_name='distort_image'):
# Each bounding box has shape [1, num_boxes, box coords] and
# the coordinates are ordered [ymin, xmin, ymax, xmax].
# Display the bounding box in the first thread only.
if not thread_id:
image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
bbox)
tf.summary.image('image_with_bounding_boxes', image_with_box)
# A large fraction of image datasets contain a human-annotated bounding
# box delineating the region of the image containing the object of interest.
# We choose to create a new bounding box for the object which is a randomly
# distorted version of the human-annotated bounding box that obeys an allowed
# range of aspect ratios, sizes and overlap with the human-annotated
# bounding box. If no box is supplied, then we assume the bounding box is
# the entire image.
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
tf.shape(image),
bounding_boxes=bbox,
min_object_covered=0.1,
aspect_ratio_range=[0.75, 1.33],
area_range=[0.05, 1.0],
max_attempts=100,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box
if not thread_id:
image_with_distorted_box = tf.image.draw_bounding_boxes(
tf.expand_dims(image, 0), distort_bbox)
tf.summary.image('images_with_distorted_bounding_box',
image_with_distorted_box)
# Crop the image to the specified bounding box.
distorted_image = tf.slice(image, bbox_begin, bbox_size)
# This resizing operation may distort the images because the aspect
# ratio is not respected. We select a resize method in a round robin
# fashion based on the thread number.
# Note that ResizeMethod contains 4 enumerated resizing methods.
resize_method = thread_id % 4
distorted_image = tf.image.resize_images(distorted_image, [height, width],
method=resize_method)
# Restore the shape since the dynamic slice based upon the bbox_size loses
# the third dimension.
distorted_image.set_shape([height, width, 3])
if not thread_id:
tf.summary.image('cropped_resized_image',
tf.expand_dims(distorted_image, 0))
# Randomly flip the image horizontally.
distorted_image = tf.image.random_flip_left_right(distorted_image)
# Randomly distort the colors.
distorted_image = distort_color(distorted_image, thread_id)
if not thread_id:
tf.summary.image('final_distorted_image',
tf.expand_dims(distorted_image, 0))
return distorted_image
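# The round-robin resize above maps thread_id % 4 onto the four enumerated
# tf.image.ResizeMethod values (BILINEAR=0, NEAREST_NEIGHBOR=1, BICUBIC=2,
# AREA=3), so different preprocessing threads use different interpolation
# kernels; the numeric enum values are an assumption based on the TF 1.x API.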
def eval_image(image, height, width, scope=None):
"""Prepare one image for evaluation.
Args:
image: 3-D float Tensor
height: integer
width: integer
scope: Optional scope for name_scope.
Returns:
3-D float Tensor of prepared image.
"""
with tf.name_scope(values=[image, height, width], name=scope,
default_name='eval_image'):
# Crop the central region of the image with an area containing 87.5% of
# the original image.
image = tf.image.central_crop(image, central_fraction=0.875)
# Resize the image to the original height and width.
image = tf.expand_dims(image, 0)
image = tf.image.resize_bilinear(image, [height, width],
align_corners=False)
image = tf.squeeze(image, [0])
return image
def image_preprocessing(image_buffer, bbox, train, thread_id=0):
"""Decode and preprocess one image for evaluation or training.
Args:
image_buffer: JPEG encoded string Tensor
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged as
[ymin, xmin, ymax, xmax].
train: boolean
thread_id: integer indicating preprocessing thread
Returns:
3-D float Tensor containing an appropriately scaled image
Raises:
ValueError: if user does not provide bounding box
"""
if bbox is None:
raise ValueError('Please supply a bounding box.')
image = decode_jpeg(image_buffer)
height = FLAGS.image_size
width = FLAGS.image_size
if train:
image = distort_image(image, height, width, bbox, thread_id)
else:
image = eval_image(image, height, width)
# Finally, rescale to [-1,1] instead of [0, 1)
image = tf.subtract(image, 0.5)
image = tf.multiply(image, 2.0)
return image
def parse_example_proto(example_serialized):
"""Parses an Example proto containing a training example of an image.
The output of the build_image_data.py image preprocessing script is a dataset
containing serialized Example protocol buffers. Each Example proto contains
the following fields:
image/height: 462
image/width: 581
image/colorspace: 'RGB'
image/channels: 3
image/class/label: 615
image/class/synset: 'n03623198'
image/class/text: 'knee pad'
image/object/bbox/xmin: 0.1
image/object/bbox/xmax: 0.9
image/object/bbox/ymin: 0.2
image/object/bbox/ymax: 0.6
image/object/bbox/label: 615
image/format: 'JPEG'
image/filename: 'ILSVRC2012_val_00041207.JPEG'
image/encoded: <JPEG encoded string>
Args:
example_serialized: scalar Tensor tf.string containing a serialized
Example protocol buffer.
Returns:
image_buffer: Tensor tf.string containing the contents of a JPEG file.
label: Tensor tf.int32 containing the label.
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged as
[ymin, xmin, ymax, xmax].
text: Tensor tf.string containing the human-readable label.
"""
# Dense features in Example proto.
feature_map = {
'image/encoded': tf.FixedLenFeature([], dtype=tf.string,
default_value=''),
'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64,
default_value=-1),
'image/class/text': tf.FixedLenFeature([], dtype=tf.string,
default_value=''),
}
sparse_float32 = tf.VarLenFeature(dtype=tf.float32)
# Sparse features in Example proto.
feature_map.update(
{k: sparse_float32 for k in ['image/object/bbox/xmin',
'image/object/bbox/ymin',
'image/object/bbox/xmax',
'image/object/bbox/ymax']})
features = tf.parse_single_example(example_serialized, feature_map)
label = tf.cast(features['image/class/label'], dtype=tf.int32)
xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0)
ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0)
xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0)
ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0)
# Note that we impose an ordering of (y, x) just to make life difficult.
bbox = tf.concat(axis=0, values=[ymin, xmin, ymax, xmax])
# Force the variable number of bounding boxes into the shape
# [1, num_boxes, coords].
bbox = tf.expand_dims(bbox, 0)
bbox = tf.transpose(bbox, [0, 2, 1])
return features['image/encoded'], label, bbox, features['image/class/text']
def batch_inputs(dataset, batch_size, train, num_preprocess_threads=None,
num_readers=1):
"""Contruct batches of training or evaluation examples from the image dataset.
Args:
dataset: instance of Dataset class specifying the dataset.
See dataset.py for details.
batch_size: integer
train: boolean
num_preprocess_threads: integer, total number of preprocessing threads
num_readers: integer, number of parallel readers
Returns:
images: 4-D float Tensor of a batch of images
labels: 1-D integer Tensor of [batch_size].
Raises:
ValueError: if data is not found
"""
with tf.name_scope('batch_processing'):
data_files = dataset.data_files()
if data_files is None:
raise ValueError('No data files found for this dataset')
# Create filename_queue
if train:
filename_queue = tf.train.string_input_producer(data_files,
shuffle=True,
capacity=16)
else:
filename_queue = tf.train.string_input_producer(data_files,
shuffle=False,
capacity=1)
if num_preprocess_threads is None:
num_preprocess_threads = FLAGS.num_preprocess_threads
if num_preprocess_threads % 4:
raise ValueError('Please make num_preprocess_threads a multiple '
                       'of 4 (%d %% 4 != 0).' % num_preprocess_threads)
if num_readers is None:
num_readers = FLAGS.num_readers
if num_readers < 1:
raise ValueError('Please make num_readers at least 1')
# Approximate number of examples per shard.
examples_per_shard = 1024
# Size the random shuffle queue to balance between good global
# mixing (more examples) and memory use (fewer examples).
# 1 image uses 299*299*3*4 bytes = 1MB
# The default input_queue_memory_factor is 16 implying a shuffling queue
# size: examples_per_shard * 16 * 1MB = 17.6GB
min_queue_examples = examples_per_shard * FLAGS.input_queue_memory_factor
if train:
examples_queue = tf.RandomShuffleQueue(
capacity=min_queue_examples + 3 * batch_size,
min_after_dequeue=min_queue_examples,
dtypes=[tf.string])
else:
examples_queue = tf.FIFOQueue(
capacity=examples_per_shard + 3 * batch_size,
dtypes=[tf.string])
# Create multiple readers to populate the queue of examples.
if num_readers > 1:
enqueue_ops = []
for _ in range(num_readers):
reader = dataset.reader()
_, value = reader.read(filename_queue)
enqueue_ops.append(examples_queue.enqueue([value]))
tf.train.queue_runner.add_queue_runner(
tf.train.queue_runner.QueueRunner(examples_queue, enqueue_ops))
example_serialized = examples_queue.dequeue()
else:
reader = dataset.reader()
_, example_serialized = reader.read(filename_queue)
images_and_labels = []
for thread_id in range(num_preprocess_threads):
# Parse a serialized Example proto to extract the image and metadata.
image_buffer, label_index, bbox, _ = parse_example_proto(
example_serialized)
image = image_preprocessing(image_buffer, bbox, train, thread_id)
images_and_labels.append([image, label_index])
images, label_index_batch = tf.train.batch_join(
images_and_labels,
batch_size=batch_size,
capacity=2 * num_preprocess_threads * batch_size)
# Reshape images into these desired dimensions.
height = FLAGS.image_size
width = FLAGS.image_size
depth = 3
images = tf.cast(images, tf.float32)
images = tf.reshape(images, shape=[batch_size, height, width, depth])
# Display the training images in the visualizer.
tf.summary.image('images', images)
return images, tf.reshape(label_index_batch, [batch_size])
| mit |
Lawrence-Liu/crab | scikits/crab/recommenders/knn/neighborhood_strategies.py | 10 | 4860 | """
Strategies for users selection to be a
possible candidate to be member of a user neighborhood.
Please check the base.BaseUserNeighborhoodStrategy before
implement your own strategy.
"""
# Author: Marcel Caraciolo <marcel@muricoca.com>
#
# License: BSD Style.
from base import BaseUserNeighborhoodStrategy
import numpy as np
from ...similarities.basic_similarities import UserSimilarity
from ...metrics.pairwise import euclidean_distances
class AllNeighborsStrategy(BaseUserNeighborhoodStrategy):
'''
Returns
--------
Returns all users in the model.
This strategy is not recommended for large datasets and
    it is the most naive one.
'''
def user_neighborhood(self, user_id, data_model, similarity='user_similarity',
distance=None, nhood_size=None, **params):
'''
Computes a neighborhood consisting of the n users to a given user
based on the strategy implemented in this method.
Parameters
-----------
user_id: int or string
ID of user for which to find most similar other users
data_model: DataModel instance
The data model that will be the source for the possible
candidates
similarity: string
The similarity to compute the neighborhood (default = 'user_similarity')
            Allowed values: 'user_similarity'
distance: function
Pairwise metric to compute the similarity between the users.
nhood_size: int
The neighborhood size (default = None all users)
'''
user_ids = data_model.user_ids()
return user_ids[user_ids != user_id] if user_ids.size else user_ids
class NearestNeighborsStrategy(BaseUserNeighborhoodStrategy):
'''
Returns
--------
Returns the neighborhood consisting of the nearest n
users to a given user. "Nearest" in this context is
defined by the Similarity.
Parameters
-----------
user_id: int or string
ID of user for which to find most similar other users
data_model: DataModel instance
The data model that will be the source for the possible
candidates
similarity: string
The similarity to compute the neighborhood (default = 'user_similarity')
        Allowed values: 'user_similarity'
distance: function
Pairwise metric to compute the similarity between the users.
nhood_size: int
The neighborhood size (default = None all users)
'''
def __init__(self):
self.similarity = None
def _sampling(self, data_model, sampling_rate):
#TODO: Still to be implemented in a best way
return data_model
def _set_similarity(self, data_model, similarity, distance, nhood_size):
if not isinstance(self.similarity, UserSimilarity) \
or not distance == self.similarity.distance:
nhood_size = nhood_size if not nhood_size else nhood_size + 1
self.similarity = UserSimilarity(data_model, distance, nhood_size)
def user_neighborhood(self, user_id, data_model, n_similarity='user_similarity',
distance=None, nhood_size=None, **params):
'''
Computes a neighborhood consisting of the n users to a given
user based on the strategy implemented in this method.
Parameters
-----------
user_id: int or string
ID of user for which to find most similar other users
data_model: DataModel instance
The data model that will be the source for the possible
candidates
n_similarity: string
The similarity to compute the neighborhood (Default = 'user_similarity')
nhood_size: int
The neighborhood size (default = None all users)
Optional Parameters
--------------------
minimal_similarity: float
minimal similarity required for neighbors (default = 0.0)
sampling_rate: int
percentage of users to consider when building neighborhood
(default = 1)
'''
minimal_similarity = params.get('minimal_similarity', 0.0)
sampling_rate = params.get('sampling_rate', 1.0)
data_model = self._sampling(data_model, sampling_rate)
#set the nhood_size at Similarity , and use Similarity to get the top_users
if distance is None:
distance = euclidean_distances
if n_similarity == 'user_similarity':
self._set_similarity(data_model, n_similarity, distance, nhood_size)
else:
raise ValueError('similarity argument must be user_similarity')
neighborhood = [to_user_id for to_user_id, score in self.similarity[user_id] \
if not np.isnan(score) and score >= minimal_similarity and user_id != to_user_id]
return neighborhood
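# Usage sketch (illustrative; `model` stands for any crab DataModel instance):
#
#   strategy = NearestNeighborsStrategy()
#   neighbors = strategy.user_neighborhood('user_1', model,
#                                          n_similarity='user_similarity',
#                                          distance=euclidean_distances,
#                                          nhood_size=10,
#                                          minimal_similarity=0.0)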
| bsd-3-clause |
marionleborgne/nupic.research | projects/imbu/engine/fluent_api.py | 11 | 6161 | # ----------------------------------------------------------------------
# Copyright (C) 2015, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Implements Imbu's web API.
"""
import simplejson as json
import logging
import os
import pkg_resources
import web
from htmresearch.frameworks.nlp.imbu import ImbuModels
from htmresearch.frameworks.nlp.model_factory import ClassificationModelTypes
g_log = logging.getLogger(__name__)
# No training in Imbu web app, user must specify loadPath
if "IMBU_LOAD_PATH_PREFIX" in os.environ:
_IMBU_LOAD_PATH_PREFIX = os.environ["IMBU_LOAD_PATH_PREFIX"]
else:
raise KeyError("Required IMBU_LOAD_PATH_PREFIX missing from environment")
g_imbus = {} # Global ImbuModels cache
g_models = {} # Global NLP model cache
for datasetName in os.listdir(_IMBU_LOAD_PATH_PREFIX):
datasetPath = os.path.join(_IMBU_LOAD_PATH_PREFIX, datasetName)
if os.path.isdir(datasetPath) and "egg" not in datasetPath:
# Create an imbu instance for each dataset
imbu = ImbuModels(
cacheRoot=os.environ.get("MODEL_CACHE_DIR", os.getcwd()),
modelSimilarityMetric=None,
dataPath=os.path.join(datasetPath, "data.csv"),
retina=os.environ["IMBU_RETINA_ID"],
apiKey=os.environ["CORTICAL_API_KEY"]
)
g_imbus.update(((datasetName, imbu),))
# Init the dict for this dataset's models
g_models[datasetName] = {}
def addStandardHeaders(contentType="application/json; charset=UTF-8"):
"""
Add Standard HTTP Headers ("Content-Type", "Server") to the response.
Here is an example of the headers added by this method using the default
values::
Content-Type: application/json; charset=UTF-8
Server: Imbu x.y.z
  :param contentType: The value for the "Content-Type" header.
(default "application/json; charset=UTF-8")
"""
web.header("Server", "Imbu 1.0.0", True)
web.header("Content-Type", contentType, True)
def addCORSHeaders():
"""
Add CORS (http://www.w3.org/TR/cors/) headers
"""
web.header("Access-Control-Allow-Origin", "*", True)
web.header("Access-Control-Allow-Headers",
"accept, access-control-allow-origin, content-type", True)
web.header("Access-Control-Allow-Credentials", "true", True)
web.header("Access-Control-Allow-Methods", "POST", True)
class FluentWrapper(object):
def query(self, dataset, model, text):
"""
Queries the model (which is specific to this dataset) and returns a dict of
matching documents.
:param str dataset: Dataset name, specifying the ImbuModels instance to use.
Possible values correspond to data dirs in _IMBU_LOAD_PATH_PREFIX.
:param str model: Name of the model to use. Possible values are mapped to
classes in the NLP model factory.
:param str text: The text to match.
:returns: dict of results.
::
{
"0": {"text": "sampleText", "scores": [0.75, ...]},
...
}
"""
global g_imbus
global g_models
if model not in g_models[dataset]:
loadPath = os.path.join(_IMBU_LOAD_PATH_PREFIX, dataset, model)
g_models[dataset][model] = g_imbus[dataset].createModel(
model, str(loadPath), None)
if text:
_, sortedIds, sortedDistances = g_imbus[dataset].query(
g_models[dataset][model], text)
return g_imbus[dataset].formatResults(model, text, sortedDistances, sortedIds)
else:
return {}
class DefaultHandler(object):
def GET(self, *args): # pylint: disable=R0201,C0103
addStandardHeaders("text/html; charset=UTF-8")
return "<html><body><h1>Welcome to Nupic Fluent</h1></body></html>"
class DatasetsHandler(object):
"""Handles Dataset requests"""
def GET(self, *args):
"""Use '/fluent/datasets' to get list of available datasets"""
addStandardHeaders()
addCORSHeaders()
return json.dumps(g_imbus.keys())
class FluentAPIHandler(object):
"""Handles API requests"""
def OPTIONS(self, modelName=ImbuModels.defaultModelType): # pylint: disable=R0201,C0103
addStandardHeaders()
addCORSHeaders()
def GET(self, *args):
""" GET global ready status. Returns "true" when all models have been
created and are ready for queries.
"""
addStandardHeaders()
addCORSHeaders()
return json.dumps(True)
def POST(self,
modelName=ImbuModels.defaultModelType,
dataset=ImbuModels.defaultDataset): # pylint: disable=R0201,C0103
addStandardHeaders()
addCORSHeaders()
response = {}
data = web.data()
if data:
if isinstance(data, basestring):
response = g_fluent.query(dataset, modelName, data)
else:
raise web.badrequest("Invalid Data. Query data must be a string")
if len(response) == 0:
# No data, just return all samples
# See "ImbuModels.formatResults" for expected format
for item in g_imbus[dataset].dataDict.items():
response[item[0]] = {"text": item[1][0], "scores": [0]}
return json.dumps(response)
urls = (
"", "DefaultHandler",
"/", "DefaultHandler",
"/fluent", "FluentAPIHandler",
"/fluent/datasets", "DatasetsHandler",
"/fluent/(.*)/(.*)", "FluentAPIHandler",
"/fluent/(.*)", "FluentAPIHandler"
)
app = web.application(urls, globals())
# Create Imbu model runner
g_fluent = FluentWrapper()
# Required by uWSGI per WSGI spec
application = app.wsgifunc()
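# Example query against a running instance (host, model and dataset names are
# placeholders; the route shape follows the `urls` table above):
#
#   curl -X POST http://localhost:8080/fluent/CioWordFingerprint/sample_reviews \
#        -d "photo sharing is broken"
#
# An empty request body falls through to the branch in FluentAPIHandler.POST
# that returns every sample with a zero score.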
| agpl-3.0 |
Diyago/Machine-Learning-scripts | DEEP LEARNING/segmentation/Kaggle TGS Salt Identification Challenge/v2/modules/functions.py | 1 | 10930 | import torch.autograd as autograd
import torch.cuda.comm as comm
from torch.autograd.function import once_differentiable
from . import _ext
# Activation names
ACT_LEAKY_RELU = "leaky_relu"
ACT_ELU = "elu"
ACT_NONE = "none"
def _check(fn, *args, **kwargs):
success = fn(*args, **kwargs)
if not success:
raise RuntimeError("CUDA Error encountered in {}".format(fn))
def _broadcast_shape(x):
out_size = []
for i, s in enumerate(x.size()):
if i != 1:
out_size.append(1)
else:
out_size.append(s)
return out_size
def _reduce(x):
if len(x.size()) == 2:
return x.sum(dim=0)
else:
n, c = x.size()[0:2]
return x.contiguous().view((n, c, -1)).sum(2).sum(0)
def _count_samples(x):
count = 1
for i, s in enumerate(x.size()):
if i != 1:
count *= s
return count
def _act_forward(ctx, x):
if ctx.activation == ACT_LEAKY_RELU:
_check(_ext.leaky_relu_cuda, x, ctx.slope)
elif ctx.activation == ACT_ELU:
_check(_ext.elu_cuda, x)
elif ctx.activation == ACT_NONE:
pass
def _act_backward(ctx, x, dx):
if ctx.activation == ACT_LEAKY_RELU:
_check(_ext.leaky_relu_backward_cuda, x, dx, ctx.slope)
_check(_ext.leaky_relu_cuda, x, 1.0 / ctx.slope)
elif ctx.activation == ACT_ELU:
_check(_ext.elu_backward_cuda, x, dx)
_check(_ext.elu_inv_cuda, x)
elif ctx.activation == ACT_NONE:
pass
def _check_contiguous(*args):
if not all([mod is None or mod.is_contiguous() for mod in args]):
raise ValueError("Non-contiguous input")
class InPlaceABN(autograd.Function):
@staticmethod
def forward(
ctx,
x,
weight,
bias,
running_mean,
running_var,
training=True,
momentum=0.1,
eps=1e-05,
activation=ACT_LEAKY_RELU,
slope=0.01,
):
# Save context
ctx.training = training
ctx.momentum = momentum
ctx.eps = eps
ctx.activation = activation
ctx.slope = slope
n = _count_samples(x)
if ctx.training:
mean = x.new().resize_as_(running_mean)
var = x.new().resize_as_(running_var)
_check_contiguous(x, mean, var)
_check(_ext.bn_mean_var_cuda, x, mean, var)
# Update running stats
running_mean.mul_((1 - ctx.momentum)).add_(ctx.momentum * mean)
running_var.mul_((1 - ctx.momentum)).add_(ctx.momentum * var * n / (n - 1))
else:
mean, var = running_mean, running_var
_check_contiguous(x, mean, var, weight, bias)
_check(
_ext.bn_forward_cuda,
x,
mean,
var,
weight if weight is not None else x.new(),
bias if bias is not None else x.new(),
x,
x,
ctx.eps,
)
# Activation
_act_forward(ctx, x)
# Output
ctx.var = var
ctx.save_for_backward(x, weight, bias, running_mean, running_var)
ctx.mark_dirty(x)
return x
@staticmethod
@once_differentiable
def backward(ctx, dz):
z, weight, bias, running_mean, running_var = ctx.saved_tensors
dz = dz.contiguous()
# Undo activation
_act_backward(ctx, z, dz)
if ctx.needs_input_grad[0]:
dx = dz.new().resize_as_(dz)
else:
dx = None
if ctx.needs_input_grad[1]:
dweight = dz.new().resize_as_(running_mean).zero_()
else:
dweight = None
if ctx.needs_input_grad[2]:
dbias = dz.new().resize_as_(running_mean).zero_()
else:
dbias = None
if ctx.training:
edz = dz.new().resize_as_(running_mean)
eydz = dz.new().resize_as_(running_mean)
_check_contiguous(z, dz, weight, bias, edz, eydz)
_check(
_ext.bn_edz_eydz_cuda,
z,
dz,
weight if weight is not None else dz.new(),
bias if bias is not None else dz.new(),
edz,
eydz,
ctx.eps,
)
else:
# TODO: implement CUDA backward for inference mode
edz = dz.new().resize_as_(running_mean).zero_()
eydz = dz.new().resize_as_(running_mean).zero_()
_check_contiguous(dz, z, ctx.var, weight, bias, edz, eydz, dx, dweight, dbias)
_check(
_ext.bn_backard_cuda,
dz,
z,
ctx.var,
weight if weight is not None else dz.new(),
bias if bias is not None else dz.new(),
edz,
eydz,
dx if dx is not None else dz.new(),
dweight if dweight is not None else dz.new(),
dbias if dbias is not None else dz.new(),
ctx.eps,
)
del ctx.var
return dx, dweight, dbias, None, None, None, None, None, None, None
class InPlaceABNSync(autograd.Function):
@classmethod
def forward(
cls,
ctx,
x,
weight,
bias,
running_mean,
running_var,
extra,
training=True,
momentum=0.1,
eps=1e-05,
activation=ACT_LEAKY_RELU,
slope=0.01,
):
# Save context
cls._parse_extra(ctx, extra)
ctx.training = training
ctx.momentum = momentum
ctx.eps = eps
ctx.activation = activation
ctx.slope = slope
n = _count_samples(x) * (ctx.master_queue.maxsize + 1)
if ctx.training:
mean = x.new().resize_(1, running_mean.size(0))
var = x.new().resize_(1, running_var.size(0))
_check_contiguous(x, mean, var)
_check(_ext.bn_mean_var_cuda, x, mean, var)
if ctx.is_master:
means, vars = [mean], [var]
for _ in range(ctx.master_queue.maxsize):
mean_w, var_w = ctx.master_queue.get()
ctx.master_queue.task_done()
means.append(mean_w)
vars.append(var_w)
means = comm.gather(means)
vars = comm.gather(vars)
mean = means.mean(0)
var = (vars + (mean - means) ** 2).mean(0)
tensors = comm.broadcast_coalesced(
(mean, var), [mean.get_device()] + ctx.worker_ids
)
for ts, queue in zip(tensors[1:], ctx.worker_queues):
queue.put(ts)
else:
ctx.master_queue.put((mean, var))
mean, var = ctx.worker_queue.get()
ctx.worker_queue.task_done()
# Update running stats
running_mean.mul_((1 - ctx.momentum)).add_(ctx.momentum * mean)
running_var.mul_((1 - ctx.momentum)).add_(ctx.momentum * var * n / (n - 1))
else:
mean, var = running_mean, running_var
_check_contiguous(x, mean, var, weight, bias)
_check(
_ext.bn_forward_cuda,
x,
mean,
var,
weight if weight is not None else x.new(),
bias if bias is not None else x.new(),
x,
x,
ctx.eps,
)
# Activation
_act_forward(ctx, x)
# Output
ctx.var = var
ctx.save_for_backward(x, weight, bias, running_mean, running_var)
ctx.mark_dirty(x)
return x
@staticmethod
@once_differentiable
def backward(ctx, dz):
z, weight, bias, running_mean, running_var = ctx.saved_tensors
dz = dz.contiguous()
# Undo activation
_act_backward(ctx, z, dz)
if ctx.needs_input_grad[0]:
dx = dz.new().resize_as_(dz)
else:
dx = None
if ctx.needs_input_grad[1]:
dweight = dz.new().resize_as_(running_mean).zero_()
else:
dweight = None
if ctx.needs_input_grad[2]:
dbias = dz.new().resize_as_(running_mean).zero_()
else:
dbias = None
if ctx.training:
edz = dz.new().resize_as_(running_mean)
eydz = dz.new().resize_as_(running_mean)
_check_contiguous(z, dz, weight, bias, edz, eydz)
_check(
_ext.bn_edz_eydz_cuda,
z,
dz,
weight if weight is not None else dz.new(),
bias if bias is not None else dz.new(),
edz,
eydz,
ctx.eps,
)
if ctx.is_master:
edzs, eydzs = [edz], [eydz]
for _ in range(len(ctx.worker_queues)):
edz_w, eydz_w = ctx.master_queue.get()
ctx.master_queue.task_done()
edzs.append(edz_w)
eydzs.append(eydz_w)
edz = comm.reduce_add(edzs) / (ctx.master_queue.maxsize + 1)
eydz = comm.reduce_add(eydzs) / (ctx.master_queue.maxsize + 1)
tensors = comm.broadcast_coalesced(
(edz, eydz), [edz.get_device()] + ctx.worker_ids
)
for ts, queue in zip(tensors[1:], ctx.worker_queues):
queue.put(ts)
else:
ctx.master_queue.put((edz, eydz))
edz, eydz = ctx.worker_queue.get()
ctx.worker_queue.task_done()
else:
edz = dz.new().resize_as_(running_mean).zero_()
eydz = dz.new().resize_as_(running_mean).zero_()
_check_contiguous(dz, z, ctx.var, weight, bias, edz, eydz, dx, dweight, dbias)
_check(
_ext.bn_backard_cuda,
dz,
z,
ctx.var,
weight if weight is not None else dz.new(),
bias if bias is not None else dz.new(),
edz,
eydz,
dx if dx is not None else dz.new(),
dweight if dweight is not None else dz.new(),
dbias if dbias is not None else dz.new(),
ctx.eps,
)
del ctx.var
return dx, dweight, dbias, None, None, None, None, None, None, None, None
@staticmethod
def _parse_extra(ctx, extra):
ctx.is_master = extra["is_master"]
if ctx.is_master:
ctx.master_queue = extra["master_queue"]
ctx.worker_queues = extra["worker_queues"]
ctx.worker_ids = extra["worker_ids"]
else:
ctx.master_queue = extra["master_queue"]
ctx.worker_queue = extra["worker_queue"]
inplace_abn = InPlaceABN.apply
inplace_abn_sync = InPlaceABNSync.apply
__all__ = ["inplace_abn", "inplace_abn_sync"]
| apache-2.0 |
shijx12/DeepSim | deepSimGAN/util.py | 1 | 8623 | from lib.datasets.factory import get_imdb
import numpy as np
import tensorflow as tf
import cv2
import random
import cfg
class DataFetcher:
def __init__(self, imdb_name, resize=True):
imdb = get_imdb(imdb_name)
# Ignore the background class, so every label in anno['gt_classes'] must be shifted down by 1.
self.classes = [ np.zeros(imdb.num_classes - 1) for i in range(imdb.num_images) ]
for i, anno in enumerate(imdb.gt_roidb()):
np.put(self.classes[i], list(map(lambda x: x - 1, anno['gt_classes'])), 1)
# np.put(self.classes[i], random.choice(map(lambda x: x-1, anno['gt_classes'])), 1)
self.images = [ imdb.image_path_at(i) for i in range(imdb.num_images) ]
assert len(self.classes) == len(self.images)
self._perm = np.random.permutation(np.arange(len(self.images)))
self._cur = 0
self.resize = resize
def nextbatch(self, batch_size=1):
# If all images have been visited, permute the order again.
blobs = {'data':[], 'path':[], 'classes':[], 'keep_prob':[], 'im_info':[]}
for batch in range(batch_size):
if self._cur >= len(self.images):
self._cur = 0
self._perm = np.random.permutation(np.arange(len(self.images)))
i = self._perm[self._cur]
self._cur += 1
im = cv2.imread(self.images[i]).astype(np.float32, copy=False)
if self.resize:
# Use a separate loop variable to avoid shadowing the image index `i` above.
im = np.stack([cv2.resize(im[:,:,c], (cfg.RESIZED_SIZE, cfg.RESIZED_SIZE)) for c in range(im.shape[2])], axis=2)
blobs['data'].append(im)
blobs['path'].append(self.images[i])
blobs['classes'].append(self.classes[i])
blobs['keep_prob'].append(0.5)
# im_info: a list of [image_height, image_width, scale_ratios]
# im_scale=1, that is, we don't scale the original image size.
blobs['im_info'].append([im.shape[0], im.shape[1], 1])
return blobs
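# --- Illustrative sketch (not part of the original file) ---------------------
# How DataFetcher might be driven from a training loop. The imdb name
# 'voc_2007_trainval' and the placeholder names in the comments are example
# values only.
def _example_datafetcher_usage(num_steps=10):
    fetcher = DataFetcher('voc_2007_trainval', resize=True)
    for _ in range(num_steps):
        blobs = fetcher.nextbatch(batch_size=4)
        # blobs['data'] holds HxWx3 float32 images (resized to cfg.RESIZED_SIZE);
        # blobs['classes'] holds the corresponding multi-hot label vectors.
        images, labels = blobs['data'], blobs['classes']
        # A real loop would feed these into the graph, e.g.:
        # sess.run(train_op, feed_dict={image_ph: images, label_ph: labels})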
def crop(image, resized_size, cropped_size):
# image is of arbitrary size.
# return a Tensor representing image of size cropped_size x cropped_size
image = tf.image.resize_images(image, [resized_size, resized_size], method=tf.image.ResizeMethod.AREA)
offset = tf.cast(tf.floor(tf.random_uniform([2], 0, resized_size - cropped_size + 1)), dtype=tf.int32)
image = tf.image.crop_to_bounding_box(image, offset[0], offset[1], cropped_size, cropped_size)
return image
def subtract_mean(image):
# image is a Tensor.
# return a Tensor.
image = tf.cast(image, dtype=tf.float32)
return image - tf.convert_to_tensor(cfg.PIXEL_MEANS, dtype=tf.float32)
def prep(image):
# change range from [0, 256) to [-1, 1]
# image is a Tensor.
# return a float32 Tensor.
image = tf.cast(image, dtype=tf.float32)
return (image / 255.0) * 2 - 1
def invprep(image):
# change range from [-1, 1] to [0, 256)
# image is a float32 Tensor.
# return a uint8 Tensor.
image = (image + 1) / 2.0 * 255.9
return image
def bgr2rgb(image):
image = tf.cast(image, dtype=tf.uint8)
return image[:,:,:,::-1]
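# --- Illustrative sketch (not part of the original file) ---------------------
# Wiring crop/prep into an input pipeline and invprep/bgr2rgb back into a
# displayable summary image. The placeholder, summary names and crop size are
# example values only.
def _example_image_pipeline(cropped_size=224):
    raw = tf.placeholder(tf.float32, shape=[None, None, 3], name='raw_image')
    cropped = crop(raw, cfg.RESIZED_SIZE, cropped_size)
    net_input = prep(cropped)  # scaled to [-1, 1] for the network
    # Invert the preprocessing and flip BGR -> RGB for visualization.
    display = bgr2rgb(invprep(tf.expand_dims(net_input, 0)))
    tf.summary.image('input_image', display)
    return net_input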
def make_var(name, shape, initializer=None, trainable=True, regularizer=None):
return tf.get_variable(name, shape, initializer=initializer, trainable=trainable, regularizer=regularizer)
def l2_regularizer(weight_decay=0.0005, scope=None):
def regularizer(tensor):
with tf.name_scope(scope, default_name='l2_regularizer', values=[tensor]):
l2_weight = tf.convert_to_tensor(weight_decay, dtype=tensor.dtype.base_dtype, name='weight_decay')
return tf.multiply(l2_weight, tf.nn.l2_loss(tensor), name='value')
return regularizer
def batch_norm(input, scope='batchnorm'):
with tf.variable_scope(scope):
input = tf.identity(input)
dims = input.get_shape()
if len(dims) == 4:
channels = dims[3]
offset = tf.get_variable("offset", [channels], dtype=tf.float32, initializer=tf.zeros_initializer())
scale = tf.get_variable("scale", [channels], dtype=tf.float32, initializer=tf.random_normal_initializer(1.0, 0.02))
mean, variance = tf.nn.moments(input, axes=[0, 1, 2], keep_dims=False)
elif len(dims) == 2:
channels = dims[1]
offset = tf.get_variable("offset", [channels], dtype=tf.float32, initializer=tf.zeros_initializer())
scale = tf.get_variable("scale", [channels], dtype=tf.float32, initializer=tf.random_normal_initializer(1.0, 0.02))
mean, variance = tf.nn.moments(input, axes=[0], keep_dims=False)
variance_epsilon = 1e-5
normalized = tf.nn.batch_normalization(input, mean, variance, offset, scale, variance_epsilon=variance_epsilon)
return normalized
def leaky_relu(input, alpha=0.3, name='leaky_relu'):
return tf.maximum(alpha*input, input, name)
def conv(input, k_h, k_w, c_o, s_h, s_w, name, biased=True, activation='relu', bn=False, init='msra', pad='SAME', trainable=True):
c_i = input.get_shape()[-1] # channel_input
with tf.variable_scope(name) as scope:
if init == 'msra':
init_weights = tf.contrib.layers.variance_scaling_initializer(factor=2.0, mode='FAN_IN', uniform=False)
else:
raise Exception('Invalid init')
kernel = make_var('weights', [k_h, k_w, c_i, c_o], init_weights, trainable, regularizer=l2_regularizer(cfg.WEIGHT_DECAY))
h = tf.nn.conv2d(input, kernel, [1, s_h, s_w, 1], padding=pad)
if biased:
init_bias = tf.constant_initializer(0.0)
bias = make_var('biases', [c_o], init_bias, trainable)
h = tf.nn.bias_add(h, bias)
if bn:
h = batch_norm(h)
if activation == 'relu':
h = tf.nn.relu(h)
elif activation == 'leaky_relu':
h = leaky_relu(h)
return h
def upconv(input, c_o, ksize, stride, name, biased=False, activation='relu', bn=False, init='msra', pad='SAME', trainable=True):
c_i = input.get_shape()[-1] # channel_input
in_shape = tf.shape(input)
if pad == 'SAME':
output_shape = [in_shape[0], in_shape[1]*stride, in_shape[2]*stride, c_o]
else:
raise Exception('Padding VALID is not supported')
kernel_shape = [ksize, ksize, c_o, c_i]
with tf.variable_scope(name) as scope:
if init == 'msra':
init_weights = tf.contrib.layers.variance_scaling_initializer(factor=2.0, mode='FAN_IN', uniform=False)
else:
raise Exception('Invalid init')
kernel = make_var('weights', kernel_shape, init_weights, trainable, regularizer=l2_regularizer(cfg.WEIGHT_DECAY))
h = tf.nn.conv2d_transpose(input, kernel, output_shape, [1, stride, stride, 1], padding=pad)
h = tf.reshape(h, output_shape) # reshape is necessary
if biased:
init_bias = tf.constant_initializer(0.0)
bias = make_var('biases', [c_o], init_bias, trainable)
h = tf.nn.bias_add(h, bias)
if bn:
h = batch_norm(h)
if activation == 'relu':
h = tf.nn.relu(h)
elif activation == 'leaky_relu':
h = leaky_relu(h)
return h
def max_pool(input, k_h, k_w, s_h, s_w, name, pad='SAME'):
return tf.nn.max_pool(input, ksize=[1, k_h, k_w, 1], strides=[1, s_h, s_w, 1], padding=pad, name=name)
def avg_pool(input, k_h, k_w, s_h, s_w, name, pad='SAME'):
return tf.nn.avg_pool(input, ksize=[1, k_h, k_w, 1], strides=[1, s_h, s_w, 1], padding=pad, name=name)
def fc(input, c_o, name, biased=True, activation='relu', bn=False, init='msra', trainable=True):
c_i = input.get_shape()[-1]
with tf.variable_scope(name) as scope:
if init == 'msra':
init_weights = tf.contrib.layers.variance_scaling_initializer(factor=2.0, mode='FAN_IN', uniform=False)
else:
raise Exception('Invalid init')
weights = make_var('weights', [c_i, c_o], init_weights, trainable, regularizer=l2_regularizer(cfg.WEIGHT_DECAY))
h = tf.matmul(input, weights)
if biased:
init_bias = tf.constant_initializer(0.0)
bias = make_var('biases', [c_o], init_bias, trainable)
h = tf.nn.bias_add(h, bias)
if bn:
h = batch_norm(h)
if activation == 'relu':
h = tf.nn.relu(h)
elif activation == 'leaky_relu':
h = leaky_relu(h)
return h
def sum_act(h, sparsity=False):
tf.summary.histogram('activation/'+h.name, h)
if sparsity:
tf.summary.scalar('sparsity/'+h.name, tf.nn.zero_fraction(h))
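# --- Illustrative sketch (not part of the original file) ---------------------
# Stacking the layer helpers above into a tiny encoder head. A fixed 64x64
# input keeps fc() happy, because fc() reads the static channel dimension;
# all layer names are example values only.
def _example_encoder(images, num_classes):
    # images: [batch, 64, 64, 3] float32 tensor, e.g. the output of prep().
    h = conv(images, 3, 3, 64, 2, 2, name='conv1', bn=True, activation='leaky_relu')  # -> 32x32
    h = conv(h, 3, 3, 128, 2, 2, name='conv2', bn=True, activation='leaky_relu')      # -> 16x16
    h = max_pool(h, 2, 2, 2, 2, name='pool1')                                         # -> 8x8
    h = tf.reshape(h, [-1, 8 * 8 * 128])
    h = fc(h, 256, name='fc1', activation='leaky_relu')
    logits = fc(h, num_classes, name='logits', activation=None)
    return logits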
| mit |
lilleswing/deepchem | contrib/one_shot_models/multitask_regressor.py | 5 | 8197 | """
Implements a multitask graph-convolutional regression.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "Han Altae-Tran and Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import warnings
import os
import sys
import numpy as np
import tensorflow as tf
import sklearn.metrics
import tempfile
from deepchem.utils.save import log
from deepchem.models import Model
from deepchem.nn.copy import Input
from deepchem.nn.copy import Dense
from deepchem.data import pad_features
from deepchem.nn import model_ops
# TODO(rbharath): Find a way to get rid of this import?
from deepchem.models.tf_new_models.graph_topology import merge_dicts
from deepchem.models.tf_new_models.multitask_classifier import get_loss_fn
class MultitaskGraphRegressor(Model):
def __init__(self,
model,
n_tasks,
n_feat,
logdir=None,
batch_size=50,
final_loss='weighted_L2',
learning_rate=.001,
optimizer_type="adam",
learning_rate_decay_time=1000,
beta1=.9,
beta2=.999,
pad_batches=True,
verbose=True):
warnings.warn(
"MultitaskGraphRegressor is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
super(MultitaskGraphRegressor, self).__init__(
model_dir=logdir, verbose=verbose)
self.n_tasks = n_tasks
self.final_loss = final_loss
self.model = model
self.sess = tf.Session(graph=self.model.graph)
with self.model.graph.as_default():
# Extract model info
self.batch_size = batch_size
self.pad_batches = pad_batches
# Get graph topology for x
self.graph_topology = self.model.get_graph_topology()
self.feat_dim = n_feat
# Building outputs
self.outputs = self.build()
self.loss_op = self.add_training_loss(self.final_loss, self.outputs)
self.learning_rate = learning_rate
self.T = learning_rate_decay_time
self.optimizer_type = optimizer_type
self.optimizer_beta1 = beta1
self.optimizer_beta2 = beta2
# Set epsilon
self.epsilon = 1e-7
self.add_optimizer()
# Initialize
self.init_fn = tf.global_variables_initializer()
self.sess.run(self.init_fn)
# Path to save checkpoint files, which matches the
# replicated supervisor's default path.
self._save_path = os.path.join(self.model_dir, 'model.ckpt')
def build(self):
# Create target inputs
self.label_placeholder = tf.placeholder(
dtype='float32', shape=(None, self.n_tasks), name="label_placeholder")
self.weight_placeholder = tf.placeholder(
dtype='float32', shape=(None, self.n_tasks), name="weight_placeholder")
feat = self.model.return_outputs()
feat_size = feat.get_shape()[-1].value
outputs = []
for task in range(self.n_tasks):
outputs.append(
tf.squeeze(
model_ops.fully_connected_layer(
tensor=feat,
size=1,
weight_init=tf.truncated_normal(
shape=[feat_size, 1], stddev=0.01),
bias_init=tf.constant(value=0., shape=[1]))))
return outputs
def add_optimizer(self):
if self.optimizer_type == "adam":
self.optimizer = tf.train.AdamOptimizer(
self.learning_rate,
beta1=self.optimizer_beta1,
beta2=self.optimizer_beta2,
epsilon=self.epsilon)
else:
raise ValueError("Optimizer type not recognized.")
# Get train function
self.train_op = self.optimizer.minimize(self.loss_op)
def construct_feed_dict(self, X_b, y_b=None, w_b=None, training=True):
"""Get initial information about task normalization"""
# TODO(rbharath): I believe this is total amount of data
n_samples = len(X_b)
if y_b is None:
y_b = np.zeros((n_samples, self.n_tasks))
if w_b is None:
w_b = np.zeros((n_samples, self.n_tasks))
targets_dict = {self.label_placeholder: y_b, self.weight_placeholder: w_b}
# Get graph information
atoms_dict = self.graph_topology.batch_to_feed_dict(X_b)
# TODO (hraut->rhbarath): num_datapoints should be a vector, with ith element being
# the number of labeled data points in target_i. This is to normalize each task
# num_dat_dict = {self.num_datapoints_placeholder : self.}
# Get other optimizer information
# TODO(rbharath): Figure out how to handle phase appropriately
feed_dict = merge_dicts([targets_dict, atoms_dict])
return feed_dict
def add_training_loss(self, final_loss, outputs):
"""Computes loss using logits."""
loss_fn = get_loss_fn(final_loss) # Get loss function
task_losses = []
# label_placeholder of shape (batch_size, n_tasks). Split into n_tasks
# tensors of shape (batch_size,)
task_labels = tf.split(
axis=1, num_or_size_splits=self.n_tasks, value=self.label_placeholder)
task_weights = tf.split(
axis=1, num_or_size_splits=self.n_tasks, value=self.weight_placeholder)
for task in range(self.n_tasks):
task_label_vector = task_labels[task]
task_weight_vector = task_weights[task]
task_loss = loss_fn(outputs[task], tf.squeeze(task_label_vector),
tf.squeeze(task_weight_vector))
task_losses.append(task_loss)
# It's ok to divide by just the batch_size rather than the number of nonzero
# examples (effect averages out)
total_loss = tf.add_n(task_losses)
total_loss = tf.math.divide(total_loss, self.batch_size)
return total_loss
def fit(self,
dataset,
nb_epoch=10,
max_checkpoints_to_keep=5,
log_every_N_batches=50,
checkpoint_interval=10,
**kwargs):
# Perform the optimization
log("Training for %d epochs" % nb_epoch, self.verbose)
# TODO(rbharath): Disabling saving for now to try to debug.
for epoch in range(nb_epoch):
log("Starting epoch %d" % epoch, self.verbose)
for batch_num, (X_b, y_b, w_b, ids_b) in enumerate(
dataset.iterbatches(self.batch_size, pad_batches=self.pad_batches)):
if batch_num % log_every_N_batches == 0:
log("On batch %d" % batch_num, self.verbose)
self.sess.run(
self.train_op, feed_dict=self.construct_feed_dict(X_b, y_b, w_b))
def save(self):
"""
No-op since this model doesn't currently support saving...
"""
pass
def predict(self, dataset, transformers=[], **kwargs):
"""Wraps predict to set batch_size/padding."""
return super(MultitaskGraphRegressor, self).predict(
dataset, transformers, batch_size=self.batch_size)
def predict_on_batch(self, X):
"""Return model output for the provided input.
"""
if self.pad_batches:
X = pad_features(self.batch_size, X)
# run eval data through the model
n_tasks = self.n_tasks
with self.sess.as_default():
feed_dict = self.construct_feed_dict(X)
# Shape (n_samples, n_tasks)
batch_outputs = self.sess.run(self.outputs, feed_dict=feed_dict)
n_samples = len(X)
outputs = np.zeros((n_samples, self.n_tasks))
for task, output in enumerate(batch_outputs):
outputs[:, task] = output
return outputs
def get_num_tasks(self):
"""Needed to use Model.predict() from superclass."""
return self.n_tasks
class DTNNMultitaskGraphRegressor(MultitaskGraphRegressor):
def build(self):
# Create target inputs
warnings.warn(
"DTNNMultitaskGraphRegressor is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
self.label_placeholder = tf.placeholder(
dtype='float32', shape=(None, self.n_tasks), name="label_placeholder")
self.weight_placeholder = tf.placeholder(
dtype='float32', shape=(None, self.n_tasks), name="weight_placeholder")
feat = self.model.return_outputs()
outputs = []
for task in range(self.n_tasks):
outputs.append(feat[:, task])
return outputs
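# --- Illustrative sketch (not part of the original file) ---------------------
# Typical driving code for this (deprecated) regressor. The graph-convolution
# model construction is omitted; `graph_model` is assumed to expose the
# get_graph_topology()/return_outputs() interface used above, and `dataset` is
# assumed to be a deepchem Dataset. All names are example values only.
def _example_fit_graph_regressor(graph_model, dataset, n_tasks, n_feat):
    model = MultitaskGraphRegressor(graph_model, n_tasks, n_feat,
                                    batch_size=50, learning_rate=1e-3)
    model.fit(dataset, nb_epoch=5)
    return model.predict(dataset)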
| mit |
luo66/scikit-learn | sklearn/ensemble/tests/test_voting_classifier.py | 140 | 6926 | """Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.grid_search import GridSearchCV
from sklearn import datasets
from sklearn import cross_validation
from sklearn.datasets import make_multilabel_classification
from sklearn.svm import SVC
from sklearn.multiclass import OneVsRestClassifier
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
X, y = iris.data[:, 1:3], iris.target
def test_majority_label_iris():
"""Check classification by majority label on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
scores = cross_validation.cross_val_score(eclf,
X,
y,
cv=5,
scoring='accuracy')
assert_almost_equal(scores.mean(), 0.95, decimal=2)
def test_tie_situation():
"""Check voting classifier selects smaller class label in tie situation."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2)],
voting='hard')
assert_equal(clf1.fit(X, y).predict(X)[73], 2)
assert_equal(clf2.fit(X, y).predict(X)[73], 1)
assert_equal(eclf.fit(X, y).predict(X)[73], 1)
def test_weights_iris():
"""Check classification by average probabilities on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 2, 10])
scores = cross_validation.cross_val_score(eclf,
X,
y,
cv=5,
scoring='accuracy')
assert_almost_equal(scores.mean(), 0.93, decimal=2)
def test_predict_on_toy_problem():
"""Manually check predicted class labels for toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5],
[-1.2, -1.4],
[-3.4, -2.2],
[1.1, 1.2],
[2.1, 1.4],
[3.1, 2.3]])
y = np.array([1, 1, 1, 2, 2, 2])
assert_equal(all(clf1.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
assert_equal(all(clf2.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
assert_equal(all(clf3.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard',
weights=[1, 1, 1])
assert_equal(all(eclf.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 1])
assert_equal(all(eclf.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
def test_predict_proba_on_toy_problem():
"""Calculate predicted probabilities on toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
clf1_res = np.array([[0.59790391, 0.40209609],
[0.57622162, 0.42377838],
[0.50728456, 0.49271544],
[0.40241774, 0.59758226]])
clf2_res = np.array([[0.8, 0.2],
[0.8, 0.2],
[0.2, 0.8],
[0.3, 0.7]])
clf3_res = np.array([[0.9985082, 0.0014918],
[0.99845843, 0.00154157],
[0., 1.],
[0., 1.]])
t00 = (2*clf1_res[0][0] + clf2_res[0][0] + clf3_res[0][0]) / 4
t11 = (2*clf1_res[1][1] + clf2_res[1][1] + clf3_res[1][1]) / 4
t21 = (2*clf1_res[2][1] + clf2_res[2][1] + clf3_res[2][1]) / 4
t31 = (2*clf1_res[3][1] + clf2_res[3][1] + clf3_res[3][1]) / 4
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[2, 1, 1])
eclf_res = eclf.fit(X, y).predict_proba(X)
assert_almost_equal(t00, eclf_res[0][0], decimal=1)
assert_almost_equal(t11, eclf_res[1][1], decimal=1)
assert_almost_equal(t21, eclf_res[2][1], decimal=1)
assert_almost_equal(t31, eclf_res[3][1], decimal=1)
try:
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
eclf.fit(X, y).predict_proba(X)
except AttributeError:
pass
else:
raise AssertionError('AttributeError for voting == "hard"'
' and with predict_proba not raised')
def test_multilabel():
"""Check if error is raised for multilabel classification."""
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=123)
clf = OneVsRestClassifier(SVC(kernel='linear'))
eclf = VotingClassifier(estimators=[('ovr', clf)], voting='hard')
try:
eclf.fit(X, y)
except NotImplementedError:
return
def test_gridsearch():
"""Check GridSearch support."""
clf1 = LogisticRegression(random_state=1)
clf2 = RandomForestClassifier(random_state=1)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft')
params = {'lr__C': [1.0, 100.0],
'voting': ['soft', 'hard'],
'weights': [[0.5, 0.5, 0.5], [1.0, 0.5, 0.5]]}
grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5)
grid.fit(iris.data, iris.target)
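# --- Illustrative sketch (not part of the original test suite) ---------------
# Minimal end-to-end use of a weighted soft-voting ensemble on the iris data
# loaded above; the weights and random states are example values only.
def _example_soft_voting_usage():
    clf1 = LogisticRegression(random_state=123)
    clf2 = RandomForestClassifier(random_state=123)
    clf3 = GaussianNB()
    eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],
                            voting='soft', weights=[2, 1, 1])
    eclf.fit(X, y)
    return eclf.predict_proba(X[:5])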
| bsd-3-clause |
LohithBlaze/scikit-learn | sklearn/metrics/regression.py | 174 | 16953 | """Metrics to assess performance on regression task
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Michael Eickenberg <michael.eickenberg@gmail.com>
# Konstantin Shmelkov <konstantin.shmelkov@polytechnique.edu>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils.validation import check_array, check_consistent_length
from ..utils.validation import column_or_1d
import warnings
__ALL__ = [
"mean_absolute_error",
"mean_squared_error",
"median_absolute_error",
"r2_score",
"explained_variance_score"
]
def _check_reg_targets(y_true, y_pred, multioutput):
"""Check that y_true and y_pred belong to the same regression task
Parameters
----------
y_true : array-like,
y_pred : array-like,
multioutput : array-like or string in ['raw_values', uniform_average',
'variance_weighted'] or None
None is accepted due to backward compatibility of r2_score().
Returns
-------
type_true : one of {'continuous', continuous-multioutput'}
The type of the true target data, as output by
'utils.multiclass.type_of_target'
y_true : array-like of shape = (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples, n_outputs)
Estimated target values.
multioutput : array-like of shape = (n_outputs) or string in ['raw_values',
uniform_average', 'variance_weighted'] or None
Custom output weights if ``multioutput`` is array-like or
just the corresponding argument if ``multioutput`` is a
correct keyword.
"""
check_consistent_length(y_true, y_pred)
y_true = check_array(y_true, ensure_2d=False)
y_pred = check_array(y_pred, ensure_2d=False)
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
raise ValueError("y_true and y_pred have different number of output "
"({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))
n_outputs = y_true.shape[1]
multioutput_options = (None, 'raw_values', 'uniform_average',
'variance_weighted')
if multioutput not in multioutput_options:
multioutput = check_array(multioutput, ensure_2d=False)
if n_outputs == 1:
raise ValueError("Custom weights are useful only in "
"multi-output cases.")
elif n_outputs != len(multioutput):
raise ValueError(("There must be equally many custom weights "
"(%d) as outputs (%d).") %
(len(multioutput), n_outputs))
y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput'
return y_type, y_true, y_pred, multioutput
def mean_absolute_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean absolute error regression loss
Read more in the :ref:`User Guide <mean_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
If multioutput is 'raw_values', then mean absolute error is returned
for each output separately.
If multioutput is 'uniform_average' or an ndarray of weights, then the
weighted average of all output errors is returned.
MAE output is non-negative floating point. The best value is 0.0.
Examples
--------
>>> from sklearn.metrics import mean_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_absolute_error(y_true, y_pred)
0.5
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> mean_absolute_error(y_true, y_pred)
0.75
>>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
array([ 0.5, 1. ])
>>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.849...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average(np.abs(y_pred - y_true),
weights=sample_weight, axis=0)
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def mean_squared_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean squared error regression loss
Read more in the :ref:`User Guide <mean_squared_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
Examples
--------
>>> from sklearn.metrics import mean_squared_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_squared_error(y_true, y_pred)
0.375
>>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
>>> y_pred = [[0, 2],[-1, 2],[8, -5]]
>>> mean_squared_error(y_true, y_pred) # doctest: +ELLIPSIS
0.708...
>>> mean_squared_error(y_true, y_pred, multioutput='raw_values')
... # doctest: +ELLIPSIS
array([ 0.416..., 1. ])
>>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.824...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average((y_true - y_pred) ** 2, axis=0,
weights=sample_weight)
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def median_absolute_error(y_true, y_pred):
"""Median absolute error regression loss
Read more in the :ref:`User Guide <median_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples)
Estimated target values.
Returns
-------
loss : float
A positive floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import median_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> median_absolute_error(y_true, y_pred)
0.5
"""
y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred,
'uniform_average')
if y_type == 'continuous-multioutput':
raise ValueError("Multioutput not supported in median_absolute_error")
return np.median(np.abs(y_pred - y_true))
def explained_variance_score(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Explained variance regression score function
Best possible score is 1.0, lower values are worse.
Read more in the :ref:`User Guide <explained_variance_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average', \
'variance_weighted'] or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
score : float or ndarray of floats
The explained variance or ndarray if 'multioutput' is 'raw_values'.
Notes
-----
This is not a symmetric function.
Examples
--------
>>> from sklearn.metrics import explained_variance_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> explained_variance_score(y_true, y_pred) # doctest: +ELLIPSIS
0.957...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> explained_variance_score(y_true, y_pred, multioutput='uniform_average')
... # doctest: +ELLIPSIS
0.983...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
y_diff_avg = np.average(y_true - y_pred, weights=sample_weight, axis=0)
numerator = np.average((y_true - y_pred - y_diff_avg) ** 2,
weights=sample_weight, axis=0)
y_true_avg = np.average(y_true, weights=sample_weight, axis=0)
denominator = np.average((y_true - y_true_avg) ** 2,
weights=sample_weight, axis=0)
nonzero_numerator = numerator != 0
nonzero_denominator = denominator != 0
valid_score = nonzero_numerator & nonzero_denominator
output_scores = np.ones(y_true.shape[1])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
# passing None as weights to np.average() results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
def r2_score(y_true, y_pred,
sample_weight=None,
multioutput=None):
"""R^2 (coefficient of determination) regression score function.
Best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
would get a R^2 score of 0.0.
Read more in the :ref:`User Guide <r2_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average',
'variance_weighted'] or None or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
Default value corresponds to 'variance_weighted', but
will be changed to 'uniform_average' in future versions.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
z : float or ndarray of floats
The R^2 score or ndarray of scores if 'multioutput' is
'raw_values'.
Notes
-----
This is not a symmetric function.
Unlike most other scores, R^2 score may be negative (it need not actually
be the square of a quantity R).
References
----------
.. [1] `Wikipedia entry on the Coefficient of determination
<http://en.wikipedia.org/wiki/Coefficient_of_determination>`_
Examples
--------
>>> from sklearn.metrics import r2_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> r2_score(y_true, y_pred) # doctest: +ELLIPSIS
0.948...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> r2_score(y_true, y_pred, multioutput='variance_weighted') # doctest: +ELLIPSIS
0.938...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
weight = sample_weight[:, np.newaxis]
else:
weight = 1.
numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0,
dtype=np.float64)
denominator = (weight * (y_true - np.average(
y_true, axis=0, weights=sample_weight)) ** 2).sum(axis=0,
dtype=np.float64)
nonzero_denominator = denominator != 0
nonzero_numerator = numerator != 0
valid_score = nonzero_denominator & nonzero_numerator
output_scores = np.ones([y_true.shape[1]])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
# arbitrarily set to zero to avoid -inf scores; a constant
# y_true is not interesting for scoring a regression anyway
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput is None and y_true.shape[1] != 1:
# @FIXME change in 0.18
warnings.warn("Default 'multioutput' behavior now corresponds to "
"'variance_weighted' value, it will be changed "
"to 'uniform_average' in 0.18.",
DeprecationWarning)
multioutput = 'variance_weighted'
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
# passing None as weights results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
# avoid fail on constant y or one-element arrays
if not np.any(nonzero_denominator):
if not np.any(nonzero_numerator):
return 1.0
else:
return 0.0
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
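# --- Illustrative sketch (not part of the original module) -------------------
# Combining sample weights with per-output scores using the functions defined
# above; the data values are arbitrary example numbers.
def _example_weighted_multioutput_scores():
    y_true = np.array([[0.5, 1.0], [-1.0, 1.0], [7.0, -6.0]])
    y_pred = np.array([[0.0, 2.0], [-1.0, 2.0], [8.0, -5.0]])
    weights = np.array([0.5, 0.25, 0.25])
    per_output_r2 = r2_score(y_true, y_pred, sample_weight=weights,
                             multioutput='raw_values')
    overall_mse = mean_squared_error(y_true, y_pred, sample_weight=weights,
                                     multioutput='uniform_average')
    return per_output_r2, overall_mse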
| bsd-3-clause |
luo66/scikit-learn | examples/manifold/plot_manifold_sphere.py | 257 | 5101 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=============================================
Manifold Learning methods on a severed sphere
=============================================
An application of the different :ref:`manifold` techniques
on a spherical data-set. Here one can see the use of
dimensionality reduction in order to gain some intuition
regarding the manifold learning methods. Regarding the dataset,
the poles are cut from the sphere, as well as a thin slice down its
side. This enables the manifold learning techniques to
'spread it open' whilst projecting it onto two dimensions.
For a similar example, where the methods are applied to the
S-curve dataset, see :ref:`example_manifold_plot_compare_methods.py`
Note that the purpose of the :ref:`MDS <multidimensional_scaling>` is
to find a low-dimensional representation of the data (here 2D) in
which the distances respect well the distances in the original
high-dimensional space, unlike other manifold-learning algorithms,
it does not seek an isotropic representation of the data in
the low-dimensional space. Here the manifold problem matches fairly
well that of representing a flat map of the Earth, as with
`map projection <http://en.wikipedia.org/wiki/Map_projection>`_
"""
# Author: Jaques Grobler <jaques.grobler@inria.fr>
# License: BSD 3 clause
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold
from sklearn.utils import check_random_state
# Next line to silence pyflakes.
Axes3D
# Variables for manifold learning.
n_neighbors = 10
n_samples = 1000
# Create our sphere.
random_state = check_random_state(0)
p = random_state.rand(n_samples) * (2 * np.pi - 0.55)
t = random_state.rand(n_samples) * np.pi
# Sever the poles from the sphere.
indices = ((t < (np.pi - (np.pi / 8))) & (t > ((np.pi / 8))))
colors = p[indices]
x, y, z = np.sin(t[indices]) * np.cos(p[indices]), \
np.sin(t[indices]) * np.sin(p[indices]), \
np.cos(t[indices])
# Plot our dataset.
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
ax = fig.add_subplot(251, projection='3d')
ax.scatter(x, y, z, c=p[indices], cmap=plt.cm.rainbow)
try:
# compatibility matplotlib < 1.0
ax.view_init(40, -10)
except:
pass
sphere_data = np.array([x, y, z]).T
# Perform Locally Linear Embedding Manifold learning
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
trans_data = manifold\
.LocallyLinearEmbedding(n_neighbors, 2,
method=method).fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Isomap Manifold learning.
t0 = time()
trans_data = manifold.Isomap(n_neighbors, n_components=2)\
.fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % ('ISO', t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % ('Isomap', t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Multi-dimensional scaling.
t0 = time()
mds = manifold.MDS(2, max_iter=100, n_init=1)
trans_data = mds.fit_transform(sphere_data).T
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Spectral Embedding.
t0 = time()
se = manifold.SpectralEmbedding(n_components=2,
n_neighbors=n_neighbors)
trans_data = se.fit_transform(sphere_data).T
t1 = time()
print("Spectral Embedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("Spectral Embedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform t-distributed stochastic neighbor embedding.
t0 = time()
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
trans_data = tsne.fit_transform(sphere_data).T
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
jzt5132/scikit-learn | examples/cluster/plot_lena_segmentation.py | 269 | 2444 | """
=========================================
Segmenting the picture of Lena in regions
=========================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discrete' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(lena)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a voronoi
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / lena.std()) + eps
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 11
###############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels,
random_state=1)
t1 = time.time()
labels = labels.reshape(lena.shape)
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(N_REGIONS):
plt.contour(labels == l, contours=1,
colors=[plt.cm.spectral(l / float(N_REGIONS)), ])
plt.xticks(())
plt.yticks(())
plt.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0)))
plt.show()
| bsd-3-clause |
mlperf/training_results_v0.5 | v0.5.0/google/cloud_v3.8/gnmt-tpuv3-8/code/gnmt/model/t2t/tensor2tensor/bin/t2t_datagen.py | 3 | 11111 | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Produces the training and dev data for --problem into --data_dir.
Produces sharded and shuffled TFRecord files of tensorflow.Example protocol
buffers for a variety of registered datasets.
All Problems are registered with @registry.register_problem or are in
_SUPPORTED_PROBLEM_GENERATORS in this file. Each entry maps a string name
(selectable on the command-line with --problem) to a function that takes 2
arguments - input_directory and mode (one of "train" or "dev") - and yields for
each training example a dictionary mapping string feature names to lists of
{string, int, float}. The generator will be run once for each mode.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import os
import random
import tempfile
import numpy as np
from tensor2tensor import problems as problems_lib # pylint: disable=unused-import
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.utils import registry
from tensor2tensor.utils import usr_dir
try:
# pylint: disable=g-import-not-at-top
from tensor2tensor.data_generators import algorithmic_math
from tensor2tensor.data_generators import audio
from tensor2tensor.data_generators import snli
from tensor2tensor.data_generators import wsj_parsing
# pylint: enable=g-import-not-at-top
except ImportError:
pass
# Importing here to prevent pylint from raising an ungrouped-imports warning.
import tensorflow as tf # pylint: disable=g-import-not-at-top
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_string("data_dir", "", "Data directory.")
flags.DEFINE_string("tmp_dir", "/tmp/t2t_datagen",
"Temporary storage directory.")
flags.DEFINE_string("problem", "",
"The name of the problem to generate data for.")
flags.DEFINE_string("exclude_problems", "",
"Comma-separates list of problems to exclude.")
flags.DEFINE_integer("num_shards", 0, "How many shards to use. Ignored for "
"registered Problems.")
flags.DEFINE_integer("max_cases", 0,
"Maximum number of cases to generate (unbounded if 0).")
flags.DEFINE_bool("only_list", False,
"If true, we only list the problems that will be generated.")
flags.DEFINE_integer("random_seed", 429459, "Random seed to use.")
flags.DEFINE_integer("task_id", -1, "For distributed data generation.")
flags.DEFINE_integer("task_id_start", -1, "For distributed data generation.")
flags.DEFINE_integer("task_id_end", -1, "For distributed data generation.")
flags.DEFINE_integer(
"num_concurrent_processes", None,
"Applies only to problems for which multiprocess_generate=True.")
flags.DEFINE_string("t2t_usr_dir", "",
"Path to a Python module that will be imported. The "
"__init__.py file should include the necessary imports. "
"The imported files should contain registrations, "
"e.g. @registry.register_problem calls, that will then be "
"available to t2t-datagen.")
# Mapping from problems that we can generate data for to their generators.
# pylint: disable=g-long-lambda
_SUPPORTED_PROBLEM_GENERATORS = {
"algorithmic_algebra_inverse": (
lambda: algorithmic_math.algebra_inverse(26, 0, 2, 100000),
lambda: algorithmic_math.algebra_inverse(26, 3, 3, 10000),
lambda: None), # test set
"parsing_english_ptb8k": (
lambda: wsj_parsing.parsing_token_generator(
FLAGS.data_dir, FLAGS.tmp_dir, True, 2**13, 2**9),
lambda: wsj_parsing.parsing_token_generator(
FLAGS.data_dir, FLAGS.tmp_dir, False, 2**13, 2**9),
lambda: None), # test set
"parsing_english_ptb16k": (
lambda: wsj_parsing.parsing_token_generator(
FLAGS.data_dir, FLAGS.tmp_dir, True, 2**14, 2**9),
lambda: wsj_parsing.parsing_token_generator(
FLAGS.data_dir, FLAGS.tmp_dir, False, 2**14, 2**9),
lambda: None), # test set
"inference_snli32k": (
lambda: snli.snli_token_generator(FLAGS.tmp_dir, True, 2**15),
lambda: snli.snli_token_generator(FLAGS.tmp_dir, False, 2**15),
lambda: None), # test set
"audio_timit_characters_test": (
lambda: audio.timit_generator(
FLAGS.data_dir, FLAGS.tmp_dir, True, 1718),
lambda: audio.timit_generator(
FLAGS.data_dir, FLAGS.tmp_dir, False, 626),
lambda: None), # test set
"audio_timit_tokens_8k_test": (
lambda: audio.timit_generator(
FLAGS.data_dir, FLAGS.tmp_dir, True, 1718,
vocab_filename="vocab.endefr.%d" % 2**13, vocab_size=2**13),
lambda: audio.timit_generator(
FLAGS.data_dir, FLAGS.tmp_dir, False, 626,
vocab_filename="vocab.endefr.%d" % 2**13, vocab_size=2**13),
lambda: None), # test set
"audio_timit_tokens_32k_test": (
lambda: audio.timit_generator(
FLAGS.data_dir, FLAGS.tmp_dir, True, 1718,
vocab_filename="vocab.endefr.%d" % 2**15, vocab_size=2**15),
lambda: audio.timit_generator(
FLAGS.data_dir, FLAGS.tmp_dir, False, 626,
vocab_filename="vocab.endefr.%d" % 2**15, vocab_size=2**15),
lambda: None), # test set
}
# pylint: enable=g-long-lambda
def set_random_seed():
"""Set the random seed from flag everywhere."""
tf.set_random_seed(FLAGS.random_seed)
random.seed(FLAGS.random_seed)
np.random.seed(FLAGS.random_seed)
def main(_):
usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)
# Calculate the list of problems to generate.
problems = sorted(
list(_SUPPORTED_PROBLEM_GENERATORS) + registry.list_problems())
for exclude in FLAGS.exclude_problems.split(","):
if exclude:
problems = [p for p in problems if exclude not in p]
if FLAGS.problem and FLAGS.problem[-1] == "*":
problems = [p for p in problems if p.startswith(FLAGS.problem[:-1])]
elif FLAGS.problem and "," in FLAGS.problem:
problems = [p for p in problems if p in FLAGS.problem.split(",")]
elif FLAGS.problem:
problems = [p for p in problems if p == FLAGS.problem]
else:
problems = []
# Remove TIMIT if paths are not given.
if getattr(FLAGS, "timit_paths", None):
problems = [p for p in problems if "timit" not in p]
# Remove parsing if paths are not given.
if getattr(FLAGS, "parsing_path", None):
problems = [p for p in problems if "parsing_english_ptb" not in p]
if not problems:
problems_str = "\n * ".join(
sorted(list(_SUPPORTED_PROBLEM_GENERATORS) + registry.list_problems()))
error_msg = ("You must specify one of the supported problems to "
"generate data for:\n * " + problems_str + "\n")
error_msg += ("TIMIT and parsing need data_sets specified with "
"--timit_paths and --parsing_path.")
raise ValueError(error_msg)
if not FLAGS.data_dir:
FLAGS.data_dir = tempfile.gettempdir()
tf.logging.warning("It is strongly recommended to specify --data_dir. "
"Data will be written to default data_dir=%s.",
FLAGS.data_dir)
FLAGS.data_dir = os.path.expanduser(FLAGS.data_dir)
tf.gfile.MakeDirs(FLAGS.data_dir)
tf.logging.info("Generating problems:\n%s"
% registry.display_list_by_prefix(problems,
starting_spaces=4))
if FLAGS.only_list:
return
for problem in problems:
set_random_seed()
if problem in _SUPPORTED_PROBLEM_GENERATORS:
generate_data_for_problem(problem)
else:
generate_data_for_registered_problem(problem)
def generate_data_for_problem(problem):
"""Generate data for a problem in _SUPPORTED_PROBLEM_GENERATORS."""
training_gen, dev_gen, test_gen = _SUPPORTED_PROBLEM_GENERATORS[problem]
num_train_shards = FLAGS.num_shards or 10
tf.logging.info("Generating training data for %s.", problem)
train_output_files = generator_utils.train_data_filenames(
problem + generator_utils.UNSHUFFLED_SUFFIX, FLAGS.data_dir,
num_train_shards)
generator_utils.generate_files(training_gen(), train_output_files,
FLAGS.max_cases)
num_dev_shards = int(num_train_shards * 0.1)
tf.logging.info("Generating development data for %s.", problem)
dev_output_files = generator_utils.dev_data_filenames(
problem + generator_utils.UNSHUFFLED_SUFFIX, FLAGS.data_dir,
num_dev_shards)
generator_utils.generate_files(dev_gen(), dev_output_files)
num_test_shards = int(num_train_shards * 0.1)
test_output_files = []
test_gen_data = test_gen()
if test_gen_data is not None:
tf.logging.info("Generating test data for %s.", problem)
test_output_files = generator_utils.test_data_filenames(
problem + generator_utils.UNSHUFFLED_SUFFIX, FLAGS.data_dir,
num_test_shards)
generator_utils.generate_files(test_gen_data, test_output_files)
all_output_files = train_output_files + dev_output_files + test_output_files
generator_utils.shuffle_dataset(all_output_files)
def generate_data_in_process(arg):
problem_name, data_dir, tmp_dir, task_id = arg
problem = registry.problem(problem_name)
problem.generate_data(data_dir, tmp_dir, task_id)
def generate_data_for_registered_problem(problem_name):
"""Generate data for a registered problem."""
tf.logging.info("Generating data for %s.", problem_name)
if FLAGS.num_shards:
raise ValueError("--num_shards should not be set for registered Problem.")
problem = registry.problem(problem_name)
task_id = None if FLAGS.task_id < 0 else FLAGS.task_id
data_dir = os.path.expanduser(FLAGS.data_dir)
tmp_dir = os.path.expanduser(FLAGS.tmp_dir)
if task_id is None and problem.multiprocess_generate:
if FLAGS.task_id_start != -1:
assert FLAGS.task_id_end != -1
task_id_start = FLAGS.task_id_start
task_id_end = FLAGS.task_id_end
else:
task_id_start = 0
task_id_end = problem.num_generate_tasks
pool = multiprocessing.Pool(processes=FLAGS.num_concurrent_processes)
problem.prepare_to_generate(data_dir, tmp_dir)
args = [(problem_name, data_dir, tmp_dir, task_id)
for task_id in range(task_id_start, task_id_end)]
pool.map(generate_data_in_process, args)
else:
problem.generate_data(data_dir, tmp_dir, task_id)
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
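# --- Illustrative usage (not part of the original file) ----------------------
# Example invocation; the problem name comes from the generators registered
# above, and the paths are placeholder values:
#
#   python t2t_datagen.py \
#     --problem=algorithmic_algebra_inverse \
#     --data_dir=~/t2t_data \
#     --tmp_dir=/tmp/t2t_datagen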
| apache-2.0 |
tomsilver/nupic | examples/opf/experiments/multistep/simple_3_enc/description.py | 17 | 1788 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.expdescriptionhelpers import importBaseDescription
# the sub-experiment configuration
config = \
{
'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
'../datasets/simple_3.csv'),
'modelParams': { 'clParams': { 'clVerbosity': 1, 'steps': '1,3'},
'inferenceType': 'NontemporalMultiStep',
'sensorParams': { 'encoders': { }, 'verbosity': 1},
'spEnable': False,
'spParams': { },
'tpEnable': False,
'tpParams': { }},
'predictionSteps': [1, 3]}
mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
| gpl-3.0 |
mlflow/mlflow | mlflow/prophet.py | 1 | 13361 | """
The ``mlflow.prophet`` module provides an API for logging and loading Prophet models.
This module exports univariate Prophet models in the following flavors:
Prophet (native) format
This is the main flavor that can be accessed with Prophet APIs.
:py:mod:`mlflow.pyfunc`
Produced for use by generic pyfunc-based deployment tools and for batch auditing
of historical forecasts.
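A minimal usage sketch (illustrative only: it assumes an already-fitted Prophet
model ``m`` and an MLflow tracking setup; the artifact path name is arbitrary):
.. code-block:: python
    import mlflow
    import mlflow.prophet
    with mlflow.start_run():
        model_info = mlflow.prophet.log_model(pr_model=m, artifact_path="prophet_model")
    loaded = mlflow.prophet.load_model(model_info.model_uri)
    forecast = loaded.predict(loaded.make_future_dataframe(periods=30))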
.. _Prophet:
https://facebook.github.io/prophet/docs/quick_start.html#python-api
"""
import os
import yaml
import json
import mlflow
from mlflow import pyfunc
from mlflow.utils.requirements_utils import _get_pinned_requirement
from mlflow.utils.environment import (
_mlflow_conda_env,
_validate_env_arguments,
_process_pip_requirements,
_process_conda_env,
_CONDA_ENV_FILE_NAME,
_REQUIREMENTS_FILE_NAME,
_CONSTRAINTS_FILE_NAME,
_PYTHON_ENV_FILE_NAME,
_PythonEnv,
)
from mlflow.utils.file_utils import write_to
from mlflow.utils.docstring_utils import format_docstring, LOG_MODEL_PARAM_DOCS
from mlflow.models.signature import ModelSignature
from mlflow.models.utils import _save_example
from mlflow.models import Model, ModelInputExample
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.utils.model_utils import (
_get_flavor_configuration,
_validate_and_copy_code_paths,
_add_code_from_conf_to_system_path,
_validate_and_prepare_target_save_path,
)
from mlflow.models.model import MLMODEL_FILE_NAME
from mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS
FLAVOR_NAME = "prophet"
_MODEL_BINARY_KEY = "data"
_MODEL_BINARY_FILE_NAME = "model.pr"
_MODEL_TYPE_KEY = "model_type"
def get_default_pip_requirements():
"""
:return: A list of default pip requirements for MLflow Models produced by this flavor.
Calls to :func:`save_model()` and :func:`log_model()` produce a pip environment
that, at a minimum, contains these requirements.
"""
# Note: Prophet's whl build process will fail due to missing dependencies, defaulting
# to setup.py installation process.
# If a pystan installation error occurs, ensure gcc>=8 is installed in your environment.
# See: https://gcc.gnu.org/install/
return [_get_pinned_requirement("prophet")]
def get_default_conda_env():
"""
:return: The default Conda environment for MLflow Models produced by calls to
:func:`save_model()` and :func:`log_model()`.
"""
return _mlflow_conda_env(additional_pip_deps=get_default_pip_requirements())
@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name=FLAVOR_NAME))
def save_model(
pr_model,
path,
conda_env=None,
code_paths=None,
mlflow_model=None,
signature: ModelSignature = None,
input_example: ModelInputExample = None,
pip_requirements=None,
extra_pip_requirements=None,
):
"""
Save a Prophet model to a path on the local file system.
:param pr_model: Prophet model (an instance of a Prophet() forecaster that has been fit
on a temporal series).
:param path: Local path where the serialized model (as JSON) is to be saved.
:param conda_env: {{ conda_env }}
:param code_paths: A list of local filesystem paths to Python file dependencies (or directories
containing file dependencies). These files are *prepended* to the system
path when the model is loaded.
:param mlflow_model: :py:mod:`mlflow.models.Model` this flavor is being added to.
:param signature: :py:class:`ModelSignature <mlflow.models.ModelSignature>`
describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
from datasets with valid model input (e.g. the training dataset with target
column omitted) and valid model output (e.g. model predictions generated on
the training dataset), for example:
.. code-block:: python
from mlflow.models.signature import infer_signature
model = Prophet().fit(df)
train = model.history
predictions = model.predict(model.make_future_dataframe(30))
signature = infer_signature(train, predictions)
:param input_example: Input example provides one or several instances of valid
model input. The example can be used as a hint of what data to feed the
model. The given example will be converted to a Pandas DataFrame and then
serialized to json using the Pandas split-oriented format. Bytes are
base64-encoded.
:param pip_requirements: {{ pip_requirements }}
:param extra_pip_requirements: {{ extra_pip_requirements }}
"""
import prophet
_validate_env_arguments(conda_env, pip_requirements, extra_pip_requirements)
path = os.path.abspath(path)
_validate_and_prepare_target_save_path(path)
code_dir_subpath = _validate_and_copy_code_paths(code_paths, path)
if mlflow_model is None:
mlflow_model = Model()
if signature is not None:
mlflow_model.signature = signature
if input_example is not None:
_save_example(mlflow_model, input_example, path)
model_data_path = os.path.join(path, _MODEL_BINARY_FILE_NAME)
_save_model(pr_model, model_data_path)
model_bin_kwargs = {_MODEL_BINARY_KEY: _MODEL_BINARY_FILE_NAME}
pyfunc.add_to_model(
mlflow_model,
loader_module="mlflow.prophet",
conda_env=_CONDA_ENV_FILE_NAME,
python_env=_PYTHON_ENV_FILE_NAME,
code=code_dir_subpath,
**model_bin_kwargs,
)
flavor_conf = {
_MODEL_TYPE_KEY: pr_model.__class__.__name__,
**model_bin_kwargs,
}
mlflow_model.add_flavor(
FLAVOR_NAME,
prophet_version=prophet.__version__,
code=code_dir_subpath,
**flavor_conf,
)
mlflow_model.save(os.path.join(path, MLMODEL_FILE_NAME))
if conda_env is None:
if pip_requirements is None:
# cannot use inferred requirements due to prophet's build process
# as the package installation of pystan requires Cython to be present
# in the path. Prophet's installation itself requires imports of
# existing libraries, preventing the execution of a batched pip install
# and instead using a strictly defined list of dependencies.
# NOTE: if Prophet .whl build architecture is changed, this should be
# modified to a standard inferred approach.
default_reqs = get_default_pip_requirements()
else:
default_reqs = None
conda_env, pip_requirements, pip_constraints = _process_pip_requirements(
default_reqs,
pip_requirements,
extra_pip_requirements,
)
else:
conda_env, pip_requirements, pip_constraints = _process_conda_env(conda_env)
with open(os.path.join(path, _CONDA_ENV_FILE_NAME), "w") as f:
yaml.safe_dump(conda_env, stream=f, default_flow_style=False)
if pip_constraints:
write_to(os.path.join(path, _CONSTRAINTS_FILE_NAME), "\n".join(pip_constraints))
write_to(os.path.join(path, _REQUIREMENTS_FILE_NAME), "\n".join(pip_requirements))
_PythonEnv.current().to_yaml(os.path.join(path, _PYTHON_ENV_FILE_NAME))
@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name=FLAVOR_NAME))
def log_model(
pr_model,
artifact_path,
conda_env=None,
code_paths=None,
registered_model_name=None,
signature: ModelSignature = None,
input_example: ModelInputExample = None,
await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS,
pip_requirements=None,
extra_pip_requirements=None,
):
"""
Log a Prophet model as an MLflow artifact for the current run.
:param pr_model: Prophet model to be saved.
:param artifact_path: Run-relative artifact path.
:param conda_env: {{ conda_env }}
:param code_paths: A list of local filesystem paths to Python file dependencies (or directories
containing file dependencies). These files are *prepended* to the system
path when the model is loaded.
:param registered_model_name: This argument may change or be removed in a
future release without warning. If given, create a model
version under ``registered_model_name``, also creating a
registered model if one with the given name does not exist.
:param signature: :py:class:`ModelSignature <mlflow.models.ModelSignature>`
describes model input and output
:py:class:`Schema <mlflow.types.Schema>`.
The model signature can be :py:func:`inferred
<mlflow.models.infer_signature>` from datasets with valid model input
(e.g. the training dataset with target column omitted) and valid model
output (e.g. model predictions generated on the training dataset),
for example:
.. code-block:: python
from mlflow.models.signature import infer_signature
model = Prophet().fit(df)
train = model.history
predictions = model.predict(model.make_future_dataframe(30))
signature = infer_signature(train, predictions)
:param input_example: Input example provides one or several instances of valid
model input. The example can be used as a hint of what data to
feed the model. The given example will be converted to a
Pandas DataFrame and then serialized to json using the
Pandas split-oriented format. Bytes are base64-encoded.
:param await_registration_for: Number of seconds to wait for the model version
to finish being created and is in ``READY`` status.
By default, the function waits for five minutes.
Specify 0 or None to skip waiting.
:param pip_requirements: {{ pip_requirements }}
:param extra_pip_requirements: {{ extra_pip_requirements }}
:return: A :py:class:`ModelInfo <mlflow.models.model.ModelInfo>` instance that contains the
metadata of the logged model.
"""
return Model.log(
artifact_path=artifact_path,
flavor=mlflow.prophet,
registered_model_name=registered_model_name,
pr_model=pr_model,
conda_env=conda_env,
code_paths=code_paths,
signature=signature,
input_example=input_example,
await_registration_for=await_registration_for,
pip_requirements=pip_requirements,
extra_pip_requirements=extra_pip_requirements,
)
def _save_model(model, path):
from prophet.serialize import model_to_json
model_ser = model_to_json(model)
with open(path, "w") as f:
json.dump(model_ser, f)
def _load_model(path):
from prophet.serialize import model_from_json
with open(path, "r") as f:
model = json.load(f)
return model_from_json(model)
def _load_pyfunc(path):
"""
Load PyFunc implementation for Prophet. Called by ``pyfunc.load_model``.
:param path: Local filesystem path to the MLflow Model with the ``prophet`` flavor.
"""
return _ProphetModelWrapper(_load_model(path))
def load_model(model_uri, dst_path=None):
"""
Load a Prophet model from a local file or a run.
:param model_uri: The location, in URI format, of the MLflow model. For example:
- ``/Users/me/path/to/local/model``
- ``relative/path/to/local/model``
- ``s3://my_bucket/path/to/model``
- ``runs:/<mlflow_run_id>/run-relative/path/to/model``
For more information about supported URI schemes, see
`Referencing Artifacts <https://www.mlflow.org/docs/latest/tracking.html#
artifact-locations>`_.
:param dst_path: The local filesystem path to which to download the model artifact.
This directory must already exist. If unspecified, a local output
path will be created.
:return: A Prophet model instance
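A minimal sketch (the run id and artifact path below are placeholders):
.. code-block:: python
    model = mlflow.prophet.load_model("runs:/<mlflow_run_id>/prophet_model")
    forecast = model.predict(model.make_future_dataframe(periods=30))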
"""
local_model_path = _download_artifact_from_uri(artifact_uri=model_uri, output_path=dst_path)
flavor_conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME)
_add_code_from_conf_to_system_path(local_model_path, flavor_conf)
pr_model_path = os.path.join(
local_model_path, flavor_conf.get(_MODEL_BINARY_KEY, _MODEL_BINARY_FILE_NAME)
)
return _load_model(pr_model_path)
class _ProphetModelWrapper:
def __init__(self, pr_model):
self.pr_model = pr_model
def predict(self, dataframe):
return self.pr_model.predict(dataframe)
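# Illustrative sketch only (not part of the MLflow API surface): models saved with
# this flavor can also be loaded through the generic ``pyfunc`` flavor registered
# above. ``model_uri`` and ``future_df`` are placeholders supplied by the caller.
def _example_pyfunc_usage(model_uri, future_df):
    pyfunc_model = pyfunc.load_model(model_uri)
    return pyfunc_model.predict(future_df)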
| apache-2.0 |
elkingtonmcb/h2o-2 | py/testdir_ec2_only/test_KMeans_allstate_s3n_thru_hdfs.py | 9 | 2172 | import unittest, time, sys, random
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_kmeans, h2o_browse as h2b, h2o_import as h2i
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
# assume we're at 0xdata with its hdfs namenode
h2o.init(1)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_KMeans_allstate_s3n_thru_hdfs(self):
bucket = 'home-0xdiag-datasets'
importFolderPath = 'allstate'
csvFilename = "train_set.csv"
csvPathname = importFolderPath + "/" + csvFilename
timeoutSecs = 600
trialMax = 3
for trial in range(trialMax):
trialStart = time.time()
hex_key = csvFilename + "_" + str(trial) + ".hex"
start = time.time()
parseResult = h2i.import_parse(bucket=bucket, path=csvPathname, schema='s3n', hex_key=hex_key,
timeoutSecs=timeoutSecs, retryDelaySecs=10, pollTimeoutSecs=60)
elapsed = time.time() - start
print "parse end on ", csvPathname, 'took', elapsed, 'seconds',\
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
print "parse result:", parseResult['destination_key']
kwargs = {
'cols': None,
'initialization': 'Furthest',
'k': 12
}
start = time.time()
kmeans = h2o_cmd.runKMeans(parseResult=parseResult, \
timeoutSecs=timeoutSecs, retryDelaySecs=2, pollTimeoutSecs=120, **kwargs)
elapsed = time.time() - start
print "kmeans end on ", csvFilename, 'took', elapsed, 'seconds.', \
"%d pct. of timeout" % ((elapsed/timeoutSecs) * 100)
h2o_kmeans.simpleCheckKMeans(self, kmeans, **kwargs)
inspect = h2o_cmd.runInspect(None,key=kmeans['destination_key'])
print h2o.dump_json(inspect)
print "Trial #", trial, "completed in", time.time() - trialStart, "seconds.", \
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
LohithBlaze/scikit-learn | sklearn/decomposition/tests/test_dict_learning.py | 84 | 8565 | import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import TempMemmap
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_dict_learning_shapes():
n_components = 5
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_overcomplete():
n_components = 12
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_reconstruction():
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# used to test lars here too, but there's no guarantee the number of
# nonzero atoms is right.
def test_dict_learning_reconstruction_parallel():
# regression test that parallel reconstruction works with n_jobs=-1
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
def test_dict_learning_lassocd_readonly_data():
n_components = 12
with TempMemmap(X) as X_read_only:
dico = DictionaryLearning(n_components, transform_algorithm='lasso_cd',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X_read_only).transform(X_read_only)
assert_array_almost_equal(np.dot(code, dico.components_), X_read_only, decimal=2)
def test_dict_learning_nonzero_coefs():
n_components = 4
dico = DictionaryLearning(n_components, transform_algorithm='lars',
transform_n_nonzero_coefs=3, random_state=0)
code = dico.fit(X).transform(X[1])
assert_true(len(np.flatnonzero(code)) == 3)
dico.set_params(transform_algorithm='omp')
code = dico.transform(X[1])
assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_unknown_fit_algorithm():
n_components = 5
dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
assert_raises(ValueError, dico.fit, X)
def test_dict_learning_split():
n_components = 5
dico = DictionaryLearning(n_components, transform_algorithm='threshold',
random_state=0)
code = dico.fit(X).transform(X)
dico.split_sign = True
split_code = dico.transform(X)
assert_array_equal(split_code[:, :n_components] -
split_code[:, n_components:], code)
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
alpha=1, random_state=rng)
assert_equal(code.shape, (n_samples, n_components))
assert_equal(dictionary.shape, (n_components, n_features))
assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
n_components = 5
# test verbosity
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
random_state=0)
dico.fit(X)
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
random_state=0)
dico.fit(X)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
random_state=0)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
random_state=0)
finally:
sys.stdout = old_stdout
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
dico.fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
dict_init=V, random_state=0).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
batch_size=1,
alpha=1, shuffle=False, dict_init=V,
random_state=0).fit(X)
dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
n_iter=1, dict_init=V,
random_state=0)
for i in range(10):
for sample in X:
dict2.partial_fit(sample)
assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==
0))
assert_array_almost_equal(dict1.components_, dict2.components_,
decimal=2)
def test_sparse_encode_shapes():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
code = sparse_encode(X, V, algorithm=algo)
assert_equal(code.shape, (n_samples, n_components))
def test_sparse_encode_error():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_encode_error_default_sparsity():
rng = np.random.RandomState(0)
X = rng.randn(100, 64)
D = rng.randn(2, 64)
code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
n_nonzero_coefs=None)
assert_equal(code.shape, (100, 2))
def test_unknown_method():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1) | bsd-3-clause |
fx2003/tensorflow-study | TensorFlow实战/models/lfads/synth_data/synthetic_data_utils.py | 3 | 10613 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
from __future__ import print_function
import h5py
import numpy as np
import os
from utils import write_datasets
import matplotlib
import matplotlib.pyplot as plt
import scipy.signal
def generate_rnn(rng, N, g, tau, dt, max_firing_rate):
"""Create a (vanilla) RNN with a bunch of hyper parameters for generating
chaotic data.
Args:
rng: numpy random number generator
N: number of hidden units
g: scaling of recurrent weight matrix in g W, with W ~ N(0,1/N)
tau: time scale of individual unit dynamics
dt: time step for equation updates
max_firing_rate: how to rescale the -1,1 firing rates
Returns:
the dictionary of these parameters, plus some others.
"""
rnn = {}
rnn['N'] = N
rnn['W'] = rng.randn(N,N)/np.sqrt(N)
rnn['Bin'] = rng.randn(N)/np.sqrt(1.0)
rnn['Bin2'] = rng.randn(N)/np.sqrt(1.0)
rnn['b'] = np.zeros(N)
rnn['g'] = g
rnn['tau'] = tau
rnn['dt'] = dt
rnn['max_firing_rate'] = max_firing_rate
mfr = rnn['max_firing_rate'] # spikes / sec
nbins_per_sec = 1.0/rnn['dt'] # bins / sec
# Used for plotting in LFADS
rnn['conversion_factor'] = mfr / nbins_per_sec # spikes / bin
return rnn
def generate_data(rnn, T, E, x0s=None, P_sxn=None, input_magnitude=0.0,
input_times=None):
""" Generates data from an randomly initialized RNN.
Args:
rnn: the rnn
T: Time in seconds to run (divided by rnn['dt'] to get steps, rounded down.
E: total number of examples
S: number of samples (subsampling N)
Returns:
A list of length E of NxT tensors of the network being run.
"""
N = rnn['N']
def run_rnn(rnn, x0, ntime_steps, input_time=None):
rs = np.zeros([N,ntime_steps])
x_tm1 = x0
r_tm1 = np.tanh(x0)
tau = rnn['tau']
dt = rnn['dt']
alpha = (1.0-dt/tau)
W = dt/tau*rnn['W']*rnn['g']
Bin = dt/tau*rnn['Bin']
Bin2 = dt/tau*rnn['Bin2']
b = dt/tau*rnn['b']
us = np.zeros([1, ntime_steps])
for t in range(ntime_steps):
x_t = alpha*x_tm1 + np.dot(W,r_tm1) + b
if input_time is not None and t == input_time:
us[0,t] = input_magnitude
x_t += Bin * us[0,t] # DCS is this what was used?
r_t = np.tanh(x_t)
x_tm1 = x_t
r_tm1 = r_t
rs[:,t] = r_t
return rs, us
if P_sxn is None:
P_sxn = np.eye(N)
ntime_steps = int(T / rnn['dt'])
data_e = []
inputs_e = []
for e in range(E):
input_time = input_times[e] if input_times is not None else None
r_nxt, u_uxt = run_rnn(rnn, x0s[:,e], ntime_steps, input_time)
r_sxt = np.dot(P_sxn, r_nxt)
inputs_e.append(u_uxt)
data_e.append(r_sxt)
S = P_sxn.shape[0]
data_e = normalize_rates(data_e, E, S)
return data_e, x0s, inputs_e
def normalize_rates(data_e, E, S):
# Normalization, made more complex because of the P matrices.
# Normalize by min and max in each channel. This normalization will
# cause offset differences between identical rnn runs, but different
# t hits.
for e in range(E):
r_sxt = data_e[e]
for i in range(S):
rmin = np.min(r_sxt[i,:])
rmax = np.max(r_sxt[i,:])
assert rmax - rmin != 0, 'Cannot normalize a flat channel (rmax == rmin)'
r_sxt[i,:] = (r_sxt[i,:] - rmin)/(rmax-rmin)
data_e[e] = r_sxt
return data_e
def spikify_data(data_e, rng, dt=1.0, max_firing_rate=100):
""" Apply spikes to a continuous dataset whose values are between 0.0 and 1.0
Args:
data_e: nexamples length list of NxT trials
dt: how often the data are sampled
max_firing_rate: the firing rate that is associated with a value of 1.0
Returns:
spikes_e: a list of length E of NxT arrays of spike counts, sampled from a
Poisson process whose rates are given by the underlying data.
"""
E = len(data_e)
spikes_e = []
for e in range(E):
data = data_e[e]
N,T = data.shape
data_s = np.zeros([N,T]).astype(np.int)
for n in range(N):
f = data[n,:]
s = rng.poisson(f*max_firing_rate*dt, size=T)
data_s[n,:] = s
spikes_e.append(data_s)
return spikes_e
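# A minimal, hedged sketch of how the helpers above fit together; every
# hyperparameter value below is illustrative only and is not taken from any
# particular LFADS experiment.
def _example_generate_and_spikify():
  """Illustrative only: build an RNN, run it, and spikify the resulting rates."""
  rng = np.random.RandomState(0)
  rnn = generate_rnn(rng, N=50, g=1.5, tau=0.025, dt=0.01, max_firing_rate=30)
  x0s = rng.randn(rnn['N'], 4)  # four example initial conditions
  rates_e, _, _ = generate_data(rnn, T=1.0, E=4, x0s=x0s)
  return spikify_data(rates_e, rng, dt=rnn['dt'],
                      max_firing_rate=rnn['max_firing_rate'])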
def get_train_n_valid_inds(num_trials, train_fraction, nspikifications):
"""Split the numbers between 0 and num_trials-1 into two portions for
training and validation, based on the train fraction.
Args:
num_trials: the number of trials
train_fraction: (e.g. .80)
nspikifications: the number of spiking trials per initial condition
Returns:
a 2-tuple of two lists: the training indices and validation indices
"""
train_inds = []
valid_inds = []
for i in range(num_trials):
# This line divides up the trials so that within one initial condition,
# the randomness of spikifying the condition is shared among both
# training and validation data splits.
if (i % nspikifications)+1 > train_fraction * nspikifications:
valid_inds.append(i)
else:
train_inds.append(i)
return train_inds, valid_inds
def split_list_by_inds(data, inds1, inds2):
"""Take the data, a list, and split it up based on the indices in inds1 and
inds2.
Args:
data: the list of data to split
inds1, the first list of indices
inds2, the second list of indices
Returns: a 2-tuple of two lists.
"""
if data is None or len(data) == 0:
return [], []
else:
dout1 = [data[i] for i in inds1]
dout2 = [data[i] for i in inds2]
return dout1, dout2
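# Illustrative sketch: split spikified trials into train/valid portions using the
# two helpers above; the train fraction and repeat count are placeholders.
def _example_train_valid_split(spikes_e, train_fraction=0.8, nspikifications=5):
  train_inds, valid_inds = get_train_n_valid_inds(len(spikes_e), train_fraction,
                                                  nspikifications)
  return split_list_by_inds(spikes_e, train_inds, valid_inds)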
def nparray_and_transpose(data_a_b_c):
"""Convert the list of items in data to a numpy array, and transpose it
Args:
data_a_b_c: a nested list of length a, whose sublists have length b and whose
innermost lists have length c.
Returns:
a numpy 3-tensor with dimensions a x c x b
"""
data_axbxc = np.array([datum_b_c for datum_b_c in data_a_b_c])
data_axcxb = np.transpose(data_axbxc, axes=[0,2,1])
return data_axcxb
def add_alignment_projections(datasets, npcs, ntime=None, nsamples=None):
"""Create a matrix that aligns the datasets a bit, under
the assumption that each dataset is observing the same underlying dynamical
system.
Args:
datasets: The dictionary of dataset structures.
npcs: The number of pcs for each, basically like lfads factors.
nsamples (optional): Number of samples to take for each dataset.
ntime (optional): Number of time steps to take in each sample.
Returns:
The dataset structures, with the field alignment_matrix_cxf added.
This is # channels x npcs dimension
"""
nchannels_all = 0
channel_idxs = {}
conditions_all = {}
nconditions_all = 0
for name, dataset in datasets.items():
cidxs = np.where(dataset['P_sxn'])[1] # non-zero entries in columns
channel_idxs[name] = [cidxs[0], cidxs[-1]+1]
nchannels_all += cidxs[-1]+1 - cidxs[0]
conditions_all[name] = np.unique(dataset['condition_labels_train'])
all_conditions_list = \
np.unique(np.ndarray.flatten(np.array(conditions_all.values())))
nconditions_all = all_conditions_list.shape[0]
if ntime is None:
ntime = dataset['train_data'].shape[1]
if nsamples is None:
nsamples = dataset['train_data'].shape[0]
# In the data workup in the paper, Chethan did intra condition
# averaging, so let's do that here.
avg_data_all = {}
for name, conditions in conditions_all.items():
dataset = datasets[name]
avg_data_all[name] = {}
for cname in conditions:
td_idxs = np.argwhere(np.array(dataset['condition_labels_train'])==cname)
data = np.squeeze(dataset['train_data'][td_idxs,:,:], axis=1)
avg_data = np.mean(data, axis=0)
avg_data_all[name][cname] = avg_data
# Visualize this in the morning.
all_data_nxtc = np.zeros([nchannels_all, ntime * nconditions_all])
for name, dataset in datasets.items():
cidx_s = channel_idxs[name][0]
cidx_f = channel_idxs[name][1]
for cname in conditions_all[name]:
cidxs = np.argwhere(all_conditions_list == cname)
if cidxs.shape[0] > 0:
cidx = cidxs[0][0]
all_tidxs = np.arange(0, ntime+1) + cidx*ntime
all_data_nxtc[cidx_s:cidx_f, all_tidxs[0]:all_tidxs[-1]] = \
avg_data_all[name][cname].T
# A bit of filtering. We don't care about spectral properties, or
# filtering artifacts, simply correlate time steps a bit.
filt_len = 6
bc_filt = np.ones([filt_len])/float(filt_len)
for c in range(nchannels_all):
all_data_nxtc[c,:] = scipy.signal.filtfilt(bc_filt, [1.0], all_data_nxtc[c,:])
# Compute the PCs.
all_data_mean_nx1 = np.mean(all_data_nxtc, axis=1, keepdims=True)
all_data_zm_nxtc = all_data_nxtc - all_data_mean_nx1
corr_mat_nxn = np.dot(all_data_zm_nxtc, all_data_zm_nxtc.T)
evals_n, evecs_nxn = np.linalg.eigh(corr_mat_nxn)
sidxs = np.flipud(np.argsort(evals_n)) # sort such that 0th is highest
evals_n = evals_n[sidxs]
evecs_nxn = evecs_nxn[:,sidxs]
# Project all the channels data onto the low-D PCA basis, where
# low-d is the npcs parameter.
all_data_pca_pxtc = np.dot(evecs_nxn[:, 0:npcs].T, all_data_zm_nxtc)
# Now for each dataset, we regress the channel data onto the top
# pcs, and this will be our alignment matrix for that dataset.
# |B - A*W|^2
for name, dataset in datasets.items():
cidx_s = channel_idxs[name][0]
cidx_f = channel_idxs[name][1]
all_data_zm_chxtc = all_data_zm_nxtc[cidx_s:cidx_f,:] # ch for channel
W_chxp, _, _, _ = \
np.linalg.lstsq(all_data_zm_chxtc.T, all_data_pca_pxtc.T)
dataset['alignment_matrix_cxf'] = W_chxp
do_debug_plot = False
if do_debug_plot:
pc_vecs = evecs_nxn[:,0:npcs]
ntoplot = 400
plt.figure()
plt.plot(np.log10(evals_n), '-x')
plt.figure()
plt.subplot(311)
plt.imshow(all_data_pca_pxtc)
plt.colorbar()
plt.subplot(312)
plt.imshow(np.dot(W_chxp.T, all_data_zm_chxtc))
plt.colorbar()
plt.subplot(313)
plt.imshow(np.dot(all_data_zm_chxtc.T, W_chxp).T - all_data_pca_pxtc)
plt.colorbar()
import pdb
pdb.set_trace()
return datasets
| mit |
luo66/scikit-learn | sklearn/feature_extraction/tests/test_feature_hasher.py | 256 | 2861 | from __future__ import unicode_literals
import numpy as np
from sklearn.feature_extraction import FeatureHasher
from nose.tools import assert_raises, assert_true
from numpy.testing import assert_array_equal, assert_equal
def test_feature_hasher_dicts():
h = FeatureHasher(n_features=16)
assert_equal("dict", h.input_type)
raw_X = [{"dada": 42, "tzara": 37}, {"gaga": 17}]
X1 = FeatureHasher(n_features=16).transform(raw_X)
gen = (iter(d.items()) for d in raw_X)
X2 = FeatureHasher(n_features=16, input_type="pair").transform(gen)
assert_array_equal(X1.toarray(), X2.toarray())
def test_feature_hasher_strings():
# mix byte and Unicode strings; note that "foo" is a duplicate in row 0
raw_X = [["foo", "bar", "baz", "foo".encode("ascii")],
["bar".encode("ascii"), "baz", "quux"]]
for lg_n_features in (7, 9, 11, 16, 22):
n_features = 2 ** lg_n_features
it = (x for x in raw_X) # iterable
h = FeatureHasher(n_features, non_negative=True, input_type="string")
X = h.transform(it)
assert_equal(X.shape[0], len(raw_X))
assert_equal(X.shape[1], n_features)
assert_true(np.all(X.data > 0))
assert_equal(X[0].sum(), 4)
assert_equal(X[1].sum(), 3)
assert_equal(X.nnz, 6)
def test_feature_hasher_pairs():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": 2},
{"baz": 3, "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 2], x1_nz)
assert_equal([1, 3, 4], x2_nz)
def test_hash_empty_input():
n_features = 16
raw_X = [[], (), iter(range(0))]
h = FeatureHasher(n_features=n_features, input_type="string")
X = h.transform(raw_X)
assert_array_equal(X.A, np.zeros((len(raw_X), n_features)))
def test_hasher_invalid_input():
assert_raises(ValueError, FeatureHasher, input_type="gobbledygook")
assert_raises(ValueError, FeatureHasher, n_features=-1)
assert_raises(ValueError, FeatureHasher, n_features=0)
assert_raises(TypeError, FeatureHasher, n_features='ham')
h = FeatureHasher(n_features=np.uint16(2 ** 6))
assert_raises(ValueError, h.transform, [])
assert_raises(Exception, h.transform, [[5.5]])
assert_raises(Exception, h.transform, [[None]])
def test_hasher_set_params():
# Test delayed input validation in fit (useful for grid search).
hasher = FeatureHasher()
hasher.set_params(n_features=np.inf)
assert_raises(TypeError, hasher.fit)
def test_hasher_zeros():
# Assert that no zeros are materialized in the output.
X = FeatureHasher().transform([{'foo': 0}])
assert_equal(X.data.shape, (0,))
| bsd-3-clause |
LohithBlaze/scikit-learn | sklearn/linear_model/tests/test_coordinate_descent.py | 39 | 23697 | # Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
from sys import version_info
import numpy as np
from scipy import interpolate, sparse
from copy import deepcopy
from sklearn.datasets import load_boston
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import TempMemmap
from sklearn.linear_model.coordinate_descent import Lasso, \
LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet, \
MultiTaskElasticNetCV, MultiTaskLassoCV, lasso_path, enet_path
from sklearn.linear_model import LassoLarsCV, lars_path
def check_warnings():
if version_info < (2, 6):
raise SkipTest("Testing for warnings is not supported in versions \
older than Python 2.6")
def test_lasso_zero():
# Check that the lasso can handle zero data without crashing
X = [[0], [0], [0]]
y = [0, 0, 0]
clf = Lasso(alpha=0.1).fit(X, y)
pred = clf.predict([[1], [2], [3]])
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_lasso_toy():
# Test Lasso on a toy example for various values of alpha.
# When validating this against glmnet notice that glmnet divides it
# against nobs.
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
T = [[2], [3], [4]] # test sample
clf = Lasso(alpha=1e-8)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.85])
assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy():
# Test ElasticNet for various parameters of alpha and l1_ratio.
# Actually, the parameters alpha = 0 should not be allowed. However,
# we test it as a border case.
# ElasticNet is tested with and without precomputed Gram matrix
X = np.array([[-1.], [0.], [1.]])
Y = [-1, 0, 1] # just a straight line
T = [[2.], [3.], [4.]] # test sample
# this should be the same as lasso
clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
precompute=False)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=True)
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
n_targets=1):
"""
build an ill-posed linear regression problem with many noisy features and
comparatively few samples
"""
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
return X, y, X_test, y_test
def test_lasso_cv():
X, y, X_test, y_test = build_dataset()
max_iter = 150
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
# Check that the lars and the coordinate descent implementation
# select a similar alpha
lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
# for this we check that the alphas they select are no more than one
# grid index apart in clf.alphas_
assert_true(np.abs(
np.searchsorted(clf.alphas_[::-1], lars.alpha_)
- np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1)
# check that they also give a similar MSE
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T)
np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(),
clf.mse_path_[5].mean(), significant=2)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_lasso_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2,
n_jobs=1)
clf_unconstrained.fit(X, y)
assert_true(min(clf_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
clf_constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
positive=True, cv=2, n_jobs=1)
clf_constrained.fit(X, y)
assert_true(min(clf_constrained.coef_) >= 0)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
# Test that lasso_path with lars_path style output gives the
# same result
# Some toy data
X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
y = np.array([1, 2, 3.1])
alphas = [5., 1., .5]
# Use lars_path and lasso_path(new output) with 1D linear interpolation
# to compute the same path
alphas_lars, _, coef_path_lars = lars_path(X, y, method='lasso')
coef_path_cont_lars = interpolate.interp1d(alphas_lars[::-1],
coef_path_lars[:, ::-1])
alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas,
return_models=False)
coef_path_cont_lasso = interpolate.interp1d(alphas_lasso2[::-1],
coef_path_lasso2[:, ::-1])
assert_array_almost_equal(
coef_path_cont_lasso(alphas), coef_path_cont_lars(alphas),
decimal=1)
def test_enet_path():
# We use a large number of samples and of informative features so that
# the l1_ratio selected is more toward ridge than lasso
X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
n_informative_features=100)
max_iter = 150
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
# Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter, precompute=True)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
# Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
# Multi-output/target case
X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7],
cv=3, max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
assert_equal(clf.coef_.shape, (3, 10))
# Mono-output should have same cross-validated alpha_ and l1_ratio_
# in both cases.
X, y, _, _ = build_dataset(n_features=10)
clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf2.fit(X, y[:, np.newaxis])
assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_)
assert_almost_equal(clf1.alpha_, clf2.alpha_)
def test_path_parameters():
X, y, _, _ = build_dataset()
max_iter = 100
clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, tol=1e-3)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(50, clf.n_alphas)
assert_equal(50, len(clf.alphas_))
def test_warm_start():
X, y, _, _ = build_dataset()
clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
ignore_warnings(clf.fit)(X, y)
ignore_warnings(clf.fit)(X, y) # do a second round with 5 iterations
clf2 = ElasticNet(alpha=0.1, max_iter=10)
ignore_warnings(clf2.fit)(X, y)
assert_array_almost_equal(clf2.coef_, clf.coef_)
def test_lasso_alpha_warning():
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
clf = Lasso(alpha=0)
assert_warns(UserWarning, clf.fit, X, Y)
def test_lasso_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
lasso = Lasso(alpha=0.1, max_iter=1000, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
def test_enet_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
enet.fit(X, y)
assert_true(min(enet.coef_) >= 0)
def test_enet_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
enetcv_unconstrained = ElasticNetCV(n_alphas=3, eps=1e-1,
max_iter=max_iter,
cv=2, n_jobs=1)
enetcv_unconstrained.fit(X, y)
assert_true(min(enetcv_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
enetcv_constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
cv=2, positive=True, n_jobs=1)
enetcv_constrained.fit(X, y)
assert_true(min(enetcv_constrained.coef_) >= 0)
def test_uniform_targets():
enet = ElasticNetCV(fit_intercept=True, n_alphas=3)
m_enet = MultiTaskElasticNetCV(fit_intercept=True, n_alphas=3)
lasso = LassoCV(fit_intercept=True, n_alphas=3)
m_lasso = MultiTaskLassoCV(fit_intercept=True, n_alphas=3)
models_single_task = (enet, lasso)
models_multi_task = (m_enet, m_lasso)
rng = np.random.RandomState(0)
X_train = rng.random_sample(size=(10, 3))
X_test = rng.random_sample(size=(10, 3))
y1 = np.empty(10)
y2 = np.empty((10, 2))
for model in models_single_task:
for y_values in (0, 5):
y1.fill(y_values)
assert_array_equal(model.fit(X_train, y1).predict(X_test), y1)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
for model in models_multi_task:
for y_values in (0, 5):
y2[:, 0].fill(y_values)
y2[:, 1].fill(2 * y_values)
assert_array_equal(model.fit(X_train, y2).predict(X_test), y2)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
def test_multi_task_lasso_and_enet():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
# Y_test = np.c_[y_test, y_test]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_lasso_readonly_data():
X = np.array([[-1], [0], [1]])
Y = np.array([-1, 0, 1]) # just a straight line
T = np.array([[2], [3], [4]]) # test sample
with TempMemmap((X, Y)) as (X, Y):
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
def test_multi_task_lasso_readonly_data():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
with TempMemmap((X, Y)) as (X, Y):
Y = np.c_[y, y]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
n_targets = 3
X, y, _, _ = build_dataset(n_samples=10, n_features=8,
n_informative_features=10, n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True)
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_multioutput_enetcv_error():
X = np.random.randn(10, 2)
y = np.random.randn(10, 2)
clf = ElasticNetCV()
assert_raises(ValueError, clf.fit, X, y)
def test_multitask_enet_and_lasso_cv():
X, y, _, _ = build_dataset(n_features=100, n_targets=3)
clf = MultiTaskElasticNetCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00556, 3)
clf = MultiTaskLassoCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00278, 3)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=50, eps=1e-3, max_iter=100,
l1_ratio=[0.3, 0.5], tol=1e-3)
clf.fit(X, y)
assert_equal(0.5, clf.l1_ratio_)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((2, 50, 3), clf.mse_path_.shape)
assert_equal((2, 50), clf.alphas_.shape)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskLassoCV(n_alphas=50, eps=1e-3, max_iter=100, tol=1e-3)
clf.fit(X, y)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((50, 3), clf.mse_path_.shape)
assert_equal(50, len(clf.alphas_))
def test_1d_multioutput_enet_and_multitask_enet_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf.fit(X, y[:, 0])
clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_1d_multioutput_lasso_and_multitask_lasso_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = LassoCV(n_alphas=5, eps=2e-3)
clf.fit(X, y[:, 0])
clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3)
clf1.fit(X, y)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_sparse_input_dtype_enet_and_lassocv():
X, y, _, _ = build_dataset(n_features=10)
clf = ElasticNetCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = ElasticNetCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
clf = LassoCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = LassoCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
def test_precompute_invalid_argument():
X, y, _, _ = build_dataset()
for clf in [ElasticNetCV(precompute="invalid"),
LassoCV(precompute="invalid")]:
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_convergence():
X, y, _, _ = build_dataset()
model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y)
n_iter_reference = model.n_iter_
# This dataset is not trivial enough for the model to converge in one pass.
assert_greater(n_iter_reference, 2)
# Check that n_iter_ is invariant to multiple calls to fit
# when warm_start=False, all else being equal.
model.fit(X, y)
n_iter_cold_start = model.n_iter_
assert_equal(n_iter_cold_start, n_iter_reference)
# Fit the same model again, using a warm start: the optimizer just performs
# a single pass before checking that it has already converged
model.set_params(warm_start=True)
model.fit(X, y)
n_iter_warm_start = model.n_iter_
assert_equal(n_iter_warm_start, 1)
def test_warm_start_convergence_with_regularizer_decrement():
boston = load_boston()
X, y = boston.data, boston.target
# Train a model to converge on a lightly regularized problem
final_alpha = 1e-5
low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y)
# Fitting a new model on a more regularized version of the same problem.
# Fitting with high regularization is easier, so it should converge faster
# in general.
high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y)
assert_greater(low_reg_model.n_iter_, high_reg_model.n_iter_)
# Fit the solution to the original, less regularized version of the
# problem but from the solution of the highly regularized variant of
# the problem as a better starting point. This should also converge
# faster than the original model that starts from zero.
warm_low_reg_model = deepcopy(high_reg_model)
warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha)
warm_low_reg_model.fit(X, y)
assert_greater(low_reg_model.n_iter_, warm_low_reg_model.n_iter_)
def test_random_descent():
# Test that both random and cyclic selection give the same results.
# Ensure that the test models fully converge and check a wide
# range of conditions.
# This uses the coordinate descent algo using the gram trick.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X, y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# This uses the descent algo without the gram trick
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X.T, y[:20])
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X.T, y[:20])
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Sparse Case
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(sparse.csr_matrix(X), y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(sparse.csr_matrix(X), y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Multioutput case.
new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
clf_cyclic = MultiTaskElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, new_y)
clf_random = MultiTaskElasticNet(selection='random', tol=1e-8,
random_state=42)
clf_random.fit(X, new_y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Raise error when selection is not in cyclic or random.
clf_random = ElasticNet(selection='invalid')
assert_raises(ValueError, clf_random.fit, X, y)
def test_deprection_precompute_enet():
# Test that setting precompute="auto" gives a Deprecation Warning.
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
clf = ElasticNet(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
clf = Lasso(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
def test_enet_path_positive():
# Test that the coefs returned by positive=True in enet_path are positive
X, y, _, _ = build_dataset(n_samples=50, n_features=50)
for path in [enet_path, lasso_path]:
pos_path_coef = path(X, y, positive=True)[1]
assert_true(np.all(pos_path_coef >= 0))
def test_sparse_dense_descent_paths():
# Test that dense and sparse input give the same coefficients along descent paths.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
csr = sparse.csr_matrix(X)
for path in [enet_path, lasso_path]:
_, coefs, _ = path(X, y, fit_intercept=False)
_, sparse_coefs, _ = path(csr, y, fit_intercept=False)
assert_array_almost_equal(coefs, sparse_coefs)
| bsd-3-clause |
MohammedWasim/scikit-learn | sklearn/datasets/tests/test_base.py | 204 | 5878 | import os
import shutil
import tempfile
import warnings
import nose
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_boston
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
# clear_data_home will delete both the content and the folder itself
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
    assert_equal(res.target.size, 442)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
| bsd-3-clause |
glouppe/scikit-learn | examples/model_selection/plot_underfitting_overfitting.py | 51 | 2668 | """
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
We evaluate quantitatively **overfitting** / **underfitting** by using
cross-validation. We calculate the mean squared error (MSE) on the validation
set: the higher it is, the less likely the model generalizes correctly from the
training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
    # Evaluate the models using cross-validation
    scores = cross_val_score(pipeline, X[:, np.newaxis], y,
                             scoring="neg_mean_squared_error", cv=10)
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
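# Supplementary sketch (not part of the original example): the docstring above
# evaluates under-/overfitting through the per-degree plot titles. The helper
# below tabulates the same cross-validated MSE per polynomial degree in a
# standalone way. It reuses only objects already defined in this example and
# assumes scikit-learn >= 0.18, where the scorer is named
# "neg_mean_squared_error".


def tabulate_cv_mse(X, y, degrees, cv=10):
    """Return a dict mapping polynomial degree to mean cross-validated MSE."""
    results = {}
    for degree in degrees:
        model = Pipeline([
            ("polynomial_features", PolynomialFeatures(degree=degree,
                                                       include_bias=False)),
            ("linear_regression", LinearRegression()),
        ])
        scores = cross_val_score(model, X[:, np.newaxis], y,
                                 scoring="neg_mean_squared_error", cv=cv)
        results[degree] = -scores.mean()
    return results


for degree, mse in sorted(tabulate_cv_mse(X, y, degrees).items()):
    print("degree %2d: cross-validated MSE = %.3e" % (degree, mse))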
| bsd-3-clause |
elkingtonmcb/h2o-2 | py/testdir_0xdata_only/test_hdfs_cdh5_fvec.py | 9 | 4072 | import unittest, time, sys, random
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i, h2o_exec as h2e
import getpass
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
        # assume we're at 0xdata with its hdfs namenode
h2o.init(1, use_hdfs=True, hdfs_version='cdh5', hdfs_name_node='172.16.2.180')
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_hdfs_cdh5_fvec(self):
print "\nLoad a list of files from HDFS, parse and do 1 RF tree"
        print "\nYou can try running as hduser/hduser if this fails"
# larger set in my local dir
# fails because classes aren't integers
# "allstate_claim_prediction_train_set.zip",
csvFilenameAll = [
# "3G_poker_shuffle"
("and-testing.data", 60),
### "arcene2_train.both",
### "arcene_train.both",
### "bestbuy_test.csv",
("covtype.data", 60),
("covtype4x.shuffle.data", 60),
# "four_billion_rows.csv",
("hhp.unbalanced.012.data.gz", 60),
("hhp.unbalanced.data.gz", 60),
("leads.csv", 60),
# ("covtype.169x.data", 1200),
("prostate_long_1G.csv", 200),
("airlines_all.csv", 1200),
]
# pick 8 randomly!
if (1==0):
csvFilenameList = random.sample(csvFilenameAll,8)
# Alternatively: do the list in order! Note the order is easy to hard
else:
csvFilenameList = csvFilenameAll
# pop open a browser on the cloud
# h2b.browseTheCloud()
trial = 0
print "try importing /tmp2"
d = h2i.import_only(path="tmp2/*", schema='hdfs', timeoutSecs=1000)
for (csvFilename, timeoutSecs) in csvFilenameList:
# creates csvFilename.hex from file in hdfs dir
print "Loading", csvFilename, 'from HDFS'
start = time.time()
hex_key = "a.hex"
csvPathname = "datasets/" + csvFilename
parseResult = h2i.import_parse(path=csvPathname, schema='hdfs', hex_key=hex_key, timeoutSecs=1000)
print "hdfs parse of", csvPathname, "took", time.time() - start, 'secs'
start = time.time()
print "Saving", csvFilename, 'to HDFS'
print "Using /tmp2 to avoid the '.' prefixed files in /tmp2 (kills import)"
print "Unique per-user to avoid permission issues"
username = getpass.getuser()
csvPathname = "tmp2/a%s.%s.csv" % (trial, username)
# reuse the file name to avoid running out of space
csvPathname = "tmp2/a%s.%s.csv" % ('_h2o_export_files', username)
path = "hdfs://"+ h2o.nodes[0].hdfs_name_node + "/" + csvPathname
h2o.nodes[0].export_files(src_key=hex_key, path=path, force=1, timeoutSecs=timeoutSecs)
print "export_files of", hex_key, "to", path, "took", time.time() - start, 'secs'
trial += 1
print "Re-Loading", csvFilename, 'from HDFS'
start = time.time()
hex_key = "a2.hex"
time.sleep(2)
d = h2i.import_only(path=csvPathname, schema='hdfs', timeoutSecs=1000)
print h2o.dump_json(d)
parseResult = h2i.import_parse(path=csvPathname, schema='hdfs', hex_key=hex_key, timeoutSecs=1000)
print "hdfs re-parse of", csvPathname, "took", time.time() - start, 'secs'
# currently fails
# print "This comparison test only works because na's are treated as 0's. bug fix might change that na-> na"
# execExpr = "sum(%s!=%s)" % ("a.hex", "a2.hex")
# resultExec, result = h2e.exec_expr(execExpr=execExpr, timeoutSecs=30)
# self.assertEqual(result, 0.0, msg="a.hex and a2.hex weren't the same (NA treated as 0) %s" % result)
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
lilleswing/deepchem | contrib/one_shot_models/support_classifier.py | 6 | 14629 | """
Train support-based models.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import warnings
import numpy as np
import tensorflow as tf
import sys
import time
from deepchem.models import Model
from deepchem.data import pad_batch
from deepchem.data import NumpyDataset
from deepchem.metrics import to_one_hot
from deepchem.metrics import from_one_hot
from deepchem.models.tf_new_models.graph_topology import merge_dicts
from deepchem.nn import model_ops
from deepchem.data import SupportGenerator
from deepchem.data import EpisodeGenerator
from deepchem.data import get_task_dataset
from deepchem.data import get_single_task_test
from deepchem.data import get_task_dataset_minus_support
from deepchem.nn.copy import Input
class SupportGraphClassifier(Model):
def __init__(self,
model,
test_batch_size=10,
support_batch_size=10,
learning_rate=.001,
similarity="cosine",
**kwargs):
"""Builds a support-based classifier.
See https://arxiv.org/pdf/1606.04080v1.pdf for definition of support.
Parameters
----------
    model: SequentialSupportModel
      Contains core layers in model.
    test_batch_size: int, optional
      Batch size for test datapoints.
    support_batch_size: int, optional
      Batch size for support datapoints.
    learning_rate: float, optional
      Learning rate for the Adam optimizer.
    similarity: str, optional
      Similarity measure between test and support embeddings; only "cosine"
      is supported.
"""
warnings.warn("SupportGraphClassifier is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
self.similarity = similarity
self.model = model
self.sess = tf.Session(graph=self.model.graph)
self.test_batch_size = test_batch_size
self.support_batch_size = support_batch_size
self.learning_rate = learning_rate
self.epsilon = 1e-7
with self.model.graph.as_default():
self.add_placeholders()
self.pred_op, self.scores_op, self.loss_op = self.add_training_loss()
# Get train function
self.train_op = self.get_training_op(self.loss_op)
# Initialize
self.init_fn = tf.global_variables_initializer()
self.sess.run(self.init_fn)
def get_training_op(self, loss):
"""Attaches an optimizer to the graph."""
opt = tf.train.AdamOptimizer(self.learning_rate)
return opt.minimize(self.loss_op, name="train")
def add_placeholders(self):
"""Adds placeholders to graph."""
#################################################################### DEBUG
#self.test_label_placeholder = Input(
# tensor=tf.placeholder(dtype='float32', shape=(self.test_batch_size),
# name="label_placeholder"))
#self.test_weight_placeholder = Input(
# tensor=tf.placeholder(dtype='float32', shape=(self.test_batch_size),
# name="weight_placeholder"))
self.test_label_placeholder = tf.placeholder(
dtype='float32', shape=(self.test_batch_size), name="label_placeholder")
self.test_weight_placeholder = tf.placeholder(
dtype='float32',
shape=(self.test_batch_size),
name="weight_placeholder")
# TODO(rbharath): Should weights for the support be used?
# Support labels
#self.support_label_placeholder = Input(
# tensor=tf.placeholder(dtype='float32', shape=[self.support_batch_size],
# name="support_label_placeholder"))
self.support_label_placeholder = tf.placeholder(
dtype='float32',
shape=[self.support_batch_size],
name="support_label_placeholder")
self.phase = tf.placeholder(dtype='bool', name='keras_learning_phase')
#################################################################### DEBUG
def construct_feed_dict(self, test, support, training=True, add_phase=False):
"""Constructs tensorflow feed from test/support sets."""
# Generate dictionary elements for support
feed_dict = (
self.model.support_graph_topology.batch_to_feed_dict(support.X))
feed_dict[self.support_label_placeholder] = np.squeeze(support.y)
# Get graph information for test
batch_topo_dict = (
self.model.test_graph_topology.batch_to_feed_dict(test.X))
feed_dict = merge_dicts([batch_topo_dict, feed_dict])
# Generate dictionary elements for test
feed_dict[self.test_label_placeholder] = np.squeeze(test.y)
feed_dict[self.test_weight_placeholder] = np.squeeze(test.w)
if add_phase:
feed_dict[self.phase] = training
return feed_dict
def fit(self,
dataset,
n_episodes_per_epoch=1000,
nb_epochs=1,
n_pos=1,
n_neg=9,
log_every_n_samples=10,
**kwargs):
"""Fits model on dataset using cached supports.
    For each epoch, samples n_episodes_per_epoch (support, test) pairs and does
gradient descent.
Parameters
----------
dataset: dc.data.Dataset
Dataset to fit model on.
nb_epochs: int, optional
number of epochs of training.
n_episodes_per_epoch: int, optional
Number of (support, test) pairs to sample and train on per epoch.
n_pos: int, optional
Number of positive examples per support.
n_neg: int, optional
Number of negative examples per support.
log_every_n_samples: int, optional
Displays info every this number of samples
"""
time_start = time.time()
# Perform the optimization
n_tasks = len(dataset.get_task_names())
n_test = self.test_batch_size
feed_total, run_total = 0, 0
for epoch in range(nb_epochs):
# Create different support sets
episode_generator = EpisodeGenerator(dataset, n_pos, n_neg, n_test,
n_episodes_per_epoch)
recent_losses = []
for ind, (task, support, test) in enumerate(episode_generator):
if ind % log_every_n_samples == 0:
print("Epoch %d, Sample %d from task %s" % (epoch, ind, str(task)))
# Get batch to try it out on
feed_start = time.time()
feed_dict = self.construct_feed_dict(test, support)
feed_end = time.time()
feed_total += (feed_end - feed_start)
# Train on support set, batch pair
run_start = time.time()
_, loss = self.sess.run(
[self.train_op, self.loss_op], feed_dict=feed_dict)
run_end = time.time()
run_total += (run_end - run_start)
if ind % log_every_n_samples == 0:
mean_loss = np.mean(np.array(recent_losses))
print("\tmean loss is %s" % str(mean_loss))
recent_losses = []
else:
recent_losses.append(loss)
time_end = time.time()
print("fit took %s seconds" % str(time_end - time_start))
print("feed_total: %s" % str(feed_total))
print("run_total: %s" % str(run_total))
def save(self):
"""Save all models
TODO(rbharath): Saving is not yet supported for this model.
"""
pass
def add_training_loss(self):
"""Adds training loss and scores for network."""
pred, scores = self.get_scores()
losses = tf.nn.sigmoid_cross_entropy_with_logits(
logits=scores, labels=self.test_label_placeholder)
weighted_losses = tf.multiply(losses, self.test_weight_placeholder)
loss = tf.reduce_sum(weighted_losses)
return pred, scores, loss
def get_scores(self):
"""Adds tensor operations for computing scores.
Computes prediction yhat (eqn (1) in Matching networks) of class for test
compounds.
"""
# Get featurization for test
# Shape (n_test, n_feat)
test_feat = self.model.get_test_output()
# Get featurization for support
# Shape (n_support, n_feat)
support_feat = self.model.get_support_output()
# Computes the inner part c() of the kernel
# (the inset equation in section 2.1.1 of Matching networks paper).
# Normalize
if self.similarity == 'cosine':
g = model_ops.cosine_distances(test_feat, support_feat)
else:
raise ValueError("Only cosine similarity is supported.")
# TODO(rbharath): euclidean kernel is broken!
#elif self.similarity == 'euclidean':
# g = model_ops.euclidean_distance(test_feat, support_feat)
# Note that gram matrix g has shape (n_test, n_support)
# soft corresponds to a(xhat, x_i) in eqn (1) of Matching Networks paper
# https://arxiv.org/pdf/1606.04080v1.pdf
# Computes softmax across axis 1, (so sums distances to support set for
# each test entry) to get attention vector
# Shape (n_test, n_support)
attention = tf.nn.softmax(g) # Renormalize
# Weighted sum of support labels
# Shape (n_support, 1)
support_labels = tf.expand_dims(self.support_label_placeholder, 1)
# pred is yhat in eqn (1) of Matching Networks.
# Shape squeeze((n_test, n_support) * (n_support, 1)) = (n_test,)
pred = tf.squeeze(tf.matmul(attention, support_labels), [1])
# Clip softmax probabilities to range [epsilon, 1-epsilon]
# Shape (n_test,)
pred = tf.clip_by_value(pred, 1e-7, 1. - 1e-7)
# Convert to logit space using inverse sigmoid (logit) function
# logit function: log(pred) - log(1-pred)
# Used to invoke tf.nn.sigmoid_cross_entropy_with_logits
# in Cross Entropy calculation.
# Shape (n_test,)
scores = tf.log(pred) - tf.log(tf.constant(1., dtype=tf.float32) - pred)
return pred, scores
def predict(self, support, test):
"""Makes predictions on test given support.
TODO(rbharath): Does not currently support any transforms.
TODO(rbharath): Only for 1 task at a time currently. Is there a better way?
"""
y_preds = []
for (X_batch, y_batch, w_batch, ids_batch) in test.iterbatches(
self.test_batch_size, deterministic=True):
test_batch = NumpyDataset(X_batch, y_batch, w_batch, ids_batch)
y_pred_batch = self.predict_on_batch(support, test_batch)
y_preds.append(y_pred_batch)
y_pred = np.concatenate(y_preds)
return y_pred
def predict_proba(self, support, test):
"""Makes predictions on test given support.
TODO(rbharath): Does not currently support any transforms.
TODO(rbharath): Only for 1 task at a time currently. Is there a better way?
Parameters
----------
support: dc.data.Dataset
The support dataset
test: dc.data.Dataset
The test dataset
"""
y_preds = []
for (X_batch, y_batch, w_batch, ids_batch) in test.iterbatches(
self.test_batch_size, deterministic=True):
test_batch = NumpyDataset(X_batch, y_batch, w_batch, ids_batch)
y_pred_batch = self.predict_proba_on_batch(support, test_batch)
y_preds.append(y_pred_batch)
y_pred = np.concatenate(y_preds)
return y_pred
def predict_on_batch(self, support, test_batch):
"""Make predictions on batch of data."""
n_samples = len(test_batch)
X, y, w, ids = pad_batch(self.test_batch_size, test_batch.X, test_batch.y,
test_batch.w, test_batch.ids)
padded_test_batch = NumpyDataset(X, y, w, ids)
feed_dict = self.construct_feed_dict(padded_test_batch, support)
# Get scores
pred, scores = self.sess.run(
[self.pred_op, self.scores_op], feed_dict=feed_dict)
y_pred_batch = np.round(pred)
# Remove padded elements
y_pred_batch = y_pred_batch[:n_samples]
return y_pred_batch
def predict_proba_on_batch(self, support, test_batch):
"""Make predictions on batch of data."""
n_samples = len(test_batch)
X, y, w, ids = pad_batch(self.test_batch_size, test_batch.X, test_batch.y,
test_batch.w, test_batch.ids)
padded_test_batch = NumpyDataset(X, y, w, ids)
feed_dict = self.construct_feed_dict(padded_test_batch, support)
# Get scores
pred, scores = self.sess.run(
[self.pred_op, self.scores_op], feed_dict=feed_dict)
# pred corresponds to prob(example == 1)
y_pred_batch = np.zeros((n_samples, 2))
# Remove padded elements
pred = pred[:n_samples]
y_pred_batch[:, 1] = pred
y_pred_batch[:, 0] = 1 - pred
return y_pred_batch
def evaluate(self,
dataset,
metric,
n_pos,
n_neg,
n_trials=1000,
exclude_support=True):
"""Evaluate performance on dataset according to metrics
Evaluates the performance of the trained model by sampling supports randomly
for each task in dataset. For each sampled support, the accuracy of the
model with support provided is computed on all data for that task. If
exclude_support is True (by default), the support set is excluded from this
    accuracy calculation. exclude_support should be set to False if the model's
    memorization capacity is to be evaluated.
Since the accuracy on a task is dependent on the choice of random support,
the evaluation experiment is repeated n_trials times for each task.
(Each task gets n_trials experiments). The computed accuracies
are averaged across trials.
TODO(rbharath): Currently does not support any transformers.
Parameters
----------
dataset: dc.data.Dataset
Dataset to test on.
    metric: dc.metrics.Metric
Evaluation metric.
n_pos: int, optional
Number of positive samples per support.
n_neg: int, optional
Number of negative samples per support.
exclude_support: bool, optional
Whether support set should be excluded when computing model accuracy.
"""
# Get batches
test_tasks = range(len(dataset.get_task_names()))
task_scores = {task: [] for task in test_tasks}
support_generator = SupportGenerator(dataset, n_pos, n_neg, n_trials)
for ind, (task, support) in enumerate(support_generator):
print("Eval sample %d from task %s" % (ind, str(task)))
# TODO(rbharath): Add test for get_task_dataset_minus_support for
# multitask case with missing data...
if exclude_support:
print("Removing support datapoints for eval.")
task_dataset = get_task_dataset_minus_support(dataset, support, task)
else:
print("Keeping support datapoints for eval.")
task_dataset = get_task_dataset(dataset, task)
y_pred = self.predict_proba(support, task_dataset)
task_scores[task].append(
metric.compute_metric(task_dataset.y, y_pred, task_dataset.w))
# Join information for all tasks.
mean_task_scores = {}
std_task_scores = {}
for task in test_tasks:
mean_task_scores[task] = np.mean(np.array(task_scores[task]))
std_task_scores[task] = np.std(np.array(task_scores[task]))
return mean_task_scores, std_task_scores
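# Supplementary sketch (not part of the original module): a minimal NumPy
# illustration of eqn (1) of the Matching Networks paper
# (https://arxiv.org/pdf/1606.04080v1.pdf) as realized in
# SupportGraphClassifier.get_scores() above. A cosine kernel between test and
# support embeddings is turned into an attention distribution with a softmax,
# the support labels are mixed under that attention, and the resulting
# probability is mapped to logit space. All shapes and values below are toy
# assumptions chosen purely for illustration.
if __name__ == "__main__":
  rng = np.random.RandomState(0)
  n_test, n_support, n_feat = 3, 5, 8
  test_feat = rng.randn(n_test, n_feat)
  support_feat = rng.randn(n_support, n_feat)
  support_labels = rng.randint(0, 2, size=n_support).astype(np.float64)

  # Cosine kernel g, shape (n_test, n_support)
  test_norm = test_feat / np.linalg.norm(test_feat, axis=1, keepdims=True)
  support_norm = support_feat / np.linalg.norm(support_feat, axis=1, keepdims=True)
  g = test_norm.dot(support_norm.T)

  # Softmax over the support axis gives the attention a(xhat, x_i)
  e = np.exp(g - g.max(axis=1, keepdims=True))
  attention = e / e.sum(axis=1, keepdims=True)

  # yhat = sum_i a(xhat, x_i) * y_i, then clip and convert to logits
  pred = attention.dot(support_labels)
  pred = np.clip(pred, 1e-7, 1. - 1e-7)
  scores = np.log(pred) - np.log(1. - pred)
  print("pred:", pred)
  print("scores:", scores)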
| mit |
luo66/scikit-learn | examples/neural_networks/plot_rbm_logistic_classification.py | 257 | 4609 | """
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.cross_validation import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
###############################################################################
# Setting up
def nudge_dataset(X, Y):
"""
This produces a dataset 5 times bigger than the original one,
by moving the 8x8 images in X around by 1px to left, right, down, up
"""
direction_vectors = [
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[1, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 1],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 1, 0]]]
shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
weights=w).ravel()
X = np.concatenate([X] +
[np.apply_along_axis(shift, 1, X, vector)
for vector in direction_vectors])
Y = np.concatenate([Y for _ in range(5)], axis=0)
return X, Y
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
X, Y = nudge_dataset(X, digits.target)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
test_size=0.2,
random_state=0)
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
###############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
metrics.classification_report(
Y_test,
classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
metrics.classification_report(
Y_test,
logistic_classifier.predict(X_test))))
###############################################################################
# Plotting
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
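# Supplementary sketch (not part of the original example): the docstring above
# notes that the hyperparameters (learning rate, hidden layer size,
# regularization) were tuned by grid search, but that the search is not
# reproduced because of runtime constraints. A minimal version of such a
# search over the RBM/logistic pipeline is sketched below. The grid values are
# illustrative assumptions, and the search is wrapped in a function so that it
# is not run as part of this example.

def run_hyperparameter_search(X_train, Y_train):
    # sklearn.grid_search matches the era of the imports above; in newer
    # scikit-learn versions use sklearn.model_selection instead.
    from sklearn.grid_search import GridSearchCV

    search = GridSearchCV(
        Pipeline(steps=[('rbm', BernoulliRBM(random_state=0)),
                        ('logistic', linear_model.LogisticRegression())]),
        param_grid={
            'rbm__learning_rate': [0.01, 0.06, 0.1],
            'rbm__n_components': [50, 100, 200],
            'logistic__C': [1.0, 100.0, 6000.0],
        },
        cv=3, n_jobs=-1, verbose=1)
    search.fit(X_train, Y_train)
    print("Best parameters found:", search.best_params_)
    return search.best_estimator_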
| bsd-3-clause |
shyamalschandra/seastar | configure.py | 19 | 26249 | #!/usr/bin/python3
#
# This file is open source software, licensed to you under the terms
# of the Apache License, Version 2.0 (the "License"). See the NOTICE file
# distributed with this work for additional information regarding copyright
# ownership. You may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os, os.path, textwrap, argparse, sys, shlex, subprocess, tempfile, re
configure_args = str.join(' ', [shlex.quote(x) for x in sys.argv[1:]])
def get_flags():
with open('/proc/cpuinfo') as f:
for line in f:
if line.strip():
if line.rstrip('\n').startswith('flags'):
return re.sub(r'^flags\s+: ', '', line).split()
def add_tristate(arg_parser, name, dest, help):
arg_parser.add_argument('--enable-' + name, dest = dest, action = 'store_true', default = None,
help = 'Enable ' + help)
arg_parser.add_argument('--disable-' + name, dest = dest, action = 'store_false', default = None,
help = 'Disable ' + help)
def apply_tristate(var, test, note, missing):
if (var is None) or var:
if test():
return True
elif var == True:
print(missing)
sys.exit(1)
else:
print(note)
return False
return False
#
# dpdk_cflags - fetch the DPDK specific CFLAGS
#
# Run a simple makefile that "includes" the DPDK main makefile and prints the
# MACHINE_CFLAGS value
#
def dpdk_cflags (dpdk_target):
with tempfile.NamedTemporaryFile() as sfile:
dpdk_target = os.path.abspath(dpdk_target)
dpdk_target = re.sub(r'\/+$', '', dpdk_target)
dpdk_sdk_path = os.path.dirname(dpdk_target)
dpdk_target_name = os.path.basename(dpdk_target)
dpdk_arch = dpdk_target_name.split('-')[0]
if args.dpdk:
dpdk_sdk_path = 'dpdk'
dpdk_target = os.getcwd() + '/build/dpdk'
dpdk_target_name = 'x86_64-{}-linuxapp-gcc'.format(dpdk_machine)
dpdk_arch = 'x86_64'
sfile.file.write(bytes('include ' + dpdk_sdk_path + '/mk/rte.vars.mk' + "\n", 'utf-8'))
sfile.file.write(bytes('all:' + "\n\t", 'utf-8'))
sfile.file.write(bytes('@echo $(MACHINE_CFLAGS)' + "\n", 'utf-8'))
sfile.file.flush()
dpdk_cflags = subprocess.check_output(['make', '--no-print-directory',
'-f', sfile.name,
'RTE_SDK=' + dpdk_sdk_path,
'RTE_OUTPUT=' + dpdk_target,
'RTE_TARGET=' + dpdk_target_name,
'RTE_SDK_BIN=' + dpdk_target,
'RTE_ARCH=' + dpdk_arch])
dpdk_cflags_str = dpdk_cflags.decode('utf-8')
dpdk_cflags_str = re.sub(r'\n+$', '', dpdk_cflags_str)
dpdk_cflags_final = ''
return dpdk_cflags_str
def try_compile(compiler, source = '', flags = []):
with tempfile.NamedTemporaryFile() as sfile:
sfile.file.write(bytes(source, 'utf-8'))
sfile.file.flush()
return subprocess.call([compiler, '-x', 'c++', '-o', '/dev/null', '-c', sfile.name] + flags,
stdout = subprocess.DEVNULL,
stderr = subprocess.DEVNULL) == 0
def try_compile_and_run(compiler, flags, source, env = {}):
mktemp = tempfile.NamedTemporaryFile
with mktemp() as sfile, mktemp(mode='rb') as xfile:
sfile.file.write(bytes(source, 'utf-8'))
sfile.file.flush()
xfile.file.close()
if subprocess.call([compiler, '-x', 'c++', '-o', xfile.name, sfile.name] + flags,
stdout = subprocess.DEVNULL, stderr = subprocess.DEVNULL) != 0:
return False
e = os.environ.copy()
e.update(env)
env = e
return subprocess.call([xfile.name], stdout = subprocess.DEVNULL, stderr = subprocess.DEVNULL, env=env) == 0
def warning_supported(warning, compiler):
# gcc ignores -Wno-x even if it is not supported
adjusted = re.sub('^-Wno-', '-W', warning)
return try_compile(flags = [adjusted], compiler = compiler)
def debug_flag(compiler):
src_with_auto = textwrap.dedent('''\
template <typename T>
struct x { auto f() {} };
x<int> a;
''')
if try_compile(source = src_with_auto, flags = ['-g', '-std=gnu++1y'], compiler = compiler):
return '-g'
else:
print('Note: debug information disabled; upgrade your compiler')
return ''
def sanitize_vptr_flag(compiler):
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67258
if (not try_compile(compiler, flags=['-fsanitize=vptr'])
or try_compile_and_run(compiler, flags=['-fsanitize=undefined', '-fno-sanitize-recover'],
env={'UBSAN_OPTIONS': 'exitcode=1'}, source=textwrap.dedent('''
struct A
{
virtual ~A() {}
};
struct B : virtual A {};
struct C : virtual A {};
struct D : B, virtual C {};
int main()
{
D d;
}
'''))):
return ''
else:
print('-fsanitize=vptr is broken, disabling')
return '-fno-sanitize=vptr'
modes = {
'debug': {
'sanitize': '-fsanitize=address -fsanitize=leak -fsanitize=undefined',
'sanitize_libs': '-lubsan -lasan',
'opt': '-O0 -DDEBUG -DDEBUG_SHARED_PTR -DDEFAULT_ALLOCATOR',
'libs': '',
},
'release': {
'sanitize': '',
'sanitize_libs': '',
'opt': '-O2',
'libs': '',
},
}
tests = [
'tests/fileiotest',
'tests/directory_test',
'tests/linecount',
'tests/echotest',
'tests/l3_test',
'tests/ip_test',
'tests/timertest',
'tests/tcp_test',
'tests/futures_test',
'tests/alloc_test',
'tests/foreign_ptr_test',
'tests/smp_test',
'tests/thread_test',
'tests/thread_context_switch',
'tests/udp_server',
'tests/udp_client',
'tests/blkdiscard_test',
'tests/sstring_test',
'tests/httpd',
'tests/memcached/test_ascii_parser',
'tests/tcp_server',
'tests/tcp_client',
'tests/allocator_test',
'tests/output_stream_test',
'tests/udp_zero_copy',
'tests/shared_ptr_test',
'tests/slab_test',
'tests/fstream_test',
'tests/distributed_test',
'tests/rpc',
'tests/semaphore_test',
'tests/packet_test',
]
apps = [
'apps/httpd/httpd',
'apps/seawreck/seawreck',
'apps/seastar/seastar',
'apps/memcached/memcached',
]
all_artifacts = apps + tests + ['libseastar.a', 'seastar.pc']
arg_parser = argparse.ArgumentParser('Configure seastar')
arg_parser.add_argument('--static', dest = 'static', action = 'store_const', default = '',
const = '-static',
                        help = 'Static link (useful for running on hosts outside the build environment)')
arg_parser.add_argument('--pie', dest = 'pie', action = 'store_true',
help = 'Build position-independent executable (PIE)')
arg_parser.add_argument('--so', dest = 'so', action = 'store_true',
help = 'Build shared object (SO) instead of executable')
arg_parser.add_argument('--mode', action='store', choices=list(modes.keys()) + ['all'], default='all')
arg_parser.add_argument('--with', dest='artifacts', action='append', choices=all_artifacts, default=[])
arg_parser.add_argument('--cflags', action = 'store', dest = 'user_cflags', default = '',
help = 'Extra flags for the C++ compiler')
arg_parser.add_argument('--ldflags', action = 'store', dest = 'user_ldflags', default = '',
help = 'Extra flags for the linker')
arg_parser.add_argument('--compiler', action = 'store', dest = 'cxx', default = 'g++',
help = 'C++ compiler path')
arg_parser.add_argument('--with-osv', action = 'store', dest = 'with_osv', default = '',
help = 'Shortcut for compile for OSv')
arg_parser.add_argument('--enable-dpdk', action = 'store_true', dest = 'dpdk', default = False,
help = 'Enable dpdk (from included dpdk sources)')
arg_parser.add_argument('--dpdk-target', action = 'store', dest = 'dpdk_target', default = '',
help = 'Path to DPDK SDK target location (e.g. <DPDK SDK dir>/x86_64-native-linuxapp-gcc)')
arg_parser.add_argument('--debuginfo', action = 'store', dest = 'debuginfo', type = int, default = 1,
                        help = 'Enable (1) / disable (0) compiler debug information generation')
add_tristate(arg_parser, name = 'hwloc', dest = 'hwloc', help = 'hwloc support')
add_tristate(arg_parser, name = 'xen', dest = 'xen', help = 'Xen support')
args = arg_parser.parse_args()
libnet = [
'net/proxy.cc',
'net/virtio.cc',
'net/dpdk.cc',
'net/ip.cc',
'net/ethernet.cc',
'net/arp.cc',
'net/native-stack.cc',
'net/ip_checksum.cc',
'net/udp.cc',
'net/tcp.cc',
'net/dhcp.cc',
]
core = [
'core/reactor.cc',
'core/fstream.cc',
'core/posix.cc',
'core/memory.cc',
'core/resource.cc',
'core/scollectd.cc',
'core/app-template.cc',
'core/thread.cc',
'core/dpdk_rte.cc',
'util/conversions.cc',
'net/packet.cc',
'net/posix-stack.cc',
'net/net.cc',
'rpc/rpc.cc',
]
http = ['http/transformers.cc',
'http/json_path.cc',
'http/file_handler.cc',
'http/common.cc',
'http/routes.cc',
'json/json_elements.cc',
'json/formatter.cc',
'http/matcher.cc',
'http/mime_types.cc',
'http/httpd.cc',
'http/reply.cc',
'http/request_parser.rl',
'http/api_docs.cc',
]
boost_test_lib = [
'tests/test-utils.cc',
'tests/test_runner.cc',
]
defines = []
libs = '-laio -lboost_program_options -lboost_system -lstdc++ -lm -lboost_unit_test_framework -lboost_thread -lcryptopp -lrt'
hwloc_libs = '-lhwloc -lnuma -lpciaccess -lxml2 -lz'
xen_used = False
def have_xen():
source = '#include <stdint.h>\n'
source += '#include <xen/xen.h>\n'
source += '#include <xen/sys/evtchn.h>\n'
source += '#include <xen/sys/gntdev.h>\n'
source += '#include <xen/sys/gntalloc.h>\n'
return try_compile(compiler = args.cxx, source = source)
if apply_tristate(args.xen, test = have_xen,
note = 'Note: xen-devel not installed. No Xen support.',
missing = 'Error: required package xen-devel not installed.'):
libs += ' -lxenstore'
defines.append("HAVE_XEN")
libnet += [ 'net/xenfront.cc' ]
core += [
'core/xen/xenstore.cc',
'core/xen/gntalloc.cc',
'core/xen/evtchn.cc',
]
xen_used=True
if xen_used and args.dpdk_target:
print("Error: only xen or dpdk can be used, not both.")
sys.exit(1)
memcache_base = [
'apps/memcached/ascii.rl'
] + libnet + core
deps = {
'libseastar.a' : core + libnet + http,
'seastar.pc': [],
'apps/seastar/seastar': ['apps/seastar/main.cc'] + core,
'apps/httpd/httpd': ['apps/httpd/demo.json', 'apps/httpd/main.cc'] + http + libnet + core,
'apps/memcached/memcached': ['apps/memcached/memcache.cc'] + memcache_base,
'tests/memcached/test_ascii_parser': ['tests/memcached/test_ascii_parser.cc'] + memcache_base + boost_test_lib,
'tests/fileiotest': ['tests/fileiotest.cc'] + core + boost_test_lib,
'tests/directory_test': ['tests/directory_test.cc'] + core,
'tests/linecount': ['tests/linecount.cc'] + core,
'tests/echotest': ['tests/echotest.cc'] + core + libnet,
'tests/l3_test': ['tests/l3_test.cc'] + core + libnet,
'tests/ip_test': ['tests/ip_test.cc'] + core + libnet,
'tests/tcp_test': ['tests/tcp_test.cc'] + core + libnet,
'tests/timertest': ['tests/timertest.cc'] + core,
'tests/futures_test': ['tests/futures_test.cc'] + core + boost_test_lib,
'tests/alloc_test': ['tests/alloc_test.cc'] + core + boost_test_lib,
'tests/foreign_ptr_test': ['tests/foreign_ptr_test.cc'] + core + boost_test_lib,
'tests/semaphore_test': ['tests/semaphore_test.cc'] + core + boost_test_lib,
'tests/smp_test': ['tests/smp_test.cc'] + core,
'tests/thread_test': ['tests/thread_test.cc'] + core + boost_test_lib,
'tests/thread_context_switch': ['tests/thread_context_switch.cc'] + core,
'tests/udp_server': ['tests/udp_server.cc'] + core + libnet,
'tests/udp_client': ['tests/udp_client.cc'] + core + libnet,
'tests/tcp_server': ['tests/tcp_server.cc'] + core + libnet,
'tests/tcp_client': ['tests/tcp_client.cc'] + core + libnet,
'apps/seawreck/seawreck': ['apps/seawreck/seawreck.cc', 'apps/seawreck/http_response_parser.rl'] + core + libnet,
'tests/blkdiscard_test': ['tests/blkdiscard_test.cc'] + core,
'tests/sstring_test': ['tests/sstring_test.cc'] + core,
'tests/httpd': ['tests/httpd.cc'] + http + core + boost_test_lib,
'tests/allocator_test': ['tests/allocator_test.cc', 'core/memory.cc', 'core/posix.cc'],
'tests/output_stream_test': ['tests/output_stream_test.cc'] + core + libnet + boost_test_lib,
'tests/udp_zero_copy': ['tests/udp_zero_copy.cc'] + core + libnet,
'tests/shared_ptr_test': ['tests/shared_ptr_test.cc'] + core,
'tests/slab_test': ['tests/slab_test.cc'] + core,
'tests/fstream_test': ['tests/fstream_test.cc'] + core + boost_test_lib,
'tests/distributed_test': ['tests/distributed_test.cc'] + core,
'tests/rpc': ['tests/rpc.cc'] + core + libnet,
'tests/packet_test': ['tests/packet_test.cc'] + core + libnet,
}
warnings = [
'-Wno-mismatched-tags', # clang-only
]
# The "--with-osv=<path>" parameter is a shortcut for a bunch of other
# settings:
if args.with_osv:
args.so = True
args.hwloc = False
args.user_cflags = (args.user_cflags +
' -DDEFAULT_ALLOCATOR -fvisibility=default -DHAVE_OSV -I' +
args.with_osv + ' -I' + args.with_osv + '/include -I' +
args.with_osv + '/arch/x64')
dpdk_arch_xlat = {
'native': 'native',
'nehalem': 'nhm',
'westmere': 'wsm',
'sandybridge': 'snb',
'ivybridge': 'ivb',
}
dpdk_machine = 'native'
if args.dpdk:
if not os.path.exists('dpdk') or not os.listdir('dpdk'):
raise Exception('--enable-dpdk: dpdk/ is empty. Run "git submodule update --init".')
cflags = args.user_cflags.split()
dpdk_machine = ([dpdk_arch_xlat[cflag[7:]]
for cflag in cflags
if cflag.startswith('-march')] or ['native'])[0]
subprocess.check_call('make -C dpdk RTE_OUTPUT=$PWD/build/dpdk/ config T=x86_64-native-linuxapp-gcc'.format(
dpdk_machine=dpdk_machine),
shell = True)
    # adjust configuration to taste
dotconfig = 'build/dpdk/.config'
lines = open(dotconfig, encoding='UTF-8').readlines()
def update(lines, vars):
ret = []
for line in lines:
for var, val in vars.items():
if line.startswith(var + '='):
line = var + '=' + val + '\n'
ret.append(line)
return ret
lines = update(lines, {'CONFIG_RTE_LIBRTE_PMD_BOND': 'n',
'CONFIG_RTE_MBUF_SCATTER_GATHER': 'n',
'CONFIG_RTE_LIBRTE_IP_FRAG': 'n',
'CONFIG_RTE_APP_TEST': 'n',
'CONFIG_RTE_TEST_PMD': 'n',
'CONFIG_RTE_MBUF_REFCNT_ATOMIC': 'n',
'CONFIG_RTE_MAX_MEMSEG': '8192',
'CONFIG_RTE_EAL_IGB_UIO': 'n',
'CONFIG_RTE_LIBRTE_KNI': 'n',
'CONFIG_RTE_KNI_KMOD': 'n',
'CONFIG_RTE_LIBRTE_JOBSTATS': 'n',
'CONFIG_RTE_LIBRTE_LPM': 'n',
'CONFIG_RTE_LIBRTE_ACL': 'n',
'CONFIG_RTE_LIBRTE_POWER': 'n',
'CONFIG_RTE_LIBRTE_IP_FRAG': 'n',
'CONFIG_RTE_LIBRTE_METER': 'n',
'CONFIG_RTE_LIBRTE_SCHED': 'n',
'CONFIG_RTE_LIBRTE_DISTRIBUTOR': 'n',
'CONFIG_RTE_LIBRTE_REORDER': 'n',
'CONFIG_RTE_LIBRTE_PORT': 'n',
'CONFIG_RTE_LIBRTE_TABLE': 'n',
'CONFIG_RTE_LIBRTE_PIPELINE': 'n',
})
    lines += ['CONFIG_RTE_MACHINE={}\n'.format(dpdk_machine)]
open(dotconfig, 'w', encoding='UTF-8').writelines(lines)
args.dpdk_target = os.getcwd() + '/build/dpdk'
if args.dpdk_target:
args.user_cflags = (args.user_cflags +
' -DHAVE_DPDK -I' + args.dpdk_target + '/include ' +
dpdk_cflags(args.dpdk_target) +
' -Wno-error=literal-suffix -Wno-literal-suffix -Wno-invalid-offsetof')
libs += (' -L' + args.dpdk_target + '/lib ')
if args.with_osv:
libs += '-lintel_dpdk -lrt -lm -ldl'
else:
libs += '-Wl,--whole-archive -lrte_pmd_vmxnet3_uio -lrte_pmd_i40e -lrte_pmd_ixgbe -lrte_pmd_e1000 -lrte_pmd_ring -Wl,--no-whole-archive -lrte_hash -lrte_kvargs -lrte_mbuf -lethdev -lrte_eal -lrte_malloc -lrte_mempool -lrte_ring -lrte_cmdline -lrte_cfgfile -lrt -lm -ldl'
warnings = [w
for w in warnings
if warning_supported(warning = w, compiler = args.cxx)]
warnings = ' '.join(warnings)
dbgflag = debug_flag(args.cxx) if args.debuginfo else ''
sanitize_flags = sanitize_vptr_flag(args.cxx)
modes['debug']['sanitize'] += ' ' + sanitize_flags
def have_hwloc():
return try_compile(compiler = args.cxx, source = '#include <hwloc.h>\n#include <numa.h>')
if apply_tristate(args.hwloc, test = have_hwloc,
note = 'Note: hwloc-devel/numactl-devel not installed. No NUMA support.',
missing = 'Error: required packages hwloc-devel/numactl-devel not installed.'):
libs += ' ' + hwloc_libs
defines.append('HAVE_HWLOC')
defines.append('HAVE_NUMA')
if args.so:
args.pie = '-shared'
args.fpie = '-fpic'
elif args.pie:
args.pie = '-pie'
args.fpie = '-fpie'
else:
args.pie = ''
args.fpie = ''
defines = ' '.join(['-D' + d for d in defines])
globals().update(vars(args))
total_memory = os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES')
link_pool_depth = max(int(total_memory / 7e9), 1)
build_modes = modes if args.mode == 'all' else [args.mode]
build_artifacts = all_artifacts if not args.artifacts else args.artifacts
dpdk_sources = []
if args.dpdk:
for root, dirs, files in os.walk('dpdk'):
dpdk_sources += [os.path.join(root, file)
for file in files
if file.endswith('.h') or file.endswith('.c')]
dpdk_sources = ' '.join(dpdk_sources)
outdir = 'build'
buildfile = 'build.ninja'
os.makedirs(outdir, exist_ok = True)
do_sanitize = True
if args.static:
do_sanitize = False
with open(buildfile, 'w') as f:
dpdk_deps = ''
if args.dpdk:
# fake dependencies on dpdk, so that it is built before anything else
dpdk_deps = ' {dpdk_target}/include/rte_eal.h {dpdk_target}/lib/librte_eal.a'.format(dpdk_target=args.dpdk_target)
f.write(textwrap.dedent('''\
configure_args = {configure_args}
builddir = {outdir}
cxx = {cxx}
# we disable _FORTIFY_SOURCE because it generates false positives with longjmp() (core/thread.cc)
cxxflags = -std=gnu++1y {dbgflag} {fpie} -Wall -Werror -fvisibility=hidden -pthread -I. -U_FORTIFY_SOURCE {user_cflags} {warnings} {defines}
ldflags = {dbgflag} -Wl,--no-as-needed {static} {pie} -fvisibility=hidden -pthread {user_ldflags}
libs = {libs}
pool link_pool
depth = {link_pool_depth}
rule ragel
command = ragel -G2 -o $out $in
description = RAGEL $out
rule gen
command = echo -e $text > $out
description = GEN $out
rule swagger
command = json/json2code.py -f $in -o $out
description = SWAGGER $out
''').format(**globals()))
if args.dpdk:
f.write(textwrap.dedent('''\
rule dpdkmake
command = make -C build/dpdk
build {dpdk_deps} : dpdkmake {dpdk_sources}
''').format(**globals()))
for mode in build_modes:
modeval = modes[mode]
if modeval['sanitize'] and not do_sanitize:
print('Note: --static disables debug mode sanitizers')
modeval['sanitize'] = ''
modeval['sanitize_libs'] = ''
f.write(textwrap.dedent('''\
cxxflags_{mode} = {sanitize} {opt} -I $builddir/{mode}/gen
libs_{mode} = {libs} {sanitize_libs}
rule cxx.{mode}
command = $cxx -MMD -MT $out -MF $out.d $cxxflags_{mode} $cxxflags -c -o $out $in
description = CXX $out
depfile = $out.d
rule link.{mode}
command = $cxx $cxxflags_{mode} $ldflags -o $out $in $libs $libs_{mode}
description = LINK $out
pool = link_pool
rule link_stripped.{mode}
command = $cxx $cxxflags_{mode} -s $ldflags -o $out $in $libs $libs_{mode}
description = LINK (stripped) $out
pool = link_pool
rule ar.{mode}
command = rm -f $out; ar cr $out $in; ranlib $out
description = AR $out
''').format(mode = mode, **modeval))
f.write('build {mode}: phony {artifacts}\n'.format(mode = mode,
artifacts = str.join(' ', ('$builddir/' + mode + '/' + x for x in build_artifacts))))
compiles = {}
ragels = {}
swaggers = {}
for binary in build_artifacts:
srcs = deps[binary]
objs = ['$builddir/' + mode + '/' + src.replace('.cc', '.o')
for src in srcs
if src.endswith('.cc')]
if binary.endswith('.pc'):
vars = modeval.copy()
vars.update(globals())
pc = textwrap.dedent('''\
Name: Seastar
URL: http://seastar-project.org/
Description: Advanced C++ framework for high-performance server applications on modern hardware.
Version: 1.0
Libs: -L{srcdir}/{builddir} -Wl,--whole-archive -lseastar -Wl,--no-whole-archive {dbgflag} -Wl,--no-as-needed {static} {pie} -fvisibility=hidden -pthread {user_ldflags} {libs} {sanitize_libs}
Cflags: -std=gnu++1y {dbgflag} {fpie} -Wall -Werror -fvisibility=hidden -pthread -I{srcdir} -I{srcdir}/{builddir}/gen {user_cflags} {warnings} {defines} {sanitize} {opt}
''').format(builddir = 'build/' + mode, srcdir = os.getcwd(), **vars)
f.write('build $builddir/{}/{}: gen\n text = {}\n'.format(mode, binary, repr(pc)))
elif binary.endswith('.a'):
f.write('build $builddir/{}/{}: ar.{} {}\n'.format(mode, binary, mode, str.join(' ', objs)))
else:
if binary.startswith('tests/'):
# Our code's debugging information is huge, and multiplied
# by many tests yields ridiculous amounts of disk space.
# So we strip the tests by default; The user can very
# quickly re-link the test unstripped by adding a "_g"
# to the test name, e.g., "ninja build/release/testname_g"
f.write('build $builddir/{}/{}: link_stripped.{} {} | {}\n'.format(mode, binary, mode, str.join(' ', objs), dpdk_deps))
f.write('build $builddir/{}/{}_g: link.{} {} | {}\n'.format(mode, binary, mode, str.join(' ', objs), dpdk_deps))
else:
f.write('build $builddir/{}/{}: link.{} {} | {}\n'.format(mode, binary, mode, str.join(' ', objs), dpdk_deps))
for src in srcs:
if src.endswith('.cc'):
obj = '$builddir/' + mode + '/' + src.replace('.cc', '.o')
compiles[obj] = src
elif src.endswith('.rl'):
hh = '$builddir/' + mode + '/gen/' + src.replace('.rl', '.hh')
ragels[hh] = src
elif src.endswith('.json'):
hh = '$builddir/' + mode + '/gen/' + src + '.hh'
swaggers[hh] = src
else:
raise Exception('No rule for ' + src)
for obj in compiles:
src = compiles[obj]
gen_headers = list(ragels.keys()) + list(swaggers.keys())
f.write('build {}: cxx.{} {} || {} \n'.format(obj, mode, src, ' '.join(gen_headers) + dpdk_deps))
for hh in ragels:
src = ragels[hh]
f.write('build {}: ragel {}\n'.format(hh, src))
for hh in swaggers:
src = swaggers[hh]
f.write('build {}: swagger {}\n'.format(hh,src))
f.write(textwrap.dedent('''\
rule configure
command = python3 configure.py $configure_args
generator = 1
build build.ninja: configure | configure.py
rule cscope
command = find -name '*.[chS]' -o -name "*.cc" -o -name "*.hh" | cscope -bq -i-
description = CSCOPE
build cscope: cscope
default {modes_list}
''').format(modes_list = ' '.join(build_modes), **globals()))
| apache-2.0 |
mlflow/mlflow | examples/shap/multiclass_classification.py | 1 | 1051 | import os
import numpy as np
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
import shap
import mlflow
from mlflow.tracking import MlflowClient
from mlflow.artifacts import download_artifacts
# prepare training data
X, y = load_iris(return_X_y=True, as_frame=True)
# train a model
model = RandomForestClassifier()
model.fit(X, y)
# log an explanation
with mlflow.start_run() as run:
mlflow.shap.log_explanation(model.predict_proba, X)
# list artifacts
client = MlflowClient()
artifact_path = "model_explanations_shap"
artifacts = [x.path for x in client.list_artifacts(run.info.run_id, artifact_path)]
print("# artifacts:")
print(artifacts)
# load back the logged explanation
dst_path = download_artifacts(run_id=run.info.run_id, artifact_path=artifact_path)
base_values = np.load(os.path.join(dst_path, "base_values.npy"))
shap_values = np.load(os.path.join(dst_path, "shap_values.npy"))
# show a force plot
shap.force_plot(base_values[0], shap_values[0, 0, :], X.iloc[0, :], matplotlib=True)
| apache-2.0 |
elkingtonmcb/h2o-2 | py/testdir_single_jvm/test_ddply_plot.py | 8 | 9513 | import unittest, random, sys, time
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i, h2o_gbm, h2o_jobs as h2j, h2o_import
import h2o_exec as h2e, h2o_util
import math
print "Copy a version of this to a two-cloud test. Different failure mode."
DO_PLOT = True
COL = 1
PHRASE = "func1"
FUNC_PHRASE = "func1=function(x){max(x[,%s])}" % COL
REPEAT = 20
DO_KNOWN_FAIL = False
DO_APPEND_KNOWN_FAIL2 = True
DO_REALS = False
CLOUD_SIZE = 1
initList = [
(None, FUNC_PHRASE),
# (None, "func2=function(x){a=3;nrow(x[,%s])*a}" % COL),
# (None, "func3=function(x){apply(x[,%s],2,sum)/nrow(x[,%s])}" % (COL, col) ),
# (None, "function(x) { cbind( mean(x[,1]), mean(x[,%s]) ) }" % COL),
# (None, "func4=function(x) { mean( x[,%s]) }" % COL),
# (None, "func5=function(x) { sd( x[,%s]) }" % COL),
# (None, "func6=function(x) { quantile(x[,%s] , c(0.9) ) }" % COL),
]
print "Data is all integers, minInt to maxInt, so it shouldn't have fp roundoff errors while summing the row counts I use."
def write_syn_dataset(csvPathname, rowCount, colCount, minInt, maxInt, SEED):
r1 = random.Random(SEED)
dsf = open(csvPathname, "w+")
for i in range(rowCount):
rowData = []
if DO_REALS:
for j in range(colCount):
# maybe do a significatly smaller range than min/max ints.
# divide by pi to get some non-integerness
ri = r1.randint(minInt,maxInt) / math.pi
# make it a real?
rowData.append("%+e" % ri)
else:
for j in range(colCount):
# maybe do a significatly smaller range than min/max ints.
ri = r1.randint(minInt,maxInt)
rowData.append(ri)
rowDataCsv = ",".join(map(str,rowData))
dsf.write(rowDataCsv + "\n")
dsf.close()
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init(CLOUD_SIZE,java_heap_GB=12/CLOUD_SIZE)
@classmethod
def tearDownClass(cls):
### time.sleep(3600)
h2o.tear_down_cloud()
def test_ddply_plot(self):
SYNDATASETS_DIR = h2o.make_syn_dir()
if DO_KNOWN_FAIL:
tryList = [
(1000000, 5, 'cD', 0, 320, 30),
]
else:
tryList = [
# (1000000, 5, 'cD', 0, 10, 30),
(1000000, 5, 'cD', 0, 20, 30),
# (1000000, 5, 'cD', 0, 40, 30),
(1000000, 5, 'cD', 0, 50, 30),
# (1000000, 5, 'cD', 0, 80, 30),
(1000000, 5, 'cD', 0, 160, 30),
# fails..don't do
# (1000000, 5, 'cD', 0, 320, 30),
# (1000000, 5, 'cD', 0, 320, 30),
# starts to fail here. too many groups?
# (1000000, 5, 'cD', 0, 640, 30),
# (1000000, 5, 'cD', 0, 1280, 30),
]
if DO_APPEND_KNOWN_FAIL2:
tryList.append(
(1000000, 5, 'cD', 0, 160, 30),
)
tryList.append(
(1000000, 5, 'cD', 0, 320, 30),
)
### h2b.browseTheCloud()
xList = []
eList = []
fList = []
trial = 0
for (rowCount, colCount, hex_key, minInt, maxInt, timeoutSecs) in tryList:
SEEDPERFILE = random.randint(0, sys.maxint)
# csvFilename = 'syn_' + str(SEEDPERFILE) + "_" + str(rowCount) + 'x' + str(colCount) + '.csv'
if DO_KNOWN_FAIL:
# csvFilename = 'syn_binary_1000000x5.csv.gz' # fails
# csvFilename = 'a1' # fails
csvFilename = "syn_ddply_1Mx5_0_320.gz"
bucket = "home-0xdiag-datasets"
csvPathname = "standard/" + csvFilename
minInt = 0
maxInt = 320
else:
bucket = None
csvFilename = 'syn_' + "binary" + "_" + str(rowCount) + 'x' + str(colCount) + '.csv'
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
print "Creating random", csvPathname, "with range", (maxInt-minInt)+1
write_syn_dataset(csvPathname, rowCount, colCount, minInt, maxInt, SEEDPERFILE)
for lll in range(1):
# PARSE train****************************************
hexKey = 'r.hex'
parseResult = h2i.import_parse(bucket=bucket, path=csvPathname, schema='local', hex_key=hexKey)
inspect = h2o_cmd.runInspect(key=hexKey)
missingValuesList = h2o_cmd.infoFromInspect(inspect, csvFilename)
self.assertEqual(missingValuesList, [], "a1 should have no NAs in parsed dataset: %s" % missingValuesList)
for resultKey, execExpr in initList:
h2e.exec_expr(h2o.nodes[0], execExpr, resultKey=resultKey, timeoutSecs=60)
#*****************************************************************************************
                # two columns, so the worst case is every combination of each possible value
# only true if enough rows (more than the range?)
maxExpectedGroups = ((maxInt - minInt) + 1) ** 2
# do it twice..to get the optimal cached delay for time?
execExpr = "a1 = ddply(r.hex, c(1,2), " + PHRASE + ")"
start = time.time()
(execResult, result) = h2e.exec_expr(h2o.nodes[0], execExpr, resultKey=None, timeoutSecs=90)
groups = execResult['num_rows']
                # this is a coarse comparison, statistically not valid for small rows, and certain ranges?
h2o_util.assertApproxEqual(groups, maxExpectedGroups, rel=0.2,
msg="groups %s isn't close to expected amount %s, minInt: %s maxInt: %s" % (groups, maxExpectedGroups, minInt, maxInt))
ddplyElapsed = time.time() - start
print "ddplyElapsed:", ddplyElapsed
print "execResult", h2o.dump_json(execResult)
a1dump = h2o_cmd.runInspect(key="a1")
print "a1", h2o.dump_json(a1dump)
# should never have any NAs in this result
missingValuesList = h2o_cmd.infoFromInspect(a1dump, "a1")
self.assertEqual(missingValuesList, [], "a1 should have no NAs: %s trial: %s" % (missingValuesList, trial))
#*****************************************************************************************
execExpr = "a2 = ddply(r.hex, c(1,2), " + PHRASE + ")"
start = time.time()
(execResult, result) = h2e.exec_expr(h2o.nodes[0], execExpr, resultKey=None, timeoutSecs=90)
groups = execResult['num_rows']
                # this is a coarse comparison, statistically not valid for small rows, and certain ranges?
h2o_util.assertApproxEqual(groups, maxExpectedGroups, rel=0.2,
msg="groups %s isn't close to expected amount %s, minInt: %s maxInt: %s" % (groups, maxExpectedGroups, minInt, maxInt))
ddplyElapsed = time.time() - start
print "ddplyElapsed:", ddplyElapsed
print "execResult", h2o.dump_json(execResult)
a2dump = h2o_cmd.runInspect(key="a2")
print "a2", h2o.dump_json(a2dump)
# should never have any NAs in this result
missingValuesList = h2o_cmd.infoFromInspect(a2dump, "a2")
self.assertEqual(missingValuesList, [], "a2 should have no NAs: %s trial: %s" % (missingValuesList, trial))
#*****************************************************************************************
# should be same answer in both cases
execExpr = "sum(a1!=a2)==0"
(execResult, result) = h2e.exec_expr(h2o.nodes[0], execExpr, resultKey=None, timeoutSecs=90)
execExpr = "s=c(0); s=(a1!=a2)"
(execResult1, result1) = h2e.exec_expr(h2o.nodes[0], execExpr, resultKey=None, timeoutSecs=120)
print "execResult", h2o.dump_json(execResult)
#*****************************************************************************************
# should never have any NAs in this result
sdump = h2o_cmd.runInspect(key="s")
print "s", h2o.dump_json(sdump)
                self.assertEqual(result, 1, "a1 and a2 weren't equal? Maybe ddply can vary execution order (fp error?), so multiple ddply() calls can give different answers. %s %s %s" % (FUNC_PHRASE, result, h2o.dump_json(execResult)))
# xList.append(ntrees)
trial += 1
# this is the biggest it might be ..depends on the random combinations
# groups = ((maxInt - minInt) + 1) ** 2
xList.append(groups)
eList.append(ddplyElapsed)
fList.append(ddplyElapsed)
if DO_PLOT:
xLabel = 'groups'
eLabel = 'ddplyElapsed'
fLabel = 'ddplyElapsed'
eListTitle = ""
fListTitle = ""
h2o_gbm.plotLists(xList, xLabel, eListTitle, eList, eLabel, fListTitle, fList, fLabel)
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
etalab/dactylo | dactylo/controllers/websockets.py | 1 | 3223 | # -*- coding: utf-8 -*-
# Dactylo -- A datasets activity streams logger
# By: Emmanuel Raviart <emmanuel@raviart.com>
#
# Copyright (C) 2013 Etalab
# http://github.com/etalab/dactylo
#
# This file is part of Dactylo.
#
# Dactylo is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Dactylo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Controllers for websockets"""
import json
import webob
import ws4py.server.wsgiutils
import ws4py.websocket
from .. import contexts, model, urls, wsgihelpers
#class WebSocketEmitter(ws4py.websocket.WebSocket):
# def closed(self, code, reason = None):
# try:
# model.websocket_clients.remove(self)
# except ValueError:
# # Client is missing from list.
# pass
# def opened(self):
# model.websocket_clients.append(self)
#websocket_emitter_app = ws4py.server.wsgiutils.WebSocketWSGIApplication(handler_cls = WebSocketEmitter)
class WebSocketMetricsEmitter(ws4py.websocket.WebSocket):
def closed(self, code, reason = None):
try:
model.websocket_metrics_clients.remove(self)
except ValueError:
# Client is missing from list.
pass
def opened(self):
model.websocket_metrics_clients.append(self)
message = unicode(json.dumps(model.metrics, encoding = 'utf-8', ensure_ascii = False, indent = 2))
self.send(message)
websocket_metrics_emitter_app = ws4py.server.wsgiutils.WebSocketWSGIApplication(handler_cls = WebSocketMetricsEmitter)
#def api1_listen(environ, start_response):
# req = webob.Request(environ)
## ctx = contexts.Ctx(req)
## headers = wsgihelpers.handle_cross_origin_resource_sharing(ctx)
# assert req.method == 'GET'
## params = req.GET
## inputs = dict(
## first_key = params.get('first_key'),
## keys = params.get('keys'),
## limit = params.get('limit'),
## values = params.get('values'),
## )
# return websocket_emitter_app(environ, start_response)
def api1_metrics(environ, start_response):
req = webob.Request(environ)
ctx = contexts.Ctx(req)
# headers = wsgihelpers.handle_cross_origin_resource_sharing(ctx)
assert req.method == 'GET'
try:
return websocket_metrics_emitter_app(environ, start_response)
except ws4py.server.wsgiutils.HandshakeError as error:
return wsgihelpers.bad_request(ctx, explanation = ctx._(u'WebSocket Handshake Error: {0}').format(error))
def route_api1_class(environ, start_response):
router = urls.make_router(
# ('GET', '^/?$', api1_listen),
('GET', '^/metrics/?$', api1_metrics),
)
return router(environ, start_response)
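# Taken together: route_api1_class dispatches GET /metrics/ to api1_metrics, which
# upgrades the request to a WebSocket (WebSocketMetricsEmitter) and immediately
# pushes the current model.metrics as JSON; the client is kept in
# model.websocket_metrics_clients so that later updates can presumably be
# broadcast by other parts of the application.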
| agpl-3.0 |
manipopopo/tensorflow | tensorflow/contrib/learn/python/learn/preprocessing/categorical.py | 40 | 4795 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements preprocessing transformers for categorical variables (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.util.deprecation import deprecated
# pylint: disable=g-bad-import-order
from . import categorical_vocabulary
from ..learn_io.data_feeder import setup_processor_data_feeder
# pylint: enable=g-bad-import-order
class CategoricalProcessor(object):
"""Maps documents to sequences of word ids.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
  As a common convention, NaN values are handled as unknown tokens.
Both float('nan') and np.nan are accepted.
"""
@deprecated(None, 'Please use tensorflow/transform or tf.data for sequence '
'processing.')
def __init__(self, min_frequency=0, share=False, vocabularies=None):
"""Initializes a CategoricalProcessor instance.
Args:
min_frequency: Minimum frequency of categories in the vocabulary.
share: Share vocabulary between variables.
vocabularies: list of CategoricalVocabulary objects for each variable in
the input dataset.
Attributes:
vocabularies_: list of CategoricalVocabulary objects.
"""
self.min_frequency = min_frequency
self.share = share
self.vocabularies_ = vocabularies
def freeze(self, freeze=True):
"""Freeze or unfreeze all vocabularies.
Args:
freeze: Boolean, indicate if vocabularies should be frozen.
"""
for vocab in self.vocabularies_:
vocab.freeze(freeze)
def fit(self, x, unused_y=None):
"""Learn a vocabulary dictionary of all categories in `x`.
Args:
x: numpy matrix or iterable of lists/numpy arrays.
unused_y: to match fit format signature of estimators.
Returns:
self
"""
x = setup_processor_data_feeder(x)
for row in x:
# Create vocabularies if not given.
if self.vocabularies_ is None:
# If not share, one per column, else one shared across.
if not self.share:
self.vocabularies_ = [
categorical_vocabulary.CategoricalVocabulary() for _ in row
]
else:
vocab = categorical_vocabulary.CategoricalVocabulary()
self.vocabularies_ = [vocab for _ in row]
for idx, value in enumerate(row):
        # NaNs are handled as unknowns (math.isnan covers float('nan') and np.nan).
        if isinstance(value, float) and math.isnan(value):
          continue
self.vocabularies_[idx].add(value)
if self.min_frequency > 0:
for vocab in self.vocabularies_:
vocab.trim(self.min_frequency)
self.freeze()
return self
def fit_transform(self, x, unused_y=None):
"""Learn the vocabulary dictionary and return indexies of categories.
Args:
x: numpy matrix or iterable of lists/numpy arrays.
unused_y: to match fit_transform signature of estimators.
Returns:
x: iterable, [n_samples]. Category-id matrix.
"""
self.fit(x)
return self.transform(x)
def transform(self, x):
"""Transform documents to category-id matrix.
    Converts categories to ids, given a vocabulary fitted by `fit` or
one provided in the constructor.
Args:
x: numpy matrix or iterable of lists/numpy arrays.
Yields:
x: iterable, [n_samples]. Category-id matrix.
"""
self.freeze()
x = setup_processor_data_feeder(x)
for row in x:
output_row = []
for idx, value in enumerate(row):
        # Return <UNK> (id 0) when the value is NaN (float('nan') or np.nan).
        if isinstance(value, float) and math.isnan(value):
output_row.append(0)
continue
output_row.append(self.vocabularies_[idx].get(value))
yield np.array(output_row, dtype=np.int64)
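# A minimal usage sketch (illustrative only; the variable names below are
# hypothetical and not part of this module):
#
#   processor = CategoricalProcessor(min_frequency=0)
#   rows = [["red", "small"], ["blue", "small"], ["red", "large"]]
#   id_rows = list(processor.fit_transform(rows))
#   # each element of id_rows is an np.int64 array of per-column category ids;
#   # NaN values map to id 0 (the <UNK> token).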
| apache-2.0 |
lukeiwanski/tensorflow | tensorflow/examples/tutorials/layers/cnn_mnist.py | 42 | 5711 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convolutional Neural Network Estimator for MNIST, built with tf.layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
def cnn_model_fn(features, labels, mode):
"""Model function for CNN."""
# Input Layer
# Reshape X to 4-D tensor: [batch_size, width, height, channels]
# MNIST images are 28x28 pixels, and have one color channel
input_layer = tf.reshape(features["x"], [-1, 28, 28, 1])
# Convolutional Layer #1
# Computes 32 features using a 5x5 filter with ReLU activation.
# Padding is added to preserve width and height.
# Input Tensor Shape: [batch_size, 28, 28, 1]
# Output Tensor Shape: [batch_size, 28, 28, 32]
conv1 = tf.layers.conv2d(
inputs=input_layer,
filters=32,
kernel_size=[5, 5],
padding="same",
activation=tf.nn.relu)
# Pooling Layer #1
# First max pooling layer with a 2x2 filter and stride of 2
# Input Tensor Shape: [batch_size, 28, 28, 32]
# Output Tensor Shape: [batch_size, 14, 14, 32]
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
# Convolutional Layer #2
# Computes 64 features using a 5x5 filter.
# Padding is added to preserve width and height.
# Input Tensor Shape: [batch_size, 14, 14, 32]
# Output Tensor Shape: [batch_size, 14, 14, 64]
conv2 = tf.layers.conv2d(
inputs=pool1,
filters=64,
kernel_size=[5, 5],
padding="same",
activation=tf.nn.relu)
# Pooling Layer #2
# Second max pooling layer with a 2x2 filter and stride of 2
# Input Tensor Shape: [batch_size, 14, 14, 64]
# Output Tensor Shape: [batch_size, 7, 7, 64]
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
# Flatten tensor into a batch of vectors
# Input Tensor Shape: [batch_size, 7, 7, 64]
# Output Tensor Shape: [batch_size, 7 * 7 * 64]
pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
# Dense Layer
# Densely connected layer with 1024 neurons
# Input Tensor Shape: [batch_size, 7 * 7 * 64]
# Output Tensor Shape: [batch_size, 1024]
dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
# Add dropout operation; 0.6 probability that element will be kept
dropout = tf.layers.dropout(
inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
# Logits layer
# Input Tensor Shape: [batch_size, 1024]
# Output Tensor Shape: [batch_size, 10]
logits = tf.layers.dense(inputs=dropout, units=10)
predictions = {
# Generate predictions (for PREDICT and EVAL mode)
"classes": tf.argmax(input=logits, axis=1),
# Add `softmax_tensor` to the graph. It is used for PREDICT and by the
# `logging_hook`.
"probabilities": tf.nn.softmax(logits, name="softmax_tensor")
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
# Calculate Loss (for both TRAIN and EVAL modes)
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
# Configure the Training Op (for TRAIN mode)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
train_op = optimizer.minimize(
loss=loss,
global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
# Add evaluation metrics (for EVAL mode)
eval_metric_ops = {
"accuracy": tf.metrics.accuracy(
labels=labels, predictions=predictions["classes"])}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
# Load training and eval data
mnist = tf.contrib.learn.datasets.load_dataset("mnist")
train_data = mnist.train.images # Returns np.array
train_labels = np.asarray(mnist.train.labels, dtype=np.int32)
eval_data = mnist.test.images # Returns np.array
eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)
# Create the Estimator
mnist_classifier = tf.estimator.Estimator(
model_fn=cnn_model_fn, model_dir="/tmp/mnist_convnet_model")
# Set up logging for predictions
# Log the values in the "Softmax" tensor with label "probabilities"
tensors_to_log = {"probabilities": "softmax_tensor"}
logging_hook = tf.train.LoggingTensorHook(
tensors=tensors_to_log, every_n_iter=50)
# Train the model
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={"x": train_data},
y=train_labels,
batch_size=100,
num_epochs=None,
shuffle=True)
mnist_classifier.train(
input_fn=train_input_fn,
steps=20000,
hooks=[logging_hook])
# Evaluate the model and print results
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
x={"x": eval_data},
y=eval_labels,
num_epochs=1,
shuffle=False)
eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
print(eval_results)
if __name__ == "__main__":
tf.app.run()
| apache-2.0 |
diekhans/ga4gh-server | scripts/prepare_compliance_data.py | 4 | 6553 | """
A script that takes the compliance dataset (the released version
of which is at https://github.com/ga4gh/compliance/tree/master/test-data)
and turns it into a directory bundle of binary and JSON files suitable
for use by the reference server.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import shutil
import argparse
import os
import sys
import glob
import pysam
import utils
class ComplianceDataMunger(object):
def __init__(self, args):
self.inputDirectory = args.inputDirectory
self.outputDirectory = args.outputDirectory
# get all the reference files (they'll be the ones with .fa extension)
self.referenceFiles = map(
os.path.basename, glob.glob(
os.path.join(self.inputDirectory, "*.fa")))
self.refsetsDirectory = os.path.join(
self.outputDirectory, "referenceSets")
self.hg37Directory = os.path.join(self.refsetsDirectory, "hg37")
# datasets
self.datasetsDirectory = os.path.join(self.outputDirectory, "datasets")
self.readFiles = map(
os.path.basename, glob.glob(
os.path.join(self.inputDirectory, "*.sam")))
self.variantFiles = map(
os.path.basename, glob.glob(
os.path.join(self.inputDirectory, "*.vcf")))
self.datasets = [d for d in set([p.split('_')[0] for p in
self.readFiles + self.variantFiles])]
self.datasetReads = dict()
self.datasetVariants = dict()
for ds in self.datasets:
self.datasetReads[ds] = [r for r in
self.readFiles if r.startswith(ds)]
# Variants themselves are split into groups,
# based on second part of the _ split:
for ds in self.datasets:
self.datasetVariants[ds] = dict()
# only those variants inside this dataset
dsvlist = [v for v in self.variantFiles if v.startswith(ds)]
# create nested dictionary based on group belonging
for dsv in dsvlist:
dsvGroup = dsv.split('_')[1]
self.datasetVariants[ds][dsvGroup] = \
self.datasetVariants[ds].get(dsvGroup, []) + [dsv]
self.datasetDirs = [os.path.join(self.outputDirectory, ds)
for ds in self.datasets]
def run(self):
if not os.path.exists(self.outputDirectory):
os.makedirs(self.outputDirectory)
# Clean out, make and re-populate references directory
# For now, assume a single, statically-named referenceSet
print("Converting references...", file=sys.stderr)
shutil.rmtree(self.refsetsDirectory, ignore_errors=True)
os.makedirs(self.refsetsDirectory)
shutil.copy(
os.path.join(self.inputDirectory, "referenceset_hg37.json"),
os.path.join(self.refsetsDirectory, "hg37.json"))
os.makedirs(self.hg37Directory)
for refFile in self.referenceFiles:
refBase = os.path.splitext(refFile)[0]
destFastaFilename = os.path.join(
self.hg37Directory, refBase) + ".fa"
shutil.copy(os.path.join(self.inputDirectory, refBase) + ".fa",
destFastaFilename)
pysam.tabix_compress(destFastaFilename, destFastaFilename + ".gz")
refFasta = pysam.FastaFile(destFastaFilename + ".gz")
refFasta.close()
os.remove(destFastaFilename)
shutil.copy(
os.path.join(self.inputDirectory, refBase) + ".json",
os.path.join(self.hg37Directory, refBase) + ".json")
# Clean out, make and repopulate dataset directories
shutil.rmtree(self.datasetsDirectory, ignore_errors=True)
os.makedirs(self.datasetsDirectory)
for ds in self.datasets:
dsdir = os.path.join(self.datasetsDirectory, ds)
os.makedirs(dsdir)
# Reads
print("Converting reads...", file=sys.stderr)
dsReadsdir = os.path.join(dsdir, "reads")
os.makedirs(dsReadsdir)
for readFile in self.datasetReads[ds]:
destFile = os.path.join(
dsReadsdir,
readFile.split('_')[1].split('.')[0]) + ".bam"
readSrc = pysam.AlignmentFile(
os.path.join(self.inputDirectory, readFile), "r")
readDest = pysam.AlignmentFile(destFile, "wb",
header=readSrc.header)
destFilePath = readDest.filename
for readData in readSrc:
readDest.write(readData)
readDest.close()
readSrc.close()
pysam.index(destFilePath)
# Variants
print("Converting variants...", file=sys.stderr)
dsVariantsdir = os.path.join(dsdir, "variants")
os.makedirs(dsVariantsdir)
for vgroup in self.datasetVariants[ds].keys():
vgroupdir = os.path.join(dsVariantsdir, vgroup)
os.makedirs(vgroupdir)
for variantFile in self.datasetVariants[ds][vgroup]:
destFile = os.path.join(
vgroupdir, variantFile.split('_')[2])
shutil.copy(
os.path.join(
self.inputDirectory, variantFile), destFile)
# Pysam's tabix_index automatically compresses the file
                    # in place and creates a tabix index.
pysam.tabix_index(destFile, preset="vcf")
print("done converting compliance data.", file=sys.stderr)
@utils.Timed()
def main():
parser = argparse.ArgumentParser(
description="Script to generate data bundle from a locally stored "
"(and possibly locally edited) version of the compliance dataset.")
parser.add_argument(
"--outputDirectory", "-o", default="ga4gh-compliance-data",
help="The directory to output the server-ready data bundle to.")
parser.add_argument(
"--inputDirectory", "-i",
help="Path to local directory containing compliance dataset.",
default='.')
parser.add_argument('--verbose', '-v', action='count', default=0)
args = parser.parse_args()
cdm = ComplianceDataMunger(args)
cdm.run()
if __name__ == "__main__":
main()
| apache-2.0 |
elkingtonmcb/h2o-2 | py/testdir_multi_jvm/test_GLM2_covtype_exec.py | 9 | 2344 | import unittest, time, sys, random
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_import as h2i
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init(3,java_heap_GB=4)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GLM2_covtype_exec(self):
csvFilename = 'covtype.data'
csvPathname = 'standard/' + csvFilename
hex_key = 'covtype.hex'
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, schema='put',
hex_key=hex_key, timeoutSecs=30)
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
print "\n" + csvPathname, \
" numRows:", "{:,}".format(inspect['numRows']), \
" numCols:", "{:,}".format(inspect['numCols'])
print "WARNING: max_iter set to 8 for benchmark comparisons"
max_iter = 8
y = "54"
h2o_cmd.runExec(str='%s[,55] = %s[,55]==1' % (hex_key, hex_key))
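        # In h2o GLM, alpha sets the elastic-net mix (alpha=0 is pure L2/ridge,
        # alpha=1 is pure L1/lasso, values in between blend the two) and lambda
        # sets the regularization strength, as exercised by the three runs below.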
# L2
kwargs = {
'response': y,
'family': 'binomial',
'n_folds': 0,
'max_iter': max_iter,
'beta_epsilon': 1e-3}
timeoutSecs = 120
start = time.time()
kwargs.update({'alpha': 0, 'lambda': 0})
glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
print "glm (L2) end on ", csvPathname, 'took', time.time() - start, 'seconds'
h2o_glm.simpleCheckGLM(self, glm, 'C14', **kwargs)
# Elastic
kwargs.update({'alpha': 0.5, 'lambda': 1e-4})
start = time.time()
glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
print "glm (Elastic) end on ", csvPathname, 'took', time.time() - start, 'seconds'
h2o_glm.simpleCheckGLM(self, glm, 'C14', **kwargs)
# L1
kwargs.update({'alpha': 1, 'lambda': 1e-4})
start = time.time()
glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
print "glm (L1) end on ", csvPathname, 'took', time.time() - start, 'seconds'
h2o_glm.simpleCheckGLM(self, glm, 'C14', **kwargs)
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
glouppe/scikit-learn | examples/plot_kernel_ridge_regression.py | 14 | 6227 | """
=============================================
Comparison of kernel ridge regression and SVR
=============================================
Both kernel ridge regression (KRR) and SVR learn a non-linear function by
employing the kernel trick, i.e., they learn a linear function in the space
induced by the respective kernel which corresponds to a non-linear function in
the original space. They differ in the loss functions (ridge versus
epsilon-insensitive loss). In contrast to SVR, fitting a KRR can be done in
closed-form and is typically faster for medium-sized datasets. On the other
hand, the learned model is non-sparse and thus slower than SVR at
prediction-time.
This example illustrates both methods on an artificial dataset, which
consists of a sinusoidal target function and strong noise added to every fifth
datapoint. The first figure compares the learned model of KRR and SVR when both
complexity/regularization and bandwidth of the RBF kernel are optimized using
grid-search. The learned functions are very similar; however, fitting KRR is
approx. seven times faster than fitting SVR (both with grid-search). However,
prediction of 100000 target values is more than tree times faster with SVR
since it has learned a sparse model using only approx. 1/3 of the 100 training
datapoints as support vectors.
The next figure compares the time for fitting and prediction of KRR and SVR for
different sizes of the training set. Fitting KRR is faster than SVR for medium-
sized training sets (less than 1000 samples); however, for larger training sets
SVR scales better. With regard to prediction time, SVR is faster than
KRR for all sizes of the training set because of the learned sparse
solution. Note that the degree of sparsity and thus the prediction time depends
on the parameters epsilon and C of the SVR.
"""
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
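# The core API difference, in miniature (a sketch with fixed hyper-parameters;
# the grid-searched comparison further below is the authoritative version):
#
#   from sklearn.kernel_ridge import KernelRidge
#   from sklearn.svm import SVR
#   KernelRidge(kernel='rbf', alpha=1.0, gamma=0.1).fit(X, y)  # closed-form ridge solve, dense model
#   SVR(kernel='rbf', C=1.0, gamma=0.1).fit(X, y)              # epsilon-insensitive loss, sparse support vectors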
from __future__ import division
import time
import numpy as np
from sklearn.svm import SVR
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import learning_curve
from sklearn.kernel_ridge import KernelRidge
import matplotlib.pyplot as plt
rng = np.random.RandomState(0)
#############################################################################
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
X_plot = np.linspace(0, 5, 100000)[:, None]
#############################################################################
# Fit regression model
train_size = 100
svr = GridSearchCV(SVR(kernel='rbf', gamma=0.1), cv=5,
param_grid={"C": [1e0, 1e1, 1e2, 1e3],
"gamma": np.logspace(-2, 2, 5)})
kr = GridSearchCV(KernelRidge(kernel='rbf', gamma=0.1), cv=5,
param_grid={"alpha": [1e0, 0.1, 1e-2, 1e-3],
"gamma": np.logspace(-2, 2, 5)})
t0 = time.time()
svr.fit(X[:train_size], y[:train_size])
svr_fit = time.time() - t0
print("SVR complexity and bandwidth selected and model fitted in %.3f s"
% svr_fit)
t0 = time.time()
kr.fit(X[:train_size], y[:train_size])
kr_fit = time.time() - t0
print("KRR complexity and bandwidth selected and model fitted in %.3f s"
% kr_fit)
sv_ratio = svr.best_estimator_.support_.shape[0] / train_size
print("Support vector ratio: %.3f" % sv_ratio)
t0 = time.time()
y_svr = svr.predict(X_plot)
svr_predict = time.time() - t0
print("SVR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], svr_predict))
t0 = time.time()
y_kr = kr.predict(X_plot)
kr_predict = time.time() - t0
print("KRR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], kr_predict))
#############################################################################
# look at the results
sv_ind = svr.best_estimator_.support_
plt.scatter(X[sv_ind], y[sv_ind], c='r', s=50, label='SVR support vectors')
plt.scatter(X[:100], y[:100], c='k', label='data')
plt.hold('on')
plt.plot(X_plot, y_svr, c='r',
label='SVR (fit: %.3fs, predict: %.3fs)' % (svr_fit, svr_predict))
plt.plot(X_plot, y_kr, c='g',
label='KRR (fit: %.3fs, predict: %.3fs)' % (kr_fit, kr_predict))
plt.xlabel('data')
plt.ylabel('target')
plt.title('SVR versus Kernel Ridge')
plt.legend()
# Visualize training and prediction time
plt.figure()
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
sizes = np.logspace(1, 4, 7)
for name, estimator in {"KRR": KernelRidge(kernel='rbf', alpha=0.1,
gamma=10),
"SVR": SVR(kernel='rbf', C=1e1, gamma=10)}.items():
train_time = []
test_time = []
for train_test_size in sizes:
t0 = time.time()
estimator.fit(X[:train_test_size], y[:train_test_size])
train_time.append(time.time() - t0)
t0 = time.time()
estimator.predict(X_plot[:1000])
test_time.append(time.time() - t0)
plt.plot(sizes, train_time, 'o-', color="r" if name == "SVR" else "g",
label="%s (train)" % name)
plt.plot(sizes, test_time, 'o--', color="r" if name == "SVR" else "g",
label="%s (test)" % name)
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Train size")
plt.ylabel("Time (seconds)")
plt.title('Execution Time')
plt.legend(loc="best")
# Visualize learning curves
plt.figure()
svr = SVR(kernel='rbf', C=1e1, gamma=0.1)
kr = KernelRidge(kernel='rbf', alpha=0.1, gamma=0.1)
train_sizes, train_scores_svr, test_scores_svr = \
learning_curve(svr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
train_sizes_abs, train_scores_kr, test_scores_kr = \
learning_curve(kr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
plt.plot(train_sizes, test_scores_svr.mean(1), 'o-', color="r",
label="SVR")
plt.plot(train_sizes, test_scores_kr.mean(1), 'o-', color="g",
label="KRR")
plt.xlabel("Train size")
plt.ylabel("Mean Squared Error")
plt.title('Learning curves')
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
chetan51/nupic | examples/opf/experiments/missing_record/make_datasets.py | 9 | 4817 | #! /usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Generate artificial datasets for the multi-step prediction experiments
"""
import os
import random
import datetime
from optparse import OptionParser
from nupic.data.file_record_stream import FileRecordStream
###########################################################################
def _generateSimple(filename="simple.csv", numSequences=1, elementsPerSeq=3,
numRepeats=10):
""" Generate a simple dataset. This contains a bunch of non-overlapping
sequences.
At the end of the dataset, we introduce missing records so that test
code can insure that the model didn't get confused by them.
Parameters:
----------------------------------------------------
filename: name of the file to produce, including extension. It will
be created in a 'datasets' sub-directory within the
directory containing this script.
numSequences: how many sequences to generate
elementsPerSeq: length of each sequence
numRepeats: how many times to repeat each sequence in the output
"""
# Create the output file
scriptDir = os.path.dirname(__file__)
pathname = os.path.join(scriptDir, 'datasets', filename)
print "Creating %s..." % (pathname)
fields = [('timestamp', 'datetime', 'T'),
('field1', 'string', ''),
('field2', 'float', '')]
outFile = FileRecordStream(pathname, write=True, fields=fields)
# Create the sequences
sequences = []
for i in range(numSequences):
seq = [x for x in range(i*elementsPerSeq, (i+1)*elementsPerSeq)]
sequences.append(seq)
# Write out the sequences in random order
seqIdxs = []
for i in range(numRepeats):
seqIdxs += range(numSequences)
random.shuffle(seqIdxs)
# Put 1 hour between each record
timestamp = datetime.datetime(year=2012, month=1, day=1, hour=0, minute=0,
second=0)
timeDelta = datetime.timedelta(hours=1)
# Write out the sequences without missing records
for seqIdx in seqIdxs:
seq = sequences[seqIdx]
for x in seq:
outFile.appendRecord([timestamp, str(x), x])
timestamp += timeDelta
# Now, write some out with missing records
for seqIdx in seqIdxs:
seq = sequences[seqIdx]
for i,x in enumerate(seq):
if i != 1:
outFile.appendRecord([timestamp, str(x), x])
timestamp += timeDelta
for seqIdx in seqIdxs:
seq = sequences[seqIdx]
for i,x in enumerate(seq):
if i != 1:
outFile.appendRecord([timestamp, str(x), x])
timestamp += timeDelta
# Write out some more of the sequences *without* missing records
for seqIdx in seqIdxs:
seq = sequences[seqIdx]
for x in seq:
outFile.appendRecord([timestamp, str(x), x])
timestamp += timeDelta
outFile.close()
##############################################################################
if __name__ == '__main__':
helpString = \
"""%prog [options]
Generate artificial datasets for testing multi-step prediction """
# ============================================================================
# Process command line arguments
parser = OptionParser(helpString)
parser.add_option("--verbosity", default=0, type="int",
help="Verbosity level, either 0, 1, 2, or 3 [default: %default].")
(options, args) = parser.parse_args()
if len(args) != 0:
parser.error("No arguments accepted")
# Set random seed
random.seed(42)
# Create the dataset directory if necessary
datasetsDir = os.path.join(os.path.dirname(__file__), 'datasets')
if not os.path.exists(datasetsDir):
os.mkdir(datasetsDir)
# Generate the sample datasets
_generateSimple('simple_0.csv', numSequences=1, elementsPerSeq=3,
numRepeats=10)
| gpl-3.0 |
kylerbrown/scikit-learn | sklearn/cluster/tests/test_dbscan.py | 113 | 11393 | """
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
# Tests the DBSCAN algorithm with a similarity array.
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert_in(0, core)
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert_in(0, core)
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert_not_in(0, core)
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert_equal(len(label1), len(X))
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed')
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
def test_dbscan_core_samples_toy():
X = [[0], [2], [3], [4], [6], [8], [10]]
n_samples = len(X)
for algorithm in ['brute', 'kd_tree', 'ball_tree']:
# Degenerate case: every sample is a core sample, either with its own
# cluster or including other close core samples.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=1)
assert_array_equal(core_samples, np.arange(n_samples))
assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
# With eps=1 and min_samples=2 only the 3 samples from the denser area
# are core samples. All other points are isolated and considered noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=2)
assert_array_equal(core_samples, [1, 2, 3])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# Only the sample in the middle of the dense area is core. Its two
# neighbors are edge samples. Remaining samples are noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=3)
assert_array_equal(core_samples, [2])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# It's no longer possible to extract core samples with eps=1:
# everything is noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=4)
assert_array_equal(core_samples, [])
assert_array_equal(labels, -np.ones(n_samples))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
# see https://github.com/scikit-learn/scikit-learn/issues/4641 for
# more details
X = np.ones((10, 2))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
X = np.zeros((10, 2))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
| bsd-3-clause |
olt/mapproxy | mapproxy/config/loader.py | 1 | 80871 | # This file is part of the MapProxy project.
# Copyright (C) 2010-2016 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Configuration loading and system initializing.
"""
from __future__ import division
import os
import sys
import hashlib
import warnings
from copy import deepcopy, copy
from functools import partial
import logging
log = logging.getLogger('mapproxy.config')
from mapproxy.config import load_default_config, finish_base_config, defaults
from mapproxy.config.validator import validate_references
from mapproxy.config.spec import validate_options
from mapproxy.util.py import memoize
from mapproxy.util.ext.odict import odict
from mapproxy.util.yaml import load_yaml_file, YAMLError
from mapproxy.util.fs import find_exec
from mapproxy.compat.modules import urlparse
from mapproxy.compat import string_type, iteritems
class ConfigurationError(Exception):
pass
class ProxyConfiguration(object):
def __init__(self, conf, conf_base_dir=None, seed=False, renderd=False):
self.configuration = conf
self.seed = seed
self.renderd = renderd
if conf_base_dir is None:
conf_base_dir = os.getcwd()
self.load_globals(conf_base_dir=conf_base_dir)
self.load_grids()
self.load_caches()
self.load_sources()
self.load_wms_root_layer()
self.load_tile_layers()
self.load_services()
def load_globals(self, conf_base_dir):
self.globals = GlobalConfiguration(conf_base_dir=conf_base_dir,
conf=self.configuration.get('globals') or {},
context=self)
def load_grids(self):
self.grids = {}
grid_configs = dict(defaults.grids)
grid_configs.update(self.configuration.get('grids') or {})
for grid_name, grid_conf in iteritems(grid_configs):
grid_conf.setdefault('name', grid_name)
self.grids[grid_name] = GridConfiguration(grid_conf, context=self)
def load_caches(self):
self.caches = odict()
caches_conf = self.configuration.get('caches')
if not caches_conf: return
if isinstance(caches_conf, list):
caches_conf = list_of_dicts_to_ordered_dict(caches_conf)
for cache_name, cache_conf in iteritems(caches_conf):
cache_conf['name'] = cache_name
self.caches[cache_name] = CacheConfiguration(conf=cache_conf, context=self)
def load_sources(self):
self.sources = SourcesCollection()
for source_name, source_conf in iteritems((self.configuration.get('sources') or {})):
self.sources[source_name] = SourceConfiguration.load(conf=source_conf, context=self)
def load_tile_layers(self):
self.layers = odict()
layers_conf = deepcopy(self._layers_conf_dict())
if layers_conf is None: return
layers = self._flatten_layers_conf_dict(layers_conf)
for layer_name, layer_conf in iteritems(layers):
layer_conf['name'] = layer_name
self.layers[layer_name] = LayerConfiguration(conf=layer_conf, context=self)
def _legacy_layers_conf_dict(self):
"""
Read old style layer configuration with a dictionary where
        the key is the layer name. Optionally a list, where each layer
        is wrapped in such a dictionary.
::
layers:
foo:
title: xxx
sources: []
bar:
title: xxx
sources: []
or
::
layers:
- foo:
title: xxx
sources: []
- bar:
title: xxx
sources: []
"""
warnings.warn('old layer configuration syntax is deprecated since 1.4.0. '
'use list of dictionaries as documented', RuntimeWarning)
layers = []
layers_conf = self.configuration.get('layers')
if not layers_conf: return None # TODO config error
if isinstance(layers_conf, list):
layers_conf = list_of_dicts_to_ordered_dict(layers_conf)
for layer_name, layer_conf in iteritems(layers_conf):
layer_conf['name'] = layer_name
layers.append(layer_conf)
return dict(title=None, layers=layers)
def _layers_conf_dict(self):
"""
Returns (recursive) layer configuration as a dictionary
in unified structure:
::
{
title: 'xxx', # required, might be None
name: 'xxx', # optional
# sources or layers or both are required
sources: [],
layers: [
{..., ...} # more layers like this
]
}
Multiple layers will be wrapped in an unnamed root layer, if the
first level starts with multiple layers.
"""
layers_conf = self.configuration.get('layers')
if layers_conf is None: return
if isinstance(layers_conf, list):
if isinstance(layers_conf[0], dict) and len(layers_conf[0].keys()) == 1:
# looks like ordered legacy config
layers_conf = self._legacy_layers_conf_dict()
elif len(layers_conf) == 1 and (
'layers' in layers_conf[0]
or 'sources' in layers_conf[0]
or 'tile_sources' in layers_conf[0]):
# single root layer in list -> remove list
layers_conf = layers_conf[0]
else:
# layer list without root -> wrap in root layer
layers_conf = dict(title=None, layers=layers_conf)
if len(set(layers_conf.keys()) &
set('layers name title sources'.split())) < 2:
# looks like unordered legacy config
layers_conf = self._legacy_layers_conf_dict()
return layers_conf
def _flatten_layers_conf_dict(self, layers_conf, _layers=None):
"""
Returns a dictionary with all layers that have a name and sources.
Flattens the layer tree.
"""
layers = _layers if _layers is not None else odict()
if 'layers' in layers_conf:
for layer in layers_conf.pop('layers'):
self._flatten_layers_conf_dict(layer, layers)
if 'name' in layers_conf and ('sources' in layers_conf or 'tile_sources' in layers_conf):
layers[layers_conf['name']] = layers_conf
return layers
def load_wms_root_layer(self):
self.wms_root_layer = None
layers_conf = self._layers_conf_dict()
if layers_conf is None: return
self.wms_root_layer = WMSLayerConfiguration(layers_conf, context=self)
def load_services(self):
self.services = ServiceConfiguration(self.configuration.get('services', {}), context=self)
def configured_services(self):
with self:
return self.services.services()
def __enter__(self):
# push local base_config onto config stack
import mapproxy.config.config
mapproxy.config.config._config.push(self.base_config)
def __exit__(self, type, value, traceback):
# pop local base_config from config stack
import mapproxy.config.config
mapproxy.config.config._config.pop()
@property
def base_config(self):
return self.globals.base_config
def config_files(self):
"""
        Returns a dictionary with all configuration filenames and their timestamps.
Contains any included files as well (see `base` option).
"""
return self.configuration.get('__config_files__', {})
def list_of_dicts_to_ordered_dict(dictlist):
"""
>>> d = list_of_dicts_to_ordered_dict([{'a': 1}, {'b': 2}, {'c': 3}])
>>> list(d.items())
[('a', 1), ('b', 2), ('c', 3)]
"""
result = odict()
for d in dictlist:
for k, v in iteritems(d):
result[k] = v
return result
class ConfigurationBase(object):
"""
Base class for all configurations.
"""
defaults = {}
def __init__(self, conf, context):
"""
:param conf: the configuration part for this configurator
:param context: the complete proxy configuration
:type context: ProxyConfiguration
"""
self.conf = conf
self.context = context
for k, v in iteritems(self.defaults):
if k not in self.conf:
self.conf[k] = v
class GridConfiguration(ConfigurationBase):
@memoize
def tile_grid(self):
from mapproxy.grid import tile_grid
if 'base' in self.conf:
base_grid_name = self.conf['base']
            if base_grid_name not in self.context.grids:
raise ConfigurationError('unknown base %s for grid %s' % (base_grid_name, self.conf['name']))
conf = self.context.grids[base_grid_name].conf.copy()
conf.update(self.conf)
conf.pop('base')
self.conf = conf
else:
conf = self.conf
align_with = None
if 'align_resolutions_with' in self.conf:
align_with_grid_name = self.conf['align_resolutions_with']
align_with = self.context.grids[align_with_grid_name].tile_grid()
tile_size = self.context.globals.get_value('tile_size', conf,
global_key='grid.tile_size')
conf['tile_size'] = tuple(tile_size)
tile_size = tuple(tile_size)
stretch_factor = self.context.globals.get_value('stretch_factor', conf,
global_key='image.stretch_factor')
max_shrink_factor = self.context.globals.get_value('max_shrink_factor', conf,
global_key='image.max_shrink_factor')
if conf.get('origin') is None:
            log.warn('grid %s does not have an origin. default origin will change from sw (south-west) to nw (north-west) with MapProxy 2.0',
conf['name'],
)
grid = tile_grid(
name=conf['name'],
srs=conf.get('srs'),
tile_size=tile_size,
min_res=conf.get('min_res'),
max_res=conf.get('max_res'),
res=conf.get('res'),
res_factor=conf.get('res_factor', 2.0),
threshold_res=conf.get('threshold_res'),
bbox=conf.get('bbox'),
bbox_srs=conf.get('bbox_srs'),
num_levels=conf.get('num_levels'),
stretch_factor=stretch_factor,
max_shrink_factor=max_shrink_factor,
align_with=align_with,
origin=conf.get('origin')
)
return grid
class GlobalConfiguration(ConfigurationBase):
def __init__(self, conf_base_dir, conf, context):
ConfigurationBase.__init__(self, conf, context)
self.base_config = load_default_config()
self._copy_conf_values(self.conf, self.base_config)
self.base_config.conf_base_dir = conf_base_dir
finish_base_config(self.base_config)
self.image_options = ImageOptionsConfiguration(self.conf.get('image', {}), context)
self.renderd_address = self.get_value('renderd.address')
def _copy_conf_values(self, d, target):
for k, v in iteritems(d):
if v is None: continue
if (hasattr(v, 'iteritems') or hasattr(v, 'items')) and k in target:
self._copy_conf_values(v, target[k])
else:
target[k] = v
def get_value(self, key, local={}, global_key=None, default_key=None):
result = dotted_dict_get(key, local)
if result is None:
result = dotted_dict_get(global_key or key, self.conf)
if result is None:
result = dotted_dict_get(default_key or global_key or key, self.base_config)
return result
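    # Lookup order, illustrated with a hypothetical call: get_value('tile_size',
    # local=cache_conf, global_key='grid.tile_size') first checks
    # cache_conf['tile_size'], then 'grid.tile_size' in this globals section, and
    # finally the built-in defaults from load_default_config().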
def get_path(self, key, local, global_key=None, default_key=None):
value = self.get_value(key, local, global_key, default_key)
if value is not None:
value = self.abspath(value)
return value
def abspath(self, path):
return os.path.join(self.base_config.conf_base_dir, path)
default_image_options = {
}
class ImageOptionsConfiguration(ConfigurationBase):
def __init__(self, conf, context):
ConfigurationBase.__init__(self, conf, context)
self._init_formats()
def _init_formats(self):
self.formats = {}
formats_config = default_image_options.copy()
for format, conf in iteritems(self.conf.get('formats', {})):
if format in formats_config:
tmp = formats_config[format].copy()
tmp.update(conf)
conf = tmp
if 'resampling_method' in conf:
conf['resampling'] = conf.pop('resampling_method')
if 'encoding_options' in conf:
self._check_encoding_options(conf['encoding_options'])
if 'merge_method' in conf:
warnings.warn('merge_method now defaults to composite. option no longer required',
DeprecationWarning)
formats_config[format] = conf
for format, conf in iteritems(formats_config):
if 'format' not in conf and format.startswith('image/'):
conf['format'] = format
self.formats[format] = conf
def _check_encoding_options(self, options):
if not options:
return
options = options.copy()
jpeg_quality = options.pop('jpeg_quality', None)
if jpeg_quality and not isinstance(jpeg_quality, int):
raise ConfigurationError('jpeg_quality is not an integer')
quantizer = options.pop('quantizer', None)
if quantizer and quantizer not in ('fastoctree', 'mediancut'):
raise ConfigurationError('unknown quantizer')
if options:
raise ConfigurationError('unknown encoding_options: %r' % options)
def image_opts(self, image_conf, format):
from mapproxy.image.opts import ImageOptions
if not image_conf:
image_conf = {}
conf = {}
if format in self.formats:
conf = self.formats[format].copy()
resampling = image_conf.get('resampling_method') or conf.get('resampling')
if resampling is None:
resampling = self.context.globals.get_value('image.resampling_method', {})
transparent = image_conf.get('transparent')
opacity = image_conf.get('opacity')
img_format = image_conf.get('format')
colors = image_conf.get('colors')
mode = image_conf.get('mode')
encoding_options = image_conf.get('encoding_options')
if 'merge_method' in image_conf:
warnings.warn('merge_method now defaults to composite. option no longer required',
DeprecationWarning)
self._check_encoding_options(encoding_options)
# only overwrite default if it is not None
for k, v in iteritems(dict(transparent=transparent, opacity=opacity, resampling=resampling,
format=img_format, colors=colors, mode=mode, encoding_options=encoding_options,
)):
if v is not None:
conf[k] = v
if 'format' not in conf and format and format.startswith('image/'):
conf['format'] = format
# caches shall be able to store png and jpeg tiles with mixed format
if format == 'mixed':
conf['format'] = format
# force 256 colors for image.paletted for backwards compat
paletted = self.context.globals.get_value('image.paletted', self.conf)
if conf.get('colors') is None and 'png' in conf.get('format', '') and paletted:
conf['colors'] = 256
opts = ImageOptions(**conf)
return opts
def dotted_dict_get(key, d):
"""
>>> dotted_dict_get('foo', {'foo': {'bar': 1}})
{'bar': 1}
>>> dotted_dict_get('foo.bar', {'foo': {'bar': 1}})
1
>>> dotted_dict_get('bar', {'foo': {'bar': 1}})
"""
parts = key.split('.')
try:
while parts and d:
d = d[parts.pop(0)]
except KeyError:
return None
if parts: # not completely resolved
return None
return d
class SourcesCollection(dict):
"""
Collection of SourceConfigurations.
Allows access to tagged WMS sources, e.g.
    ``sc['source_name:lyr1,lyr2']`` will return the source with ``source_name``
and set ``req.layers`` to ``lyr1,lyr2``.
"""
def __getitem__(self, key):
layers = None
source_name = key
if ':' in source_name:
source_name, layers = source_name.split(':', 1)
source = dict.__getitem__(self, source_name)
if not layers:
return source
if source.conf.get('type') not in ('wms', 'mapserver', 'mapnik'):
raise ConfigurationError("found ':' in: '%s'."
" tagged sources only supported for WMS/Mapserver/Mapnik" % key)
uses_req = source.conf.get('type') != 'mapnik'
source = copy(source)
source.conf = deepcopy(source.conf)
if uses_req:
supported_layers = source.conf['req'].get('layers', [])
else:
supported_layers = source.conf.get('layers', [])
supported_layer_set = SourcesCollection.layer_set(supported_layers)
layer_set = SourcesCollection.layer_set(layers)
if supported_layer_set and not layer_set.issubset(supported_layer_set):
raise ConfigurationError('layers (%s) not supported by source (%s)' % (
layers, ','.join(supported_layer_set)))
if uses_req:
source.conf['req']['layers'] = layers
else:
source.conf['layers'] = layers
return source
def __contains__(self, key):
source_name = key
if ':' in source_name:
source_name, _ = source_name.split(':', 1)
return dict.__contains__(self, source_name)
@staticmethod
def layer_set(layers):
if isinstance(layers, (list, tuple)):
return set(layers)
return set(layers.split(','))
class SourceConfiguration(ConfigurationBase):
supports_meta_tiles = True
@classmethod
def load(cls, conf, context):
source_type = conf['type']
subclass = source_configuration_types.get(source_type)
if not subclass:
raise ConfigurationError("unknown source type '%s'" % source_type)
return subclass(conf, context)
@memoize
def coverage(self):
        if 'coverage' not in self.conf: return None
from mapproxy.config.coverage import load_coverage
return load_coverage(self.conf['coverage'])
def image_opts(self, format=None):
if 'transparent' in self.conf:
self.conf.setdefault('image', {})['transparent'] = self.conf['transparent']
return self.context.globals.image_options.image_opts(self.conf.get('image', {}), format)
def http_client(self, url):
from mapproxy.client.http import auth_data_from_url, HTTPClient
http_client = None
url, (username, password) = auth_data_from_url(url)
insecure = ssl_ca_certs = None
if 'https' in url:
insecure = self.context.globals.get_value('http.ssl_no_cert_checks', self.conf)
ssl_ca_certs = self.context.globals.get_path('http.ssl_ca_certs', self.conf)
timeout = self.context.globals.get_value('http.client_timeout', self.conf)
headers = self.context.globals.get_value('http.headers', self.conf)
http_client = HTTPClient(url, username, password, insecure=insecure,
ssl_ca_certs=ssl_ca_certs, timeout=timeout,
headers=headers)
return http_client, url
@memoize
def on_error_handler(self):
        if 'on_error' not in self.conf: return None
from mapproxy.source.error import HTTPSourceErrorHandler
error_handler = HTTPSourceErrorHandler()
for status_code, response_conf in iteritems(self.conf['on_error']):
if not isinstance(status_code, int) and status_code != 'other':
raise ConfigurationError("invalid error code %r in on_error", status_code)
cacheable = response_conf.get('cache', False)
color = response_conf.get('response', 'transparent')
if color == 'transparent':
color = (255, 255, 255, 0)
else:
color = parse_color(color)
error_handler.add_handler(status_code, color, cacheable)
return error_handler
def resolution_range(conf):
from mapproxy.grid import resolution_range as _resolution_range
if 'min_res' in conf or 'max_res' in conf:
return _resolution_range(min_res=conf.get('min_res'),
max_res=conf.get('max_res'))
if 'min_scale' in conf or 'max_scale' in conf:
return _resolution_range(min_scale=conf.get('min_scale'),
max_scale=conf.get('max_scale'))
class ArcGISSourceConfiguration(SourceConfiguration):
source_type = ('arcgis',)
def __init__(self, conf, context):
SourceConfiguration.__init__(self, conf, context)
def source(self, params=None):
from mapproxy.client.arcgis import ArcGISClient
from mapproxy.source.arcgis import ArcGISSource
from mapproxy.srs import SRS
from mapproxy.request.arcgis import create_request
if not self.conf.get('opts', {}).get('map', True):
return None
if not self.context.seed and self.conf.get('seed_only'):
from mapproxy.source import DummySource
return DummySource(coverage=self.coverage())
# Get the supported SRS codes and formats from the configuration.
supported_srs = [SRS(code) for code in self.conf.get("supported_srs", [])]
supported_formats = [file_ext(f) for f in self.conf.get("supported_formats", [])]
# Construct the parameters
if params is None:
params = {}
request_format = self.conf['req'].get('format')
if request_format:
params['format'] = request_format
request = create_request(self.conf["req"], params)
http_client, request.url = self.http_client(request.url)
coverage = self.coverage()
res_range = resolution_range(self.conf)
client = ArcGISClient(request, http_client)
image_opts = self.image_opts(format=params.get('format'))
return ArcGISSource(client, image_opts=image_opts, coverage=coverage,
res_range=res_range,
supported_srs=supported_srs,
supported_formats=supported_formats or None)
def fi_source(self, params=None):
from mapproxy.client.arcgis import ArcGISInfoClient
from mapproxy.request.arcgis import create_identify_request
from mapproxy.source.arcgis import ArcGISInfoSource
from mapproxy.srs import SRS
if params is None: params = {}
request_format = self.conf['req'].get('format')
if request_format:
params['format'] = request_format
supported_srs = [SRS(code) for code in self.conf.get('supported_srs', [])]
fi_source = None
if self.conf.get('opts', {}).get('featureinfo', False):
opts = self.conf['opts']
tolerance = opts.get('featureinfo_tolerance', 5)
return_geometries = opts.get('featureinfo_return_geometries', False)
fi_request = create_identify_request(self.conf['req'], params)
http_client, fi_request.url = self.http_client(fi_request.url)
fi_client = ArcGISInfoClient(fi_request,
supported_srs=supported_srs,
http_client=http_client,
tolerance=tolerance,
return_geometries=return_geometries,
)
fi_source = ArcGISInfoSource(fi_client)
return fi_source
class WMSSourceConfiguration(SourceConfiguration):
source_type = ('wms',)
@staticmethod
def static_legend_source(url, context):
from mapproxy.cache.legend import LegendCache
from mapproxy.client.wms import WMSLegendURLClient
from mapproxy.source.wms import WMSLegendSource
cache_dir = os.path.join(context.globals.get_path('cache.base_dir', {}),
'legends')
if url.startswith('file://') and not url.startswith('file:///'):
prefix = 'file://'
url = prefix + context.globals.abspath(url[7:])
lg_client = WMSLegendURLClient(url)
legend_cache = LegendCache(cache_dir=cache_dir)
return WMSLegendSource([lg_client], legend_cache, static=True)
def fi_xslt_transformer(self, conf, context):
from mapproxy.featureinfo import XSLTransformer, has_xslt_support
fi_transformer = None
fi_xslt = conf.get('featureinfo_xslt')
if fi_xslt:
if not has_xslt_support:
raise ValueError('featureinfo_xslt requires lxml. Please install.')
fi_xslt = context.globals.abspath(fi_xslt)
fi_transformer = XSLTransformer(fi_xslt)
return fi_transformer
def image_opts(self, format=None):
if 'transparent' not in (self.conf.get('image') or {}):
transparent = self.conf['req'].get('transparent')
if transparent is not None:
transparent = bool(str(transparent).lower() == 'true')
self.conf.setdefault('image', {})['transparent'] = transparent
return SourceConfiguration.image_opts(self, format=format)
def source(self, params=None):
from mapproxy.client.wms import WMSClient
from mapproxy.request.wms import create_request
from mapproxy.source.wms import WMSSource
from mapproxy.srs import SRS
if not self.conf.get('wms_opts', {}).get('map', True):
return None
if not self.context.seed and self.conf.get('seed_only'):
from mapproxy.source import DummySource
return DummySource(coverage=self.coverage())
if params is None: params = {}
request_format = self.conf['req'].get('format')
if request_format:
params['format'] = request_format
image_opts = self.image_opts(format=params.get('format'))
supported_srs = [SRS(code) for code in self.conf.get('supported_srs', [])]
supported_formats = [file_ext(f) for f in self.conf.get('supported_formats', [])]
version = self.conf.get('wms_opts', {}).get('version', '1.1.1')
lock = None
concurrent_requests = self.context.globals.get_value('concurrent_requests', self.conf,
global_key='http.concurrent_requests')
if concurrent_requests:
from mapproxy.util.lock import SemLock
lock_dir = self.context.globals.get_path('cache.lock_dir', self.conf)
lock_timeout = self.context.globals.get_value('http.client_timeout', self.conf)
url = urlparse.urlparse(self.conf['req']['url'])
md5 = hashlib.md5(url.netloc.encode('ascii'))
lock_file = os.path.join(lock_dir, md5.hexdigest() + '.lck')
lock = lambda: SemLock(lock_file, concurrent_requests, timeout=lock_timeout)
coverage = self.coverage()
res_range = resolution_range(self.conf)
transparent_color = (self.conf.get('image') or {}).get('transparent_color')
transparent_color_tolerance = self.context.globals.get_value(
'image.transparent_color_tolerance', self.conf)
if transparent_color:
transparent_color = parse_color(transparent_color)
http_method = self.context.globals.get_value('http.method', self.conf)
fwd_req_params = set(self.conf.get('forward_req_params', []))
request = create_request(self.conf['req'], params, version=version,
abspath=self.context.globals.abspath)
http_client, request.url = self.http_client(request.url)
client = WMSClient(request, http_client=http_client,
http_method=http_method, lock=lock,
fwd_req_params=fwd_req_params)
return WMSSource(client, image_opts=image_opts, coverage=coverage,
res_range=res_range, transparent_color=transparent_color,
transparent_color_tolerance=transparent_color_tolerance,
supported_srs=supported_srs,
supported_formats=supported_formats or None,
fwd_req_params=fwd_req_params)
def fi_source(self, params=None):
from mapproxy.client.wms import WMSInfoClient
from mapproxy.request.wms import create_request
from mapproxy.source.wms import WMSInfoSource
from mapproxy.srs import SRS
if params is None: params = {}
request_format = self.conf['req'].get('format')
if request_format:
params['format'] = request_format
supported_srs = [SRS(code) for code in self.conf.get('supported_srs', [])]
fi_source = None
if self.conf.get('wms_opts', {}).get('featureinfo', False):
wms_opts = self.conf['wms_opts']
version = wms_opts.get('version', '1.1.1')
if 'featureinfo_format' in wms_opts:
params['info_format'] = wms_opts['featureinfo_format']
fi_request = create_request(self.conf['req'], params,
req_type='featureinfo', version=version,
abspath=self.context.globals.abspath)
fi_transformer = self.fi_xslt_transformer(self.conf.get('wms_opts', {}),
self.context)
http_client, fi_request.url = self.http_client(fi_request.url)
fi_client = WMSInfoClient(fi_request, supported_srs=supported_srs,
http_client=http_client)
fi_source = WMSInfoSource(fi_client, fi_transformer=fi_transformer)
return fi_source
def lg_source(self, params=None):
from mapproxy.cache.legend import LegendCache
from mapproxy.client.wms import WMSLegendClient
from mapproxy.request.wms import create_request
from mapproxy.source.wms import WMSLegendSource
if params is None: params = {}
request_format = self.conf['req'].get('format')
if request_format:
params['format'] = request_format
lg_source = None
cache_dir = os.path.join(self.context.globals.get_path('cache.base_dir', {}),
'legends')
if self.conf.get('wms_opts', {}).get('legendurl', False):
lg_url = self.conf.get('wms_opts', {}).get('legendurl')
lg_source = WMSSourceConfiguration.static_legend_source(lg_url, self.context)
elif self.conf.get('wms_opts', {}).get('legendgraphic', False):
version = self.conf.get('wms_opts', {}).get('version', '1.1.1')
lg_req = self.conf['req'].copy()
lg_clients = []
lg_layers = str(lg_req['layers']).split(',')
del lg_req['layers']
for lg_layer in lg_layers:
lg_req['layer'] = lg_layer
lg_request = create_request(lg_req, params,
req_type='legendgraphic', version=version,
abspath=self.context.globals.abspath)
http_client, lg_request.url = self.http_client(lg_request.url)
lg_client = WMSLegendClient(lg_request, http_client=http_client)
lg_clients.append(lg_client)
legend_cache = LegendCache(cache_dir=cache_dir)
lg_source = WMSLegendSource(lg_clients, legend_cache)
return lg_source
class MapServerSourceConfiguration(WMSSourceConfiguration):
source_type = ('mapserver',)
def __init__(self, conf, context):
WMSSourceConfiguration.__init__(self, conf, context)
self.script = self.context.globals.get_path('mapserver.binary',
self.conf)
if not self.script:
self.script = find_exec('mapserv')
if not self.script or not os.path.isfile(self.script):
raise ConfigurationError('could not find mapserver binary (%r)' %
(self.script, ))
# set url to dummy script name, required as identifier
# for concurrent_request
self.conf['req']['url'] = 'mapserver://' + self.script
mapfile = self.context.globals.abspath(self.conf['req']['map'])
self.conf['req']['map'] = mapfile
def http_client(self, url):
working_dir = self.context.globals.get_path('mapserver.working_dir', self.conf)
if working_dir and not os.path.isdir(working_dir):
raise ConfigurationError('could not find mapserver working_dir (%r)' % (working_dir, ))
from mapproxy.client.cgi import CGIClient
client = CGIClient(script=self.script, working_directory=working_dir)
return client, url
class MapnikSourceConfiguration(SourceConfiguration):
source_type = ('mapnik',)
def source(self, params=None):
if not self.context.seed and self.conf.get('seed_only'):
from mapproxy.source import DummySource
return DummySource(coverage=self.coverage())
image_opts = self.image_opts()
lock = None
concurrent_requests = self.context.globals.get_value('concurrent_requests', self.conf,
global_key='http.concurrent_requests')
if concurrent_requests:
from mapproxy.util.lock import SemLock
lock_dir = self.context.globals.get_path('cache.lock_dir', self.conf)
md5 = hashlib.md5(self.conf['mapfile'])
lock_file = os.path.join(lock_dir, md5.hexdigest() + '.lck')
lock = lambda: SemLock(lock_file, concurrent_requests)
coverage = self.coverage()
res_range = resolution_range(self.conf)
scale_factor = self.conf.get('scale_factor', None)
layers = self.conf.get('layers', None)
if isinstance(layers, string_type):
layers = layers.split(',')
mapfile = self.context.globals.abspath(self.conf['mapfile'])
if self.conf.get('use_mapnik2', False):
warnings.warn('use_mapnik2 option is no longer needed for Mapnik 2 support',
DeprecationWarning)
from mapproxy.source.mapnik import MapnikSource, mapnik as mapnik_api
if mapnik_api is None:
raise ConfigurationError('Could not import Mapnik, please verify it is installed!')
if self.context.renderd:
# only renderd guarantees that we have a single proc/thread
# that accesses the same mapnik map object
reuse_map_objects = True
else:
reuse_map_objects = False
return MapnikSource(mapfile, layers=layers, image_opts=image_opts,
coverage=coverage, res_range=res_range, lock=lock,
reuse_map_objects=reuse_map_objects, scale_factor=scale_factor)
class TileSourceConfiguration(SourceConfiguration):
supports_meta_tiles = False
source_type = ('tile',)
defaults = {}
def source(self, params=None):
from mapproxy.client.tile import TileClient, TileURLTemplate
from mapproxy.source.tile import TiledSource
if not self.context.seed and self.conf.get('seed_only'):
from mapproxy.source import DummySource
return DummySource(coverage=self.coverage())
if params is None: params = {}
url = self.conf['url']
if self.conf.get('origin'):
warnings.warn('origin for tile sources is deprecated since 1.3.0 '
'and will be ignored. use grid with correct origin.', RuntimeWarning)
http_client, url = self.http_client(url)
grid_name = self.conf.get('grid')
if grid_name is None:
log.warn("tile source for %s does not have a grid configured and defaults to GLOBAL_MERCATOR. default will change with MapProxy 2.0", url)
grid_name = "GLOBAL_MERCATOR"
grid = self.context.grids[grid_name].tile_grid()
coverage = self.coverage()
res_range = resolution_range(self.conf)
image_opts = self.image_opts()
error_handler = self.on_error_handler()
format = file_ext(params['format'])
client = TileClient(TileURLTemplate(url, format=format), http_client=http_client, grid=grid)
return TiledSource(grid, client, coverage=coverage, image_opts=image_opts,
error_handler=error_handler, res_range=res_range)
def file_ext(mimetype):
from mapproxy.request.base import split_mime_type
_mime_class, format, _options = split_mime_type(mimetype)
return format
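# Hedged usage sketch (not part of MapProxy) for the helper above: file_ext()
# keeps only the subtype of a mime type, e.g. 'image/png' -> 'png', and any
# options such as '; mode=8bit' are discarded by split_mime_type(). The
# function below is purely illustrative and is not called anywhere else.
def _example_file_ext():
    return file_ext('image/png')  # expected to return 'png'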
class DebugSourceConfiguration(SourceConfiguration):
source_type = ('debug',)
required_keys = set('type'.split())
def source(self, params=None):
from mapproxy.source import DebugSource
return DebugSource()
source_configuration_types = {
'wms': WMSSourceConfiguration,
'arcgis': ArcGISSourceConfiguration,
'tile': TileSourceConfiguration,
'debug': DebugSourceConfiguration,
'mapserver': MapServerSourceConfiguration,
'mapnik': MapnikSourceConfiguration,
}
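# Illustrative sketch (not part of MapProxy): the registry above is what the
# source-type dispatch near the top of this module relies on, mapping the
# 'type' key of a source configuration to its SourceConfiguration subclass.
# The helper below is hypothetical and exists only as a usage example.
def _example_lookup_source_configuration(source_type='wms'):
    subclass = source_configuration_types.get(source_type)
    # e.g. source_configuration_types['wms'] is WMSSourceConfiguration;
    # unknown types yield None and the dispatch raises ConfigurationError.
    return subclass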
class CacheConfiguration(ConfigurationBase):
defaults = {'format': 'image/png'}
@memoize
def cache_dir(self):
cache_dir = self.conf.get('cache', {}).get('directory')
if cache_dir:
if self.conf.get('cache_dir'):
log.warn('found cache.directory and cache_dir option for %s, ignoring cache_dir',
self.conf['name'])
return self.context.globals.abspath(cache_dir)
return self.context.globals.get_path('cache_dir', self.conf,
global_key='cache.base_dir')
@memoize
def has_multiple_grids(self):
return len(self.grid_confs()) > 1
def lock_dir(self):
lock_dir = self.context.globals.get_path('cache.tile_lock_dir', self.conf)
if not lock_dir:
lock_dir = os.path.join(self.cache_dir(), 'tile_locks')
return lock_dir
def _file_cache(self, grid_conf, file_ext):
from mapproxy.cache.file import FileCache
cache_dir = self.cache_dir()
directory_layout = self.conf.get('cache', {}).get('directory_layout', 'tc')
if self.conf.get('cache', {}).get('directory'):
if self.has_multiple_grids():
raise ConfigurationError(
"using single directory for cache with multiple grids in %s" %
(self.conf['name']),
)
pass
elif self.conf.get('cache', {}).get('use_grid_names'):
cache_dir = os.path.join(cache_dir, self.conf['name'], grid_conf.tile_grid().name)
else:
suffix = grid_conf.conf['srs'].replace(':', '')
cache_dir = os.path.join(cache_dir, self.conf['name'] + '_' + suffix)
link_single_color_images = self.context.globals.get_value('link_single_color_images', self.conf,
global_key='cache.link_single_color_images')
if link_single_color_images and sys.platform == 'win32':
log.warn('link_single_color_images not supported on windows')
link_single_color_images = False
return FileCache(
cache_dir,
file_ext=file_ext,
directory_layout=directory_layout,
link_single_color_images=link_single_color_images,
)
def _mbtiles_cache(self, grid_conf, file_ext):
from mapproxy.cache.mbtiles import MBTilesCache
filename = self.conf['cache'].get('filename')
if not filename:
filename = self.conf['name'] + '.mbtiles'
if filename.startswith('.' + os.sep):
mbfile_path = self.context.globals.abspath(filename)
else:
mbfile_path = os.path.join(self.cache_dir(), filename)
sqlite_timeout = self.context.globals.get_value('cache.sqlite_timeout', self.conf)
wal = self.context.globals.get_value('cache.sqlite_wal', self.conf)
return MBTilesCache(
mbfile_path,
timeout=sqlite_timeout,
wal=wal,
)
def _geopackage_cache(self, grid_conf, file_ext):
from mapproxy.cache.geopackage import GeopackageCache, GeopackageLevelCache
filename = self.conf['cache'].get('filename')
table_name = self.conf['cache'].get('table_name') or \
"{}_{}".format(self.conf['name'], grid_conf.tile_grid().name)
levels = self.conf['cache'].get('levels')
if not filename:
filename = self.conf['name'] + '.gpkg'
if filename.startswith('.' + os.sep):
gpkg_file_path = self.context.globals.abspath(filename)
else:
gpkg_file_path = os.path.join(self.cache_dir(), filename)
cache_dir = self.conf['cache'].get('directory')
if cache_dir:
cache_dir = os.path.join(
self.context.globals.abspath(cache_dir),
grid_conf.tile_grid().name
)
else:
cache_dir = self.cache_dir()
cache_dir = os.path.join(
cache_dir,
self.conf['name'],
grid_conf.tile_grid().name
)
if levels:
return GeopackageLevelCache(
cache_dir, grid_conf.tile_grid(), table_name
)
else:
return GeopackageCache(
gpkg_file_path, grid_conf.tile_grid(), table_name
)
def _s3_cache(self, grid_conf, file_ext):
from mapproxy.cache.s3 import S3Cache
bucket_name = self.context.globals.get_value('cache.bucket_name', self.conf,
global_key='cache.s3.bucket_name')
if not bucket_name:
raise ConfigurationError("no bucket_name configured for s3 cache %s" % self.conf['name'])
profile_name = self.context.globals.get_value('cache.profile_name', self.conf,
global_key='cache.s3.profile_name')
directory_layout = self.conf['cache'].get('directory_layout', 'tms')
base_path = self.conf['cache'].get('directory', None)
if base_path is None:
base_path = os.path.join(self.conf['name'], grid_conf.tile_grid().name)
return S3Cache(
base_path=base_path,
file_ext=file_ext,
directory_layout=directory_layout,
bucket_name=bucket_name,
profile_name=profile_name,
)
def _sqlite_cache(self, grid_conf, file_ext):
from mapproxy.cache.mbtiles import MBTilesLevelCache
cache_dir = self.conf.get('cache', {}).get('directory')
if cache_dir:
cache_dir = os.path.join(
self.context.globals.abspath(cache_dir),
grid_conf.tile_grid().name
)
else:
cache_dir = self.cache_dir()
cache_dir = os.path.join(
cache_dir,
self.conf['name'],
grid_conf.tile_grid().name
)
sqlite_timeout = self.context.globals.get_value('cache.sqlite_timeout', self.conf)
wal = self.context.globals.get_value('cache.sqlite_wal', self.conf)
return MBTilesLevelCache(
cache_dir,
timeout=sqlite_timeout,
wal=wal,
)
def _couchdb_cache(self, grid_conf, file_ext):
from mapproxy.cache.couchdb import CouchDBCache, CouchDBMDTemplate
db_name = self.conf['cache'].get('db_name')
if not db_name:
suffix = grid_conf.conf['srs'].replace(':', '')
db_name = self.conf['name'] + '_' + suffix
url = self.conf['cache'].get('url')
if not url:
url = 'http://127.0.0.1:5984'
md_template = CouchDBMDTemplate(self.conf['cache'].get('tile_metadata', {}))
tile_id = self.conf['cache'].get('tile_id')
return CouchDBCache(url=url, db_name=db_name,
file_ext=file_ext, tile_grid=grid_conf.tile_grid(),
md_template=md_template, tile_id_template=tile_id)
def _riak_cache(self, grid_conf, file_ext):
from mapproxy.cache.riak import RiakCache
default_ports = self.conf['cache'].get('default_ports', {})
default_pb_port = default_ports.get('pb', 8087)
default_http_port = default_ports.get('http', 8098)
nodes = self.conf['cache'].get('nodes')
if not nodes:
nodes = [{'host': '127.0.0.1'}]
for n in nodes:
if 'pb_port' not in n:
n['pb_port'] = default_pb_port
if 'http_port' not in n:
n['http_port'] = default_http_port
protocol = self.conf['cache'].get('protocol', 'pbc')
bucket = self.conf['cache'].get('bucket')
if not bucket:
suffix = grid_conf.tile_grid().name
bucket = self.conf['name'] + '_' + suffix
use_secondary_index = self.conf['cache'].get('secondary_index', False)
return RiakCache(nodes=nodes, protocol=protocol, bucket=bucket,
tile_grid=grid_conf.tile_grid(),
use_secondary_index=use_secondary_index,
)
def _redis_cache(self, grid_conf, file_ext):
from mapproxy.cache.redis import RedisCache
host = self.conf['cache'].get('host', '127.0.0.1')
port = self.conf['cache'].get('port', 6379)
db = self.conf['cache'].get('db', 0)
ttl = self.conf['cache'].get('default_ttl', 3600)
prefix = self.conf['cache'].get('prefix')
if not prefix:
prefix = self.conf['name'] + '_' + grid_conf.tile_grid().name
return RedisCache(
host=host,
port=port,
db=db,
prefix=prefix,
ttl=ttl,
)
def _compact_cache(self, grid_conf, file_ext):
from mapproxy.cache.compact import CompactCacheV1
cache_dir = self.cache_dir()
if self.conf.get('cache', {}).get('directory'):
if self.has_multiple_grids():
raise ConfigurationError(
"using single directory for cache with multiple grids in %s" %
(self.conf['name']),
)
pass
else:
cache_dir = os.path.join(cache_dir, self.conf['name'], grid_conf.tile_grid().name)
if self.conf['cache']['version'] != 1:
raise ConfigurationError("compact cache only supports version 1")
return CompactCacheV1(
cache_dir=cache_dir,
)
def _tile_cache(self, grid_conf, file_ext):
if self.conf.get('disable_storage', False):
from mapproxy.cache.dummy import DummyCache
return DummyCache()
grid_conf.tile_grid()  # create to resolve `base` in grid_conf.conf
cache_type = self.conf.get('cache', {}).get('type', 'file')
return getattr(self, '_%s_cache' % cache_type)(grid_conf, file_ext)
def _tile_filter(self):
filters = []
if 'watermark' in self.conf:
from mapproxy.tilefilter import create_watermark_filter
if self.conf['watermark'].get('color'):
self.conf['watermark']['color'] = parse_color(self.conf['watermark']['color'])
f = create_watermark_filter(self.conf, self.context)
if f:
filters.append(f)
return filters
@memoize
def image_opts(self):
from mapproxy.image.opts import ImageFormat
format = None
if 'format' not in self.conf.get('image', {}):
format = self.conf.get('format') or self.conf.get('request_format')
image_opts = self.context.globals.image_options.image_opts(self.conf.get('image', {}), format)
if image_opts.format is None:
if format is not None and format.startswith('image/'):
image_opts.format = ImageFormat(format)
else:
image_opts.format = ImageFormat('image/png')
return image_opts
def supports_tiled_only_access(self, params=None, tile_grid=None):
caches = self.caches()
if len(caches) > 1:
return False
cache_grid, extent, tile_manager = caches[0]
image_opts = self.image_opts()
if (tile_grid.is_subset_of(cache_grid)
and params.get('format') == image_opts.format):
return True
return False
def source(self, params=None, tile_grid=None, tiled_only=False):
from mapproxy.source.tile import CacheSource
from mapproxy.layer import map_extent_from_grid
caches = self.caches()
if len(caches) > 1:
# cache with multiple grids/sources
source = self.map_layer()
source.supports_meta_tiles = True
return source
cache_grid, extent, tile_manager = caches[0]
image_opts = self.image_opts()
cache_extent = map_extent_from_grid(tile_grid)
cache_extent = extent.intersection(cache_extent)
source = CacheSource(tile_manager, extent=cache_extent,
image_opts=image_opts, tiled_only=tiled_only)
return source
def _sources_for_grid(self, source_names, grid_conf, request_format):
sources = []
source_image_opts = []
# a cache can directly access source tiles when _all_ sources are caches too
# and when they have compatible grids by using tiled_only on the CacheSource
# check if all sources support tiled_only
tiled_only = True
for source_name in source_names:
if source_name in self.context.sources:
tiled_only = False
break
elif source_name in self.context.caches:
cache_conf = self.context.caches[source_name]
tiled_only = cache_conf.supports_tiled_only_access(
params={'format': request_format},
tile_grid=grid_conf.tile_grid(),
)
if not tiled_only:
break
for source_name in source_names:
if source_name in self.context.sources:
source_conf = self.context.sources[source_name]
source = source_conf.source({'format': request_format})
elif source_name in self.context.caches:
cache_conf = self.context.caches[source_name]
source = cache_conf.source(
params={'format': request_format},
tile_grid=grid_conf.tile_grid(),
tiled_only=tiled_only,
)
else:
raise ConfigurationError('unknown source %s' % source_name)
if source:
sources.append(source)
source_image_opts.append(source.image_opts)
return sources, source_image_opts
def _sources_for_band_merge(self, sources_conf, grid_conf, request_format):
from mapproxy.image.merge import BandMerger
source_names = []
for band, band_sources in iteritems(sources_conf):
for source in band_sources:
name = source['source']
if name in source_names:
idx = source_names.index(name)
else:
source_names.append(name)
idx = len(source_names) - 1
source["src_idx"] = idx
sources, source_image_opts = self._sources_for_grid(
source_names=source_names,
grid_conf=grid_conf,
request_format=request_format,
)
if 'l' in sources_conf:
mode = 'L'
elif 'a' in sources_conf:
mode = 'RGBA'
else:
mode = 'RGB'
band_merger = BandMerger(mode=mode)
available_bands = {'r': 0, 'g': 1, 'b': 2, 'a': 3, 'l': 0}
for band, band_sources in iteritems(sources_conf):
band_idx = available_bands.get(band)
if band_idx is None:
raise ConfigurationError("unsupported band '%s' for cache %s"
% (band, self.conf['name']))
for source in band_sources:
band_merger.add_ops(
dst_band=band_idx,
src_img=source['src_idx'],
src_band=source['band'],
factor=source.get('factor', 1.0),
)
return band_merger, sources, source_image_opts
@memoize
def caches(self):
from mapproxy.cache.dummy import DummyCache, DummyLocker
from mapproxy.cache.tile import TileManager
from mapproxy.cache.base import TileLocker
from mapproxy.image.opts import compatible_image_options
from mapproxy.layer import map_extent_from_grid, merge_layer_extents
base_image_opts = self.image_opts()
if self.conf.get('format') == 'mixed' and self.conf.get('request_format') != 'image/png':
raise ConfigurationError('request_format must be set to image/png if mixed mode is enabled')
request_format = self.conf.get('request_format') or self.conf.get('format')
if '/' in request_format:
request_format_ext = request_format.split('/', 1)[1]
else:
request_format_ext = request_format
caches = []
meta_buffer = self.context.globals.get_value('meta_buffer', self.conf,
global_key='cache.meta_buffer')
meta_size = self.context.globals.get_value('meta_size', self.conf,
global_key='cache.meta_size')
bulk_meta_tiles = self.context.globals.get_value('bulk_meta_tiles', self.conf,
global_key='cache.bulk_meta_tiles')
minimize_meta_requests = self.context.globals.get_value('minimize_meta_requests', self.conf,
global_key='cache.minimize_meta_requests')
concurrent_tile_creators = self.context.globals.get_value('concurrent_tile_creators', self.conf,
global_key='cache.concurrent_tile_creators')
renderd_address = self.context.globals.get_value('renderd.address', self.conf)
band_merger = None
for grid_name, grid_conf in self.grid_confs():
if isinstance(self.conf['sources'], dict):
band_merger, sources, source_image_opts = self._sources_for_band_merge(
self.conf['sources'],
grid_conf=grid_conf,
request_format=request_format,
)
else:
sources, source_image_opts = self._sources_for_grid(
self.conf['sources'],
grid_conf=grid_conf,
request_format=request_format,
)
if not sources:
from mapproxy.source import DummySource
sources = [DummySource()]
source_image_opts.append(sources[0].image_opts)
tile_grid = grid_conf.tile_grid()
tile_filter = self._tile_filter()
image_opts = compatible_image_options(source_image_opts, base_opts=base_image_opts)
cache = self._tile_cache(grid_conf, image_opts.format.ext)
identifier = self.conf['name'] + '_' + tile_grid.name
tile_creator_class = None
use_renderd = bool(renderd_address)
if self.context.renderd:
# we _are_ renderd
use_renderd = False
if self.conf.get('disable_storage', False):
# can't ask renderd to create tiles that shouldn't be cached
use_renderd = False
if use_renderd:
from mapproxy.cache.renderd import RenderdTileCreator, has_renderd_support
if not has_renderd_support():
raise ConfigurationError("renderd requires requests library")
if self.context.seed:
priority = 10
else:
priority = 100
cache_dir = self.cache_dir()
lock_dir = self.context.globals.get_value('cache.tile_lock_dir')
if not lock_dir:
lock_dir = os.path.join(cache_dir, 'tile_locks')
lock_timeout = self.context.globals.get_value('http.client_timeout', {})
locker = TileLocker(lock_dir, lock_timeout, identifier + '_renderd')
# TODO band_merger
tile_creator_class = partial(RenderdTileCreator, renderd_address,
priority=priority, tile_locker=locker)
else:
from mapproxy.cache.tile import TileCreator
tile_creator_class = partial(TileCreator, image_merger=band_merger)
if isinstance(cache, DummyCache):
locker = DummyLocker()
else:
locker = TileLocker(
lock_dir=self.lock_dir(),
lock_timeout=self.context.globals.get_value('http.client_timeout', {}),
lock_cache_id=cache.lock_cache_id,
)
mgr = TileManager(tile_grid, cache, sources, image_opts.format.ext,
locker=locker,
image_opts=image_opts, identifier=identifier,
request_format=request_format_ext,
meta_size=meta_size, meta_buffer=meta_buffer,
minimize_meta_requests=minimize_meta_requests,
concurrent_tile_creators=concurrent_tile_creators,
pre_store_filter=tile_filter,
tile_creator_class=tile_creator_class,
bulk_meta_tiles=bulk_meta_tiles,
)
extent = merge_layer_extents(sources)
if extent.is_default:
extent = map_extent_from_grid(tile_grid)
caches.append((tile_grid, extent, mgr))
return caches
@memoize
def grid_confs(self):
grid_names = self.conf.get('grids')
if grid_names is None:
log.warn('cache %s does not have any grids. default will change from [GLOBAL_MERCATOR] to [GLOBAL_WEBMERCATOR] with MapProxy 2.0',
self.conf['name'])
grid_names = ['GLOBAL_MERCATOR']
return [(g, self.context.grids[g]) for g in grid_names]
@memoize
def map_layer(self):
from mapproxy.layer import CacheMapLayer, SRSConditional, ResolutionConditional
image_opts = self.image_opts()
max_tile_limit = self.context.globals.get_value('max_tile_limit', self.conf,
global_key='cache.max_tile_limit')
caches = []
main_grid = None
for grid, extent, tile_manager in self.caches():
if main_grid is None:
main_grid = grid
caches.append((CacheMapLayer(tile_manager, extent=extent, image_opts=image_opts,
max_tile_limit=max_tile_limit),
(grid.srs,)))
if len(caches) == 1:
layer = caches[0][0]
else:
layer = SRSConditional(caches, caches[0][0].extent, opacity=image_opts.opacity)
if 'use_direct_from_level' in self.conf:
self.conf['use_direct_from_res'] = main_grid.resolution(self.conf['use_direct_from_level'])
if 'use_direct_from_res' in self.conf:
if len(self.conf['sources']) != 1:
raise ValueError('use_direct_from_level/res only supports single sources')
source_conf = self.context.sources[self.conf['sources'][0]]
layer = ResolutionConditional(layer, source_conf.source(), self.conf['use_direct_from_res'],
main_grid.srs, layer.extent, opacity=image_opts.opacity)
return layer
class WMSLayerConfiguration(ConfigurationBase):
@memoize
def wms_layer(self):
from mapproxy.service.wms import WMSGroupLayer
layers = []
this_layer = None
if 'layers' in self.conf:
layers_conf = self.conf['layers']
for layer_conf in layers_conf:
lyr = WMSLayerConfiguration(layer_conf, self.context).wms_layer()
if lyr:
layers.append(lyr)
if 'sources' in self.conf or 'legendurl' in self.conf:
this_layer = LayerConfiguration(self.conf, self.context).wms_layer()
if not layers and not this_layer:
return None
if not layers:
layer = this_layer
else:
layer = WMSGroupLayer(name=self.conf.get('name'), title=self.conf.get('title'),
this=this_layer, layers=layers, md=self.conf.get('md'))
return layer
def cache_source_names(context, cache):
"""
Return all sources for a cache, even if a cache uses another cache.
"""
source_names = []
for src in context.caches[cache].conf['sources']:
if src in context.caches and src not in context.sources:
source_names.extend(cache_source_names(context, src))
else:
source_names.append(src)
return source_names
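# Minimal self-contained sketch (not part of MapProxy) of how
# cache_source_names() flattens cache-of-cache configurations. The _Fake*
# classes are hypothetical stand-ins that only model the attributes the
# function reads: context.caches, context.sources and conf['sources'].
def _example_cache_source_names():
    class _FakeCacheConf(object):
        def __init__(self, sources):
            self.conf = {'sources': sources}
    class _FakeContext(object):
        def __init__(self, caches, sources):
            self.caches = caches
            self.sources = sources
    context = _FakeContext(
        caches={
            'outer_cache': _FakeCacheConf(['inner_cache']),
            'inner_cache': _FakeCacheConf(['osm_wms']),
        },
        sources={'osm_wms': object()},
    )
    # 'outer_cache' -> 'inner_cache' -> 'osm_wms'
    return cache_source_names(context, 'outer_cache')  # ['osm_wms']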
class LayerConfiguration(ConfigurationBase):
@memoize
def wms_layer(self):
from mapproxy.service.wms import WMSLayer
sources = []
fi_sources = []
lg_sources = []
lg_sources_configured = False
if self.conf.get('legendurl'):
legend_url = self.conf['legendurl']
lg_sources.append(WMSSourceConfiguration.static_legend_source(legend_url, self.context))
lg_sources_configured = True
for source_name in self.conf.get('sources', []):
fi_source_names = []
lg_source_names = []
if source_name in self.context.caches:
map_layer = self.context.caches[source_name].map_layer()
fi_source_names = cache_source_names(self.context, source_name)
lg_source_names = cache_source_names(self.context, source_name)
elif source_name in self.context.sources:
source_conf = self.context.sources[source_name]
if not source_conf.supports_meta_tiles:
raise ConfigurationError('source "%s" of layer "%s" does not support un-tiled access'
% (source_name, self.conf.get('name')))
map_layer = source_conf.source()
fi_source_names = [source_name]
lg_source_names = [source_name]
else:
raise ConfigurationError('source/cache "%s" not found' % source_name)
if map_layer:
sources.append(map_layer)
for fi_source_name in fi_source_names:
if fi_source_name not in self.context.sources: continue
if not hasattr(self.context.sources[fi_source_name], 'fi_source'): continue
fi_source = self.context.sources[fi_source_name].fi_source()
if fi_source:
fi_sources.append(fi_source)
if not lg_sources_configured:
for lg_source_name in lg_source_names:
if lg_source_name not in self.context.sources: continue
if not hasattr(self.context.sources[lg_source_name], 'lg_source'): continue
lg_source = self.context.sources[lg_source_name].lg_source()
if lg_source:
lg_sources.append(lg_source)
res_range = resolution_range(self.conf)
layer = WMSLayer(self.conf.get('name'), self.conf.get('title'),
sources, fi_sources, lg_sources, res_range=res_range, md=self.conf.get('md'))
return layer
@memoize
def dimensions(self):
from mapproxy.layer import Dimension
dimensions = {}
for dimension, conf in iteritems(self.conf.get('dimensions', {})):
values = [str(val) for val in conf.get('values', ['default'])]
default = conf.get('default', values[-1])
dimensions[dimension.lower()] = Dimension(dimension, values, default=default)
return dimensions
@memoize
def tile_layers(self, grid_name_as_path=False):
from mapproxy.service.tile import TileLayer
from mapproxy.cache.dummy import DummyCache
sources = []
if 'tile_sources' in self.conf:
sources = self.conf['tile_sources']
else:
for source_name in self.conf.get('sources', []):
# we only support caches for tiled access...
if source_name not in self.context.caches:
if source_name in self.context.sources:
src_conf = self.context.sources[source_name].conf
# but we ignore debug layers for convenience
if src_conf['type'] == 'debug':
continue
# and WMS layers with map: False (i.e. FeatureInfo only sources)
if src_conf['type'] == 'wms' and src_conf.get('wms_opts', {}).get('map', True) is False:
continue
return []
sources.append(source_name)
if len(sources) > 1:
return []
dimensions = self.dimensions()
tile_layers = []
for cache_name in sources:
for grid, extent, cache_source in self.context.caches[cache_name].caches():
if dimensions and not isinstance(cache_source.cache, DummyCache):
# caching of dimension layers is not supported yet
raise ConfigurationError(
"caching of dimension layer (%s) is not supported yet."
" need to `disable_storage: true` on %s cache" % (self.conf['name'], cache_name)
)
md = {}
md['title'] = self.conf['title']
md['name'] = self.conf['name']
md['grid_name'] = grid.name
if grid_name_as_path:
md['name_path'] = (md['name'], md['grid_name'])
else:
md['name_path'] = (self.conf['name'], grid.srs.srs_code.replace(':', '').upper())
md['name_internal'] = md['name_path'][0] + '_' + md['name_path'][1]
md['format'] = self.context.caches[cache_name].image_opts().format
md['cache_name'] = cache_name
md['extent'] = extent
tile_layers.append(TileLayer(self.conf['name'], self.conf['title'],
md, cache_source, dimensions=dimensions))
return tile_layers
def fi_xslt_transformers(conf, context):
from mapproxy.featureinfo import XSLTransformer, has_xslt_support
fi_transformers = {}
fi_xslt = conf.get('featureinfo_xslt')
if fi_xslt:
if not has_xslt_support:
raise ValueError('featureinfo_xslt requires lxml. Please install.')
for info_type, fi_xslt in fi_xslt.items():
fi_xslt = context.globals.abspath(fi_xslt)
fi_transformers[info_type] = XSLTransformer(fi_xslt)
return fi_transformers
def extents_for_srs(bbox_srs):
from mapproxy.layer import DefaultMapExtent, MapExtent
from mapproxy.srs import SRS
extents = {}
for srs in bbox_srs:
if isinstance(srs, str):
bbox = DefaultMapExtent()
else:
srs, bbox = srs['srs'], srs['bbox']
bbox = MapExtent(bbox, SRS(srs))
extents[srs] = bbox
return extents
class ServiceConfiguration(ConfigurationBase):
def __init__(self, conf, context):
if 'wms' in conf:
if conf['wms'] is None:
conf['wms'] = {}
if 'md' not in conf['wms']:
conf['wms']['md'] = {'title': 'MapProxy WMS'}
ConfigurationBase.__init__(self, conf, context)
def services(self):
services = []
ows_services = []
for service_name, service_conf in iteritems(self.conf):
creator = getattr(self, service_name + '_service', None)
if not creator:
raise ValueError('unknown service: %s' % service_name)
new_services = creator(service_conf or {})
# a creator can return a list of services...
if not isinstance(new_services, (list, tuple)):
new_services = [new_services]
for new_service in new_services:
if getattr(new_service, 'service', None):
ows_services.append(new_service)
else:
services.append(new_service)
if ows_services:
from mapproxy.service.ows import OWSServer
services.append(OWSServer(ows_services))
return services
def tile_layers(self, conf, use_grid_names=False):
layers = odict()
for layer_name, layer_conf in iteritems(self.context.layers):
for tile_layer in layer_conf.tile_layers(grid_name_as_path=use_grid_names):
if not tile_layer: continue
if use_grid_names:
layers[tile_layer.md['name_path']] = tile_layer
else:
layers[tile_layer.md['name_internal']] = tile_layer
return layers
def kml_service(self, conf):
from mapproxy.service.kml import KMLServer
md = self.context.services.conf.get('wms', {}).get('md', {}).copy()
md.update(conf.get('md', {}))
max_tile_age = self.context.globals.get_value('tiles.expires_hours')
max_tile_age *= 60 * 60 # seconds
use_grid_names = conf.get('use_grid_names', False)
layers = self.tile_layers(conf, use_grid_names=use_grid_names)
return KMLServer(layers, md, max_tile_age=max_tile_age, use_dimension_layers=use_grid_names)
def tms_service(self, conf):
from mapproxy.service.tile import TileServer
md = self.context.services.conf.get('wms', {}).get('md', {}).copy()
md.update(conf.get('md', {}))
max_tile_age = self.context.globals.get_value('tiles.expires_hours')
max_tile_age *= 60 * 60 # seconds
origin = conf.get('origin')
use_grid_names = conf.get('use_grid_names', False)
layers = self.tile_layers(conf, use_grid_names=use_grid_names)
return TileServer(layers, md, max_tile_age=max_tile_age, use_dimension_layers=use_grid_names,
origin=origin)
def wmts_service(self, conf):
from mapproxy.service.wmts import WMTSServer, WMTSRestServer
md = self.context.services.conf.get('wms', {}).get('md', {}).copy()
md.update(conf.get('md', {}))
layers = self.tile_layers(conf, use_grid_names=True)
kvp = conf.get('kvp')
restful = conf.get('restful')
max_tile_age = self.context.globals.get_value('tiles.expires_hours')
max_tile_age *= 60 * 60 # seconds
if kvp is None and restful is None:
kvp = restful = True
services = []
if kvp:
services.append(WMTSServer(layers, md, max_tile_age=max_tile_age))
if restful:
template = conf.get('restful_template')
if template and '{{' in template:
# TODO remove warning in 1.6
log.warn("double braces in WMTS restful_template are deprecated {{x}} -> {x}")
services.append(WMTSRestServer(layers, md, template=template,
max_tile_age=max_tile_age))
return services
def wms_service(self, conf):
from mapproxy.service.wms import WMSServer
from mapproxy.request.wms import Version
md = conf.get('md', {})
inspire_md = conf.get('inspire_md', {})
tile_layers = self.tile_layers(conf)
attribution = conf.get('attribution')
strict = self.context.globals.get_value('strict', conf, global_key='wms.strict')
on_source_errors = self.context.globals.get_value('on_source_errors',
conf, global_key='wms.on_source_errors')
root_layer = self.context.wms_root_layer.wms_layer()
if not root_layer:
raise ConfigurationError("found no WMS layer")
if not root_layer.title:
# set title of root layer to WMS title
root_layer.title = md.get('title')
concurrent_layer_renderer = self.context.globals.get_value(
'concurrent_layer_renderer', conf,
global_key='wms.concurrent_layer_renderer')
image_formats_names = self.context.globals.get_value('image_formats', conf,
global_key='wms.image_formats')
image_formats = odict()
for format in image_formats_names:
opts = self.context.globals.image_options.image_opts({}, format)
if opts.format in image_formats:
log.warn('duplicate mime-type for WMS image_formats: "%s" already configured, will use last format',
opts.format)
image_formats[opts.format] = opts
info_types = conf.get('featureinfo_types')
srs = self.context.globals.get_value('srs', conf, global_key='wms.srs')
self.context.globals.base_config.wms.srs = srs
srs_extents = extents_for_srs(conf.get('bbox_srs', []))
versions = conf.get('versions')
if versions:
versions = sorted([Version(v) for v in versions])
max_output_pixels = self.context.globals.get_value('max_output_pixels', conf,
global_key='wms.max_output_pixels')
if isinstance(max_output_pixels, list):
max_output_pixels = max_output_pixels[0] * max_output_pixels[1]
max_tile_age = self.context.globals.get_value('tiles.expires_hours')
max_tile_age *= 60 * 60 # seconds
server = WMSServer(root_layer, md, attribution=attribution,
image_formats=image_formats, info_types=info_types,
srs=srs, tile_layers=tile_layers, strict=strict, on_error=on_source_errors,
concurrent_layer_renderer=concurrent_layer_renderer,
max_output_pixels=max_output_pixels, srs_extents=srs_extents,
max_tile_age=max_tile_age, versions=versions,
inspire_md=inspire_md,
)
server.fi_transformers = fi_xslt_transformers(conf, self.context)
return server
def demo_service(self, conf):
from mapproxy.service.demo import DemoServer
services = list(self.context.services.conf.keys())
md = self.context.services.conf.get('wms', {}).get('md', {}).copy()
md.update(conf.get('md', {}))
layers = odict()
for layer_name, layer_conf in iteritems(self.context.layers):
lyr = layer_conf.wms_layer()
if lyr:
layers[layer_name] = lyr
tile_layers = self.tile_layers(conf)
image_formats = self.context.globals.get_value('image_formats', conf, global_key='wms.image_formats')
srs = self.context.globals.get_value('srs', conf, global_key='wms.srs')
# WMTS restful template
wmts_conf = self.context.services.conf.get('wmts', {}) or {}
from mapproxy.service.wmts import WMTSRestServer
if wmts_conf:
restful_template = wmts_conf.get('restful_template', WMTSRestServer.default_template)
else:
restful_template = WMTSRestServer.default_template
if 'wmts' in self.context.services.conf:
kvp = wmts_conf.get('kvp')
restful = wmts_conf.get('restful')
if kvp is None and restful is None:
kvp = restful = True
if kvp:
services.append('wmts_kvp')
if restful:
services.append('wmts_restful')
if 'wms' in self.context.services.conf:
versions = self.context.services.conf['wms'].get('versions', ['1.1.1'])
if '1.1.1' in versions:
# demo service only supports 1.1.1, use wms_111 as an indicator
services.append('wms_111')
return DemoServer(layers, md, tile_layers=tile_layers,
image_formats=image_formats, srs=srs, services=services, restful_template=restful_template)
def load_configuration(mapproxy_conf, seed=False, ignore_warnings=True, renderd=False):
conf_base_dir = os.path.abspath(os.path.dirname(mapproxy_conf))
# A configuration is checked/validated four times, each step has a different
# focus and returns different errors. The steps are:
# 1. YAML loading: checks YAML syntax like tabs vs. space, indentation errors, etc.
# 2. Options: checks all options against the spec and validates their types,
# e.g is disable_storage a bool, is layers a list, etc.
# 3. References: checks if all referenced caches, sources and grids exist
# 4. Initialization: creates all MapProxy objects, returns on first error
try:
conf_dict = load_configuration_file([os.path.basename(mapproxy_conf)], conf_base_dir)
except YAMLError as ex:
raise ConfigurationError(ex)
errors, informal_only = validate_options(conf_dict)
for error in errors:
log.warn(error)
if not informal_only or (errors and not ignore_warnings):
raise ConfigurationError('invalid configuration')
errors = validate_references(conf_dict)
for error in errors:
log.warn(error)
return ProxyConfiguration(conf_dict, conf_base_dir=conf_base_dir, seed=seed,
renderd=renderd)
def load_configuration_file(files, working_dir):
"""
Return configuration dict from imported files
"""
# record all config files with timestamp for reloading
conf_dict = {'__config_files__': {}}
for conf_file in files:
conf_file = os.path.normpath(os.path.join(working_dir, conf_file))
log.info('reading: %s' % conf_file)
current_dict = load_yaml_file(conf_file)
conf_dict['__config_files__'][os.path.abspath(conf_file)] = os.path.getmtime(conf_file)
if 'base' in current_dict:
current_working_dir = os.path.dirname(conf_file)
base_files = current_dict.pop('base')
if isinstance(base_files, string_type):
base_files = [base_files]
imported_dict = load_configuration_file(base_files, current_working_dir)
current_dict = merge_dict(current_dict, imported_dict)
conf_dict = merge_dict(conf_dict, current_dict)
return conf_dict
def merge_dict(conf, base):
"""
Return `base` dict with values from `conf` merged in.
"""
for k, v in iteritems(conf):
if k not in base:
base[k] = v
else:
if isinstance(base[k], dict):
merge_dict(v, base[k])
else:
base[k] = v
return base
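# Hedged usage sketch for merge_dict(): values from `conf` take precedence,
# nested dicts are merged recursively, and the (mutated) `base` dict is
# returned. The option names below are purely illustrative.
def _example_merge_dict():
    base = {'globals': {'cache': {'meta_size': [4, 4]}}, 'services': {'demo': {}}}
    conf = {'globals': {'cache': {'meta_size': [2, 2]}}}
    merged = merge_dict(conf, base)
    # merged == {'globals': {'cache': {'meta_size': [2, 2]}}, 'services': {'demo': {}}}
    return merged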
def parse_color(color):
"""
>>> parse_color((100, 12, 55))
(100, 12, 55)
>>> parse_color('0xff0530')
(255, 5, 48)
>>> parse_color('#FF0530')
(255, 5, 48)
>>> parse_color('#FF053080')
(255, 5, 48, 128)
"""
if isinstance(color, (list, tuple)) and 3 <= len(color) <= 4:
return tuple(color)
if not isinstance(color, string_type):
raise ValueError('color needs to be a tuple/list or 0xrrggbb/#rrggbb(aa) string, got %r' % color)
if color.startswith('0x'):
color = color[2:]
if color.startswith('#'):
color = color[1:]
r, g, b = map(lambda x: int(x, 16), [color[:2], color[2:4], color[4:6]])
if len(color) == 8:
a = int(color[6:8], 16)
return r, g, b, a
return r, g, b
| apache-2.0 |
xyguo/scikit-learn | sklearn/decomposition/base.py | 310 | 5647 | """Principal Component Analysis Base Classes"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis A. Engemann <d.engemann@fz-juelich.de>
# Kyle Kastner <kastnerkyle@gmail.com>
#
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted
from ..externals import six
from abc import ABCMeta, abstractmethod
class _BasePCA(six.with_metaclass(ABCMeta, BaseEstimator, TransformerMixin)):
"""Base class for PCA methods.
Warning: This class should not be used directly.
Use derived classes instead.
"""
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances, and sigma2 contains the
noise variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
@abstractmethod
def fit(self, X, y=None):
"""Placeholder for fit. Subclasses should implement this method!
Fit the model with X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
def transform(self, X, y=None):
"""Apply dimensionality reduction to X.
X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import IncrementalPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> ipca = IncrementalPCA(n_components=2, batch_size=3)
>>> ipca.fit(X)
IncrementalPCA(batch_size=3, copy=True, n_components=2, whiten=False)
>>> ipca.transform(X) # doctest: +SKIP
"""
check_is_fitted(self, ['mean_', 'components_'], all_or_any=all)
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
In other words, return an input X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original : array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform will compute the
exact inverse operation, which includes reversing whitening.
"""
if self.whiten:
return fast_dot(X, np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
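# Standalone sketch (not part of scikit-learn) illustrating the relationship
# implemented by get_covariance() and get_precision() above: the precision
# computed via the matrix inversion lemma equals the inverse of the generative
# covariance. It only uses the numpy/scipy imports of this module; all shapes
# and values are made up, and the function is never called by the library.
def _example_covariance_precision_consistency():
    rng = np.random.RandomState(0)
    n_components, n_features = 2, 5
    components = rng.randn(n_components, n_features)
    exp_var_diff = np.array([2.5, 1.5])  # explained variance minus noise
    noise = 0.5                          # plays the role of noise_variance_
    # Generative covariance, as in get_covariance() (non-whitened case).
    cov = np.dot(components.T * exp_var_diff, components) + noise * np.eye(n_features)
    # Precision via the matrix inversion lemma, as in get_precision().
    precision = np.dot(components, components.T) / noise
    precision.flat[::n_components + 1] += 1. / exp_var_diff
    precision = np.dot(components.T, np.dot(linalg.inv(precision), components))
    precision /= -(noise ** 2)
    precision.flat[::n_features + 1] += 1. / noise
    assert np.allclose(precision, linalg.inv(cov))
    return precision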
| bsd-3-clause |
google/uncertainty-baselines | experimental/language_structure/vrnn/linear_vae_cell.py | 1 | 24349 | # coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VAE Cell in VRNN.
VAE Cell is the core component of VRNN [1]. It's migrated from the Pytorch
version [2].
## References:
[1]: Qiu Liang et al. Structured Attention for Unsupervised Dialogue Structure
Induction.
_arXiv preprint arXiv:2009.08552, 2020.
https://arxiv.org/pdf/2009.08552.pdf
[2]: https://github.com/Liang-Qiu/SVRNN-dialogues
"""
from typing import Any, Dict, Optional, Sequence, Union
import numpy as np
import tensorflow as tf
import bert_utils # local file import from baselines.clinc_intent
from vrnn import model_config # local file import from experimental.language_structure
from vrnn import utils # local file import from experimental.language_structure
from official.nlp.bert import bert_models
from official.nlp.bert import configs
INPUT_ID_NAME = 'input_word_ids'
INPUT_MASK_NAME = 'input_mask'
_TensorMapList = Sequence[Dict[str, tf.Tensor]]
class _BERT(tf.keras.Model):
"""BERT model."""
def __init__(self, max_seq_length: int, bert_config: configs.BertConfig,
trainable: bool):
"""BERT class constructor.
Args:
max_seq_length: the maximum input sequence length.
bert_config: Configuration for a BERT model.
trainable: whether the model is trainable.
"""
super(_BERT, self).__init__()
self.bert_model = bert_models.get_transformer_encoder(
bert_config, max_seq_length)
self._trainable = trainable
self._vocab_size = bert_config.vocab_size
def call(self,
inputs: Dict[str, tf.Tensor],
return_sequence: bool = True) -> tf.Tensor:
sequence_output, cls_output = self.bert_model(inputs)
if not self._trainable:
sequence_output = tf.stop_gradient(sequence_output)
cls_output = tf.stop_gradient(cls_output)
if return_sequence:
return sequence_output
else:
return cls_output
@property
def vocab_size(self):
return self._vocab_size
class _Embedding(tf.keras.layers.Embedding):
"""Word embedding layer.
A wrapper class of tf.keras.layers.Embedding. It receives a
Dict[str, tf.Tensor] containing key ${input_id_key} and returns the embedding.
"""
def __init__(self, input_id_key: str, vocab_size: int, embed_size: int,
trainable: bool, **kwargs: Dict[str, Any]):
super(_Embedding, self).__init__(vocab_size, embed_size, **kwargs)
self._input_id_key = input_id_key
self._trainable = trainable
self._vocab_size = vocab_size
def call(self, inputs: Dict[str, tf.Tensor]) -> tf.Tensor:
outputs = super().call(inputs[self._input_id_key])
if not self._trainable:
outputs = tf.stop_gradient(outputs)
return outputs
@property
def vocab_size(self):
return self._vocab_size
def _build_embedding_layer(config: model_config.EmbeddingConfig,
max_seq_length: int):
"""Creates embedding layer of the specific `embedding_type`."""
if config.embedding_type == model_config.GLOVE_EMBED:
# If word_embedding_path is specified, use the embedding size of the
# pre-trained embeddings.
if config.word_embedding_path:
with tf.io.gfile.GFile(config.word_embedding_path,
'rb') as embedding_file:
word_embedding = np.load(embedding_file)
vocab_size, embed_size = word_embedding.shape
if config.vocab_size != vocab_size:
raise ValueError(
'Expected consistent vocab size between vocab.txt and the '
'embedding, found {} and {}.'.format(vocab_size, config.vocab_size))
config.embed_size = embed_size
embeddings_initializer = (tf.keras.initializers.Constant(word_embedding))
else:
embeddings_initializer = None
return _Embedding(
INPUT_ID_NAME,
config.vocab_size,
config.embed_size,
embeddings_initializer=embeddings_initializer,
input_length=max_seq_length,
trainable=config.trainable_embedding)
elif config.embedding_type == model_config.BERT_EMBED:
return _BERT(
max_seq_length,
bert_config=configs.BertConfig(**config.bert_config),
trainable=config.trainable_embedding)
raise ValueError('Invalid embedding type {}, expected {} or {}'.format(
config.embedding_type, model_config.GLOVE_EMBED, model_config.BERT_EMBED))
class _DualRNN(tf.keras.Model):
"""Dual RNN base class.
It receives two sentences (with masks) and the initial state of RNN, returns
the outputs of two RNNs.
To use the class, one needs to create an inheriting class implementing
_run_dual_rnn().
"""
def __init__(self,
hidden_size: int,
embedding_layer: Union[_BERT, _Embedding],
num_layers: int = 1,
dropout: float = 0.5,
cell_type: Optional[str] = 'lstm',
return_state: Optional[bool] = False,
**kwargs: Dict[str, Any]):
"""Dual RNN base class constructor.
Args:
hidden_size: the hidden layer size of the RNN.
embedding_layer: an embedding layer to be used.
num_layers: number of layers of the RNN.
dropout: dropout rate.
cell_type: the RNN cell type.
return_state: whether to include the final state in the outputs.
**kwargs: optional arguments from children classes to be passed to
_run_dual_rnn
"""
super(_DualRNN, self).__init__()
self._hidden_size = hidden_size
self._num_layers = num_layers
self._dropout = dropout
self._cell_type = cell_type
self._return_state = return_state
self.embedding_layer = embedding_layer
def build(self, input_shape):
self.dropout = tf.keras.layers.Dropout(self._dropout)
def call(self, input_1, input_2, initial_state, **kwargs):
embed_1 = self.embedding_layer(input_1)
embed_2 = self.embedding_layer(input_2)
input_mask_1 = self._get_input_mask(input_1)
input_mask_2 = self._get_input_mask(input_2)
output_1, output_2, state_1, state_2 = self._run_dual_rnn(
embed_1, embed_2, input_mask_1, input_mask_2, initial_state, **kwargs)
if self._return_state:
return output_1, output_2, state_1, state_2
return output_1, output_2
def _get_input_mask(self, inputs: Dict[str, tf.Tensor]) -> tf.Tensor:
return inputs.get(INPUT_MASK_NAME,
tf.ones_like(inputs[INPUT_ID_NAME], dtype=tf.int32))
def _run_dual_rnn(self, input_1, input_2, input_mask_1, input_mask_2,
initial_state, **kwargs):
raise NotImplementedError('Must implement method _run_dual_rnn.')
def _split_hidden_state_and_cell_state(self, state: Sequence[Any]):
"""Split the state into the hidden state (`h`) and optionally the cell state (`c`).
When the cell state is a tuple (e.g., LSTM), `state` is of format
[(h_1, c_1), (h_2, c_2), ..., (h_n, c_n)]
where n is the num_layers.
Otherwise `state` is of format
[h_1, h_2, ..., h_n]
Args:
state: the cell state of each layer of the RNN.
Returns:
a tuple of the hidden state and (the cell state or None).
"""
if utils.state_is_tuple(self._cell_type):
return [s[0] for s in state], [s[1] for s in state]
return state, None
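# Small illustrative helper (not part of the library) mirroring the two state
# layouts documented in _split_hidden_state_and_cell_state(): for tuple-state
# cells such as LSTM the per-layer (h, c) pairs are unzipped into separate
# hidden/cell lists, otherwise the state already is the list of hidden states.
# The tensors are placeholders built with tf.zeros/tf.ones just to show shapes.
def _example_split_state(num_layers=2, state_is_tuple=True):
  if state_is_tuple:
    # [(h_1, c_1), ..., (h_n, c_n)] -> ([h_1, ..., h_n], [c_1, ..., c_n])
    state = [(tf.zeros([1, 4]), tf.ones([1, 4])) for _ in range(num_layers)]
    return [s[0] for s in state], [s[1] for s in state]
  # [h_1, ..., h_n] -> ([h_1, ..., h_n], None)
  state = [tf.zeros([1, 4]) for _ in range(num_layers)]
  return state, None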
class DualRNNEncoder(_DualRNN):
"""Dual RNN encoder."""
def _create_rnn(self):
cells = [
utils.get_rnn_cell(self._cell_type)(units=self._hidden_size)
for _ in range(self._num_layers)
]
return tf.keras.layers.RNN(cells, return_state=True, return_sequences=True)
def build(self, input_shape):
super().build(input_shape)
self.sent_rnn = self._create_rnn()
def _run_dual_rnn(self, input_1, input_2, input_mask_1, input_mask_2,
initial_state, **unused_kwargs):
del initial_state # Not used
output_1, state_1 = self._run_rnn(input_1, input_mask_1)
output_2, state_2 = self._run_rnn(input_2, input_mask_2)
hidden_state_1, _ = self._split_hidden_state_and_cell_state(state_1)
hidden_state_2, _ = self._split_hidden_state_and_cell_state(state_2)
return output_1, output_2, hidden_state_1, hidden_state_2
def _run_rnn(self, inputs, input_mask):
outputs = self.sent_rnn(inputs)
output = outputs[0]
state = outputs[1:]
seqlen = tf.reduce_sum(input_mask, axis=1)
final_step_output = utils.get_last_step(output, seqlen)
final_step_output = self.dropout(final_step_output)
return final_step_output, state
class DualRNNDecoder(_DualRNN):
"""Dual RNN decoder."""
def _create_rnn(self, hidden_size):
cells = [
utils.get_rnn_cell(self._cell_type)(
units=hidden_size, dropout=self._dropout)
for _ in range(self._num_layers)
]
return tf.keras.layers.RNN(cells, return_state=True, return_sequences=True)
def build(self, input_shape):
super().build(input_shape)
self.dec_rnn_1 = self._create_rnn(self._hidden_size)
self.project_1 = tf.keras.layers.Dense(self.embedding_layer.vocab_size)
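    # Note (added for clarity): the second decoder RNN uses twice the hidden size
    # because its initial state is the concatenation of the provided initial state
    # with the first decoder RNN's final hidden state (see _rnn2_initial_state).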
self.dec_rnn_2 = self._create_rnn(self._hidden_size * 2)
self.project_2 = tf.keras.layers.Dense(self.embedding_layer.vocab_size)
def _run_dual_rnn(self, input_1, input_2, input_mask_1, input_mask_2,
initial_state, **unused_kwargs):
initial_state_1 = self._rnn1_initial_state(initial_state)
output_1, state_1 = self._run_rnn(input_1, input_mask_1, initial_state_1,
self.dec_rnn_1, self.dropout,
self.project_1)
initial_state_2 = self._rnn2_initial_state(initial_state, state_1)
output_2, state_2 = self._run_rnn(input_2, input_mask_2, initial_state_2,
self.dec_rnn_2, self.dropout,
self.project_2)
hidden_state_1, _ = self._split_hidden_state_and_cell_state(state_1)
hidden_state_2, _ = self._split_hidden_state_and_cell_state(state_2)
return output_1, output_2, hidden_state_1, hidden_state_2
def _run_rnn(self, inputs, input_mask, initial_state, rnn, dropout,
projection_layer):
del input_mask # Not used
outputs = rnn(inputs, initial_state=initial_state)
final_state = outputs[1:]
outputs = dropout(outputs[0])
outputs = projection_layer(outputs)
return outputs, final_state
def _concat_states(self, state_1: Sequence[tf.Tensor],
state_2: Sequence[tf.Tensor]) -> Sequence[tf.Tensor]:
return [tf.concat([s1, s2], axis=1) for s1, s2 in zip(state_1, state_2)]
def _rnn1_initial_state(self,
initial_state: Sequence[tf.Tensor]) -> Sequence[Any]:
if utils.state_is_tuple(self._cell_type):
return list(zip(initial_state, initial_state))
return initial_state
def _rnn2_initial_state(self, initial_state: Sequence[tf.Tensor],
rnn1_final_state: Sequence[Any]) -> Sequence[Any]:
(rnn1_final_hidden_state, rnn1_final_cell_state
) = self._split_hidden_state_and_cell_state(rnn1_final_state)
initial_hidden_state = self._concat_states(initial_state,
rnn1_final_hidden_state)
if utils.state_is_tuple(self._cell_type):
initial_cell_state = self._concat_states(initial_state,
rnn1_final_cell_state)
return list(zip(initial_hidden_state, initial_cell_state))
return initial_hidden_state
class _VAECell(tf.keras.layers.Layer):
"""VAE Cell base class.
It receives two sentences (with masks) and the initial state as inputs and
returns multiple
outputs (see call() for details).
It encodes->samples->decodes the inputs and updates the state in the
meanwhile. However, the connection between any of two sequential components
are component dependent. For example, the output of the sampler may not be
fitted as the input of decoder. So, to use the class, one needs to create an
inheriting class implementing
the "glue" methods such as `_post_process_samples()`.
"""
def __init__(self, encoder, sampler, decoder, state_updater):
super(_VAECell, self).__init__()
self.encoder = encoder
self.sampler = sampler
self.decoder = decoder
self.state_updater = state_updater
def _verify_and_prepare_inputs(self, inputs: Sequence[Any]):
if len(inputs) not in (5, 7):
raise ValueError(
'Inputs should be a sequence of length 5 (encoder_input_1, '
          'encoder_input_2, decoder_input_1, decoder_input_2, state) or 7 '
          '(encoder_input_1, encoder_input_2, decoder_input_1, decoder_input_2, '
'state, label, label_mask), found %s' % len(inputs))
(encoder_input_1, encoder_input_2, decoder_input_1, decoder_input_2,
state) = inputs[:5]
if len(inputs) == 7:
label, label_mask = inputs[5:]
else:
label = None
label_mask = None
return (encoder_input_1, encoder_input_2, decoder_input_1, decoder_input_2,
state, label, label_mask)
def _may_extract_from_tuple_state(self, state):
if isinstance(state, (list, tuple)):
return state[0]
return state
def _post_process_samples(self, samples: tf.Tensor) -> tf.Tensor:
raise NotImplementedError('Must implement method to post-process samples.')
def _project_encoder_outputs(self, inputs: Sequence[tf.Tensor]):
raise NotImplementedError(
'Must implement method to project encoder outputs.')
def _prepare_encoder_initial_state(self,
inputs: Sequence[tf.Tensor]) -> tf.Tensor:
raise NotImplementedError(
'Must implement method to prepare encoder initial state.')
def _prepare_decoder_initial_state(
self, inputs: Sequence[tf.Tensor]) -> Sequence[tf.Tensor]:
raise NotImplementedError(
'Must implement method to prepare decoder initial state.')
def _prepare_decoder_inputs(
self, inputs: Sequence[tf.Tensor]) -> Sequence[tf.Tensor]:
return inputs
def _prepare_state_updater_inputs(self, inputs: Sequence[Any]):
raise NotImplementedError(
'Must implement method to prepare state updater inputs.')
def _post_process_decoder_state(self,
state: Sequence[tf.Tensor]) -> tf.Tensor:
return state[0]
def _prepare_sample_logits(self, logits: tf.Tensor, label: Any,
label_mask: Any) -> tf.Tensor:
del self, label, label_mask # Unused.
return logits
def is_tuple_state(self):
return self.state_updater.is_tuple_state()
def call(self,
inputs: Sequence[Any],
return_states: Optional[bool] = False,
return_samples: Optional[bool] = False):
(encoder_input_1, encoder_input_2, decoder_input_1, decoder_input_2, state,
label, label_mask) = self._verify_and_prepare_inputs(inputs)
initial_state = self._may_extract_from_tuple_state(state)
encoder_initial_state = self._prepare_encoder_initial_state([initial_state])
encoder_outputs_1, encoder_outputs_2 = self.encoder(encoder_input_1,
encoder_input_2,
encoder_initial_state)
latent_state, sampler_inputs = self._project_encoder_outputs(
[encoder_outputs_1, encoder_outputs_2, initial_state])
sample_logits = self._prepare_sample_logits(sampler_inputs, label,
label_mask)
samples = self.sampler(sample_logits)
samples_processed = self._post_process_samples(samples)
decoder_initial_state = self._prepare_decoder_initial_state(
[initial_state, samples_processed])
decoder_input_1, decoder_input_2 = self._prepare_decoder_inputs(
[decoder_input_1, decoder_input_2])
(decoder_outputs_1, decoder_outputs_2, decoder_state_1,
decoder_state_2) = self.decoder(decoder_input_1, decoder_input_2,
decoder_initial_state)
decoder_state_1 = self._post_process_decoder_state(decoder_state_1)
decoder_state_2 = self._post_process_decoder_state(decoder_state_2)
decoder_initial_state = self._post_process_decoder_state(
decoder_initial_state)
state_updater_inputs = self._prepare_state_updater_inputs([
samples_processed,
encoder_outputs_1,
encoder_outputs_2,
state,
])
next_state = self.state_updater(state_updater_inputs)
outputs = [
sample_logits, decoder_outputs_1, decoder_outputs_2, next_state,
latent_state
]
if return_states:
outputs += [decoder_initial_state, decoder_state_1, decoder_state_2]
if return_samples:
outputs.append(samples)
return outputs
class _VanillaStateUpdater(tf.keras.layers.Layer):
"""Vanilla hidden state updater."""
def __init__(self, cell_type, units, dropout):
del dropout
super(_VanillaStateUpdater, self).__init__()
self._cell_type = cell_type
self._units = units
self.cell = utils.get_rnn_cell(cell_type)(units)
def call(self, inputs):
if not isinstance(inputs, (list, tuple)) or len(inputs) != 2:
      raise ValueError('Expected inputs to be a sequence of length 2.')
inputs, state = inputs
_, state = self.cell(inputs, state)
return state
def is_tuple_state(self):
return utils.state_is_tuple(self._cell_type)
class _VanillaEncoderOutputProjector(tf.keras.layers.Layer):
"""The layer to project vanilla vae cell's encoder outputs."""
def __init__(self, hidden_sizes: Sequence[int], output_size: int,
dropout: float):
super(_VanillaEncoderOutputProjector, self).__init__()
self.mlp = utils.MLP(hidden_sizes, dropout=dropout)
self.project_layer = tf.keras.layers.Dense(output_size)
def call(self, inputs: Sequence[Any]):
if len(inputs) < 3:
raise ValueError(
          'Expected inputs to be a sequence of length greater than 2, found {}.'
.format(len(inputs)))
encoder_input_1, encoder_input_2, initial_state = inputs[:3]
inputs = tf.concat([initial_state, encoder_input_1, encoder_input_2],
axis=1)
hidden = self.mlp(inputs)
return hidden, self.project_layer(hidden)
class VanillaLinearVAECell(_VAECell):
"""Vanilla linear VAE Cell class."""
def __init__(self, config: model_config.VanillaLinearVAECellConfig):
model_config.verify_embedding_configs(config.encoder_embedding,
config.decoder_embedding,
config.shared_embedding)
# Creates embedding layers for encoder and decoder.
self.encoder_embedding_layer = _build_embedding_layer(
config.encoder_embedding, config.max_seq_length)
if config.shared_embedding:
self.decoder_embedding_layer = self.encoder_embedding_layer
self.shared_embedding_layer = self.encoder_embedding_layer
else:
self.decoder_embedding_layer = _build_embedding_layer(
config.decoder_embedding, config.max_seq_length)
self.shared_embedding_layer = None
encoder = DualRNNEncoder(
hidden_size=config.encoder_hidden_size,
embedding_layer=self.encoder_embedding_layer,
num_layers=config.num_ecnoder_rnn_layers,
dropout=config.dropout,
cell_type=config.encoder_cell_type)
sampler = utils.GumbelSoftmaxSampler(config.temperature, hard=False)
decoder = DualRNNDecoder(
hidden_size=config.decoder_hidden_size,
embedding_layer=self.decoder_embedding_layer,
# Hardcoded to be 1 layer to align with pytorch version. Otherwise, we
# need to define the initial state for each layer in
# _prepare_decoder_initial_state and change _post_process_decoder_state
num_layers=1,
dropout=config.dropout,
cell_type=config.decoder_cell_type,
return_state=True)
state_updater = _VanillaStateUpdater(config.state_updater_cell_type,
config.num_states, config.dropout)
self._gumbel_softmax_label_adjustment_multiplier = (
config.gumbel_softmax_label_adjustment_multiplier)
self.encoder_output_projector = _VanillaEncoderOutputProjector(
hidden_sizes=list(config.encoder_projection_sizes),
output_size=config.num_states,
dropout=config.dropout)
self.sample_post_processor = utils.MLP(
config.sampler_post_processor_output_sizes, dropout=config.dropout)
super(VanillaLinearVAECell, self).__init__(
encoder=encoder,
sampler=sampler,
decoder=decoder,
state_updater=state_updater)
def init_bert_embedding_layers(
self, config: model_config.VanillaLinearVAECellConfig):
if config.encoder_embedding.embedding_type == model_config.BERT_EMBED:
(self.encoder_embedding_layer, _,
_) = bert_utils.load_bert_weight_from_ckpt(
bert_model=self.encoder_embedding_layer,
bert_ckpt_dir=config.encoder_embedding.bert_ckpt_dir)
if config.decoder_embedding.embedding_type == model_config.BERT_EMBED:
(self.decoder_embedding_layer, _,
_) = bert_utils.load_bert_weight_from_ckpt(
bert_model=self.decoder_embedding_layer,
bert_ckpt_dir=config.encoder_embedding.bert_ckpt_dir)
def _post_process_samples(self, samples: tf.Tensor) -> tf.Tensor:
return self.sample_post_processor(samples)
def _project_encoder_outputs(self, inputs: Sequence[tf.Tensor]):
return self.encoder_output_projector(inputs)
def _prepare_encoder_initial_state(self, inputs: Sequence[tf.Tensor]):
    # The encoder doesn't use an external initial state.
return None
def _prepare_decoder_initial_state(
self, inputs: Sequence[tf.Tensor]) -> Sequence[tf.Tensor]:
if len(inputs) < 2:
raise ValueError(
          'Expected inputs to be a sequence of length greater than 1, found {}.'
.format(len(inputs)))
initial_state, samples_processed = inputs[0], inputs[1]
return [tf.concat([initial_state, samples_processed], axis=1)]
def _prepare_decoder_inputs( # pytype: disable=signature-mismatch # overriding-parameter-type-checks
self, inputs: _TensorMapList) -> _TensorMapList:
last_step_removed = [
{key: value[:, :-1] for key, value in input.items()} for input in inputs
]
return last_step_removed
def _prepare_state_updater_inputs(self, inputs: Sequence[Any]):
if len(inputs) < 4:
raise ValueError(
          'Expected inputs to be a sequence of length greater than 3, found {}.'
.format(len(inputs)))
samples_processed, encoder_inputs_1, encoder_inputs_2, state = inputs[:4]
inputs = tf.concat([samples_processed, encoder_inputs_1, encoder_inputs_2],
axis=1)
return [inputs, state]
def _prepare_sample_logits(self, logits: tf.Tensor,
label: Optional[tf.Tensor],
label_mask: Any) -> tf.Tensor:
if label is None and label_mask is None:
return super()._prepare_sample_logits(logits, label, label_mask)
if label is None or label_mask is None:
raise ValueError(
'label and label_mask must be both specified, found one is None')
# Add weighted one-hot label to the sample logits.
# See https://aclanthology.org/2021.naacl-main.374.pdf for details.
# Expand the dimension for broadcast multiply.
label_mask = tf.expand_dims(label_mask, axis=-1)
    logits_label_adjustment = tf.norm(
        logits, axis=-1, keepdims=True) * tf.cast(
            label, logits.dtype) * tf.cast(label_mask, logits.dtype)
    return (logits + self._gumbel_softmax_label_adjustment_multiplier *
            logits_label_adjustment)
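# Illustrative numeric sketch (added, not part of the original model): the label
# adjustment above adds ||logits|| * one_hot(label) * label_mask scaled by the
# configured multiplier, so a labelled example gets extra mass on its labelled state
# while unlabelled examples (label_mask == 0) keep their logits unchanged.
def _example_label_adjustment(multiplier=1.0):
  logits = tf.constant([[1.0, 2.0, 2.0]])  # ||logits|| == 3
  label = tf.constant([[0.0, 1.0, 0.0]])  # one-hot label for state 1
  label_mask = tf.constant([[1.0]])  # 1.0 marks a labelled example
  adjustment = tf.norm(logits, axis=-1, keepdims=True) * label * label_mask
  return logits + multiplier * adjustment  # -> [[1., 5., 2.]]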
| apache-2.0 |
kylerbrown/scikit-learn | benchmarks/bench_mnist.py | 153 | 6006 | """
=======================
MNIST dataset benchmark
=======================
Benchmark on the MNIST dataset. The dataset comprises 70,000 samples
and 784 features. Here, we consider the task of predicting
10 classes - digits from 0 to 9 from their raw images. By contrast to the
covertype dataset, the feature space is homogeneous.
Example of output :
[..]
Classification performance:
===========================
    Classifier               train-time   test-time    error-rate
------------------------------------------------------------
Nystroem-SVM 105.07s 0.91s 0.0227
ExtraTrees 48.20s 1.22s 0.0288
RandomForest 47.17s 1.21s 0.0304
SampledRBF-SVM 140.45s 0.84s 0.0486
CART 22.84s 0.16s 0.1214
dummy 0.01s 0.02s 0.8973
"""
from __future__ import division, print_function
# Author: Issam H. Laradji
# Arnaud Joly <arnaud.v.joly@gmail.com>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.datasets import get_data_home
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
from sklearn.externals.joblib import Memory
from sklearn.kernel_approximation import Nystroem
from sklearn.kernel_approximation import RBFSampler
from sklearn.metrics import zero_one_loss
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'mnist_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='F'):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_mldata('MNIST original')
X = check_array(data['data'], dtype=dtype, order=order)
y = data["target"]
# Normalize features
X = X / 255
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 60000
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
return X_train, X_test, y_train, y_test
ESTIMATORS = {
"dummy": DummyClassifier(),
'CART': DecisionTreeClassifier(),
'ExtraTrees': ExtraTreesClassifier(n_estimators=100),
'RandomForest': RandomForestClassifier(n_estimators=100),
'Nystroem-SVM':
make_pipeline(Nystroem(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'SampledRBF-SVM':
make_pipeline(RBFSampler(gamma=0.015, n_components=1000), LinearSVC(C=100))
}
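# Illustrative note (added, not part of the original benchmark): further estimators can
# be benchmarked by extending ESTIMATORS before the arguments are parsed, for example
# ESTIMATORS['LogisticRegression-SAG'] = LogisticRegression(solver='sag', tol=1e-1)
# (this assumes an additional `from sklearn.linear_model import LogisticRegression`
# import at the top of the file).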
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['ExtraTrees', 'Nystroem-SVM'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=0, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(order=args["order"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (size=%dMB)" % ("number of train samples:".ljust(25),
X_train.shape[0], int(X_train.nbytes / 1e6)))
print("%s %d (size=%dMB)" % ("number of test samples:".ljust(25),
X_test.shape[0], int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("{0: <24} {1: >10} {2: >11} {3: >12}"
"".format("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 60)
for name in sorted(args["classifiers"], key=error.get):
print("{0: <23} {1: >10.2f}s {2: >10.2f}s {3: >12.4f}"
"".format(name, train_time[name], test_time[name], error[name]))
print()
| bsd-3-clause |
dsquareindia/scikit-learn | examples/cluster/plot_face_ward_segmentation.py | 70 | 2460 | """
=========================================================================
A demo of structured Ward hierarchical clustering on a raccoon face image
=========================================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained in order
for each segmented region to be in one piece.
"""
# Author : Vincent Michel, 2010
# Alexandre Gramfort, 2011
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
from sklearn.utils.testing import SkipTest
from sklearn.utils.fixes import sp_version
if sp_version < (0, 12):
raise SkipTest("Skipping because SciPy version earlier than 0.12.0 and "
"thus does not include the scipy.misc.face() image.")
###############################################################################
# Generate data
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
# Resize it to 10% of the original size to speed up the processing
face = sp.misc.imresize(face, 0.10) / 255.
X = np.reshape(face, (-1, 1))
###############################################################################
# Define the structure A of the data. Pixels connected to their neighbors.
connectivity = grid_to_graph(*face.shape)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 15 # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters, linkage='ward',
connectivity=connectivity)
ward.fit(X)
label = np.reshape(ward.labels_, face.shape)
print("Elapsed time: ", time.time() - st)
print("Number of pixels: ", label.size)
print("Number of clusters: ", np.unique(label).size)
###############################################################################
# Plot the results on an image
plt.figure(figsize=(5, 5))
plt.imshow(face, cmap=plt.cm.gray)
for l in range(n_clusters):
plt.contour(label == l, contours=1,
colors=[plt.cm.spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
dsquareindia/scikit-learn | sklearn/utils/__init__.py | 13 | 13265 | """
The :mod:`sklearn.utils` module includes various utilities.
"""
from collections import Sequence
import numpy as np
from scipy.sparse import issparse
import warnings
from .murmurhash import murmurhash3_32
from .validation import (as_float_array,
assert_all_finite,
check_random_state, column_or_1d, check_array,
check_consistent_length, check_X_y, indexable,
check_symmetric)
from .class_weight import compute_class_weight, compute_sample_weight
from ..externals.joblib import cpu_count
from ..exceptions import DataConversionWarning
from .deprecation import deprecated
__all__ = ["murmurhash3_32", "as_float_array",
"assert_all_finite", "check_array",
"check_random_state",
"compute_class_weight", "compute_sample_weight",
"column_or_1d", "safe_indexing",
"check_consistent_length", "check_X_y", 'indexable',
"check_symmetric", "indices_to_mask", "deprecated"]
def safe_mask(X, mask):
"""Return a mask which is safe to use on X.
Parameters
----------
X : {array-like, sparse matrix}
Data on which to apply mask.
mask : array
Mask to be used on X.
Returns
-------
mask
"""
mask = np.asarray(mask)
if np.issubdtype(mask.dtype, np.int):
return mask
if hasattr(X, "toarray"):
ind = np.arange(mask.shape[0])
mask = ind[mask]
return mask
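# Illustrative sketch (added, not part of the original module): for a sparse matrix a
# boolean mask is converted to integer indices so that row indexing keeps working,
# while an integer mask is returned unchanged.
def _example_safe_mask_usage():
    from scipy.sparse import csr_matrix
    data = csr_matrix([[1], [2], [3], [4], [5]])
    condition = [False, True, True, False, True]
    mask = safe_mask(data, condition)  # -> array([1, 2, 4])
    return data[mask].toarray()  # -> array([[2], [3], [5]])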
def axis0_safe_slice(X, mask, len_mask):
"""
This mask is safer than safe_mask since it returns an
empty array, when a sparse matrix is sliced with a boolean mask
with all False, instead of raising an unhelpful error in older
versions of SciPy.
See: https://github.com/scipy/scipy/issues/5361
Also note that we can avoid doing the dot product by checking if
the len_mask is not zero in _huber_loss_and_gradient but this
is not going to be the bottleneck, since the number of outliers
and non_outliers are typically non-zero and it makes the code
tougher to follow.
"""
if len_mask != 0:
return X[safe_mask(X, mask), :]
return np.zeros(shape=(0, X.shape[1]))
def safe_indexing(X, indices):
"""Return items or rows from X using indices.
Allows simple indexing of lists or arrays.
Parameters
----------
X : array-like, sparse-matrix, list.
Data from which to sample rows or items.
indices : array-like, list
Indices according to which X will be subsampled.
"""
if hasattr(X, "iloc"):
# Pandas Dataframes and Series
try:
return X.iloc[indices]
except ValueError:
# Cython typed memoryviews internally used in pandas do not support
# readonly buffers.
warnings.warn("Copying input dataframe for slicing.",
DataConversionWarning)
return X.copy().iloc[indices]
elif hasattr(X, "shape"):
if hasattr(X, 'take') and (hasattr(indices, 'dtype') and
indices.dtype.kind == 'i'):
# This is often substantially faster than X[indices]
return X.take(indices, axis=0)
else:
return X[indices]
else:
return [X[idx] for idx in indices]
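# Illustrative sketch (added, not part of the original module): safe_indexing dispatches
# on the container type -- positional .iloc for pandas objects, take/fancy indexing for
# arrays, and a list comprehension for plain Python lists.
def _example_safe_indexing_usage():
    X_array = np.arange(8).reshape(4, 2)
    X_list = ['a', 'b', 'c', 'd']
    indices = np.array([2, 0])
    rows = safe_indexing(X_array, indices)  # -> array([[4, 5], [0, 1]])
    items = safe_indexing(X_list, indices)  # -> ['c', 'a']
    return rows, items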
def resample(*arrays, **options):
"""Resample arrays or sparse matrices in a consistent way
The default strategy implements one step of the bootstrapping
procedure.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
replace : boolean, True by default
Implements resampling with replacement. If False, this will implement
(sliced) random permutations.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
If replace is False it should not be larger than the length of
arrays.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
Returns
-------
resampled_arrays : sequence of indexable data-structures
Sequence of resampled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import resample
>>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
>>> X
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 4 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([0, 1, 0])
>>> resample(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.shuffle`
"""
random_state = check_random_state(options.pop('random_state', None))
replace = options.pop('replace', True)
max_n_samples = options.pop('n_samples', None)
if options:
raise ValueError("Unexpected kw arguments: %r" % options.keys())
if len(arrays) == 0:
return None
first = arrays[0]
n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)
if max_n_samples is None:
max_n_samples = n_samples
elif (max_n_samples > n_samples) and (not replace):
raise ValueError("Cannot sample %d out of arrays with dim %d "
"when replace is False" % (max_n_samples,
n_samples))
check_consistent_length(*arrays)
if replace:
indices = random_state.randint(0, n_samples, size=(max_n_samples,))
else:
indices = np.arange(n_samples)
random_state.shuffle(indices)
indices = indices[:max_n_samples]
# convert sparse matrices to CSR for row-based indexing
arrays = [a.tocsr() if issparse(a) else a for a in arrays]
resampled_arrays = [safe_indexing(a, indices) for a in arrays]
if len(resampled_arrays) == 1:
# syntactic sugar for the unit argument case
return resampled_arrays[0]
else:
return resampled_arrays
def shuffle(*arrays, **options):
"""Shuffle arrays or sparse matrices in a consistent way
This is a convenience alias to ``resample(*arrays, replace=False)`` to do
random permutations of the collections.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
Returns
-------
shuffled_arrays : sequence of indexable data-structures
Sequence of shuffled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import shuffle
>>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
>>> X
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([2, 1, 0])
>>> shuffle(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.resample`
"""
options['replace'] = False
return resample(*arrays, **options)
def safe_sqr(X, copy=True):
"""Element wise squaring of array-likes and sparse matrices.
Parameters
----------
X : array like, matrix, sparse matrix
copy : boolean, optional, default True
Whether to create a copy of X and operate on it or to perform
inplace computation (default behaviour).
Returns
-------
X ** 2 : element wise square
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], ensure_2d=False)
if issparse(X):
if copy:
X = X.copy()
X.data **= 2
else:
if copy:
X = X ** 2
else:
X **= 2
return X
def gen_batches(n, batch_size):
"""Generator to create slices containing batch_size elements, from 0 to n.
The last slice may contain less than batch_size elements, when batch_size
does not divide n.
Examples
--------
>>> from sklearn.utils import gen_batches
>>> list(gen_batches(7, 3))
[slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
>>> list(gen_batches(6, 3))
[slice(0, 3, None), slice(3, 6, None)]
>>> list(gen_batches(2, 3))
[slice(0, 2, None)]
"""
start = 0
for _ in range(int(n // batch_size)):
end = start + batch_size
yield slice(start, end)
start = end
if start < n:
yield slice(start, n)
def gen_even_slices(n, n_packs, n_samples=None):
"""Generator to create n_packs slices going up to n.
Pass n_samples when the slices are to be used for sparse matrix indexing;
slicing off-the-end raises an exception, while it works for NumPy arrays.
Examples
--------
>>> from sklearn.utils import gen_even_slices
>>> list(gen_even_slices(10, 1))
[slice(0, 10, None)]
>>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS
[slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
>>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS
[slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
>>> list(gen_even_slices(10, 3))
[slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
"""
start = 0
if n_packs < 1:
raise ValueError("gen_even_slices got n_packs=%s, must be >=1"
% n_packs)
for pack_num in range(n_packs):
this_n = n // n_packs
if pack_num < n % n_packs:
this_n += 1
if this_n > 0:
end = start + this_n
if n_samples is not None:
end = min(n_samples, end)
yield slice(start, end, None)
start = end
def _get_n_jobs(n_jobs):
"""Get number of jobs for the computation.
This function reimplements the logic of joblib to determine the actual
number of jobs depending on the cpu count. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is useful
for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
Thus for n_jobs = -2, all CPUs but one are used.
Parameters
----------
n_jobs : int
Number of jobs stated in joblib convention.
Returns
-------
n_jobs : int
The actual number of jobs as positive integer.
Examples
--------
>>> from sklearn.utils import _get_n_jobs
>>> _get_n_jobs(4)
4
>>> jobs = _get_n_jobs(-2)
>>> assert jobs == max(cpu_count() - 1, 1)
>>> _get_n_jobs(0)
Traceback (most recent call last):
...
ValueError: Parameter n_jobs == 0 has no meaning.
"""
if n_jobs < 0:
return max(cpu_count() + 1 + n_jobs, 1)
elif n_jobs == 0:
raise ValueError('Parameter n_jobs == 0 has no meaning.')
else:
return n_jobs
def tosequence(x):
"""Cast iterable x to a Sequence, avoiding a copy if possible."""
if isinstance(x, np.ndarray):
return np.asarray(x)
elif isinstance(x, Sequence):
return x
else:
return list(x)
def indices_to_mask(indices, mask_length):
"""Convert list of indices to boolean mask.
Parameters
----------
indices : list-like
List of integers treated as indices.
mask_length : int
Length of boolean mask to be generated.
Returns
-------
mask : 1d boolean nd-array
Boolean array that is True where indices are present, else False.
"""
if mask_length <= np.max(indices):
raise ValueError("mask_length must be greater than max(indices)")
mask = np.zeros(mask_length, dtype=np.bool)
mask[indices] = True
return mask
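# Illustrative sketch (added, not part of the original module): indices_to_mask is the
# inverse direction of np.where(mask)[0] for a boolean mask of known length.
def _example_indices_to_mask_usage():
    mask = indices_to_mask([1, 2, 4], mask_length=5)  # [False, True, True, False, True]
    return np.where(mask)[0]  # -> array([1, 2, 4])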
| bsd-3-clause |
yavuzovski/playground | machine learning/Udacity/ud120-projects/choose_your_own/your_algorithm.py | 1 | 2434 | #!/usr/bin/python
from time import time
import matplotlib.pyplot as plt
from prep_terrain_data import makeTerrainData
from class_vis import prettyPicture
features_train, labels_train, features_test, labels_test = makeTerrainData()
### the training data (features_train, labels_train) have both "fast" and "slow"
### points mixed together--separate them so we can give them different colors
### in the scatterplot and identify them visually
grade_fast = [features_train[ii][0] for ii in range(0, len(features_train)) if labels_train[ii] == 0]
bumpy_fast = [features_train[ii][1] for ii in range(0, len(features_train)) if labels_train[ii] == 0]
grade_slow = [features_train[ii][0] for ii in range(0, len(features_train)) if labels_train[ii] == 1]
bumpy_slow = [features_train[ii][1] for ii in range(0, len(features_train)) if labels_train[ii] == 1]
#### initial visualization
plt.xlim(0.0, 1.0)
plt.ylim(0.0, 1.0)
plt.scatter(bumpy_fast, grade_fast, color="b", label="fast")
plt.scatter(bumpy_slow, grade_slow, color="r", label="slow")
plt.legend()
plt.xlabel("bumpiness")
plt.ylabel("grade")
plt.show()
################################################################################
### your code here! name your classifier object clf if you want the
### visualization code (prettyPicture) to show you the decision boundary
# k-nearest neighbors
# from sklearn.neighbors import KNeighborsClassifier
# clf = KNeighborsClassifier(n_neighbors=20, p=12)
# t0 = time()
# clf.fit(features_train, labels_train)
# print("training time: {0}".format(round(time() - t0, 3))) # 0.001
# print("accuracy: {0}".format(clf.score(features_test, labels_test))) # 0.944
# adaboost
# from sklearn.ensemble import AdaBoostClassifier
# clf = AdaBoostClassifier(learning_rate=0.3)
# t0 = time()
# clf.fit(features_train, labels_train)
# print("training time: {0}".format(round(time() - t0, 3))) # 0.126
# print("accuracy: {0}".format(clf.score(features_test, labels_test))) # 0.928
# random forest
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(n_estimators=50, min_samples_split=50)
t0 = time()
clf.fit(features_train, labels_train)
print("training time: {0}".format(round(time() - t0, 3))) # 0.155
print("accuracy: {0}".format(clf.score(features_test, labels_test))) # 0.924
try:
prettyPicture(clf, features_test, labels_test)
except NameError:
pass
| gpl-3.0 |
xyguo/scikit-learn | examples/applications/plot_out_of_core_classification.py | 31 | 13829 | """
======================================================
Out-of-core classification of text documents
======================================================
This is an example showing how scikit-learn can be used for classification
using an out-of-core approach: learning from data that doesn't fit into main
memory. We make use of an online classifier, i.e., one that supports the
partial_fit method, that will be fed with batches of examples. To guarantee
that the feature space remains the same over time, we leverage a
HashingVectorizer that will project each example into the same feature space.
This is especially useful in the case of text classification where new
features (words) may appear in each batch.
The dataset used in this example is Reuters-21578 as provided by the UCI ML
repository. It will be automatically downloaded and uncompressed on first run.
The plot represents the learning curve of the classifier: the evolution
of classification accuracy over the course of the mini-batches. Accuracy is
measured on the first 1000 samples, held out as a validation set.
To limit the memory consumption, we queue examples up to a fixed amount before
feeding them to the learner.
"""
# Authors: Eustache Diemert <eustache@diemert.fr>
# @FedericoV <https://github.com/FedericoV/>
# License: BSD 3 clause
from __future__ import print_function
from glob import glob
import itertools
import os.path
import re
import tarfile
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sklearn.externals.six.moves import html_parser
from sklearn.externals.six.moves import urllib
from sklearn.datasets import get_data_home
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
###############################################################################
# Reuters Dataset related routines
###############################################################################
class ReutersParser(html_parser.HTMLParser):
"""Utility class to parse a SGML file and yield documents one at a time."""
def __init__(self, encoding='latin-1'):
html_parser.HTMLParser.__init__(self)
self._reset()
self.encoding = encoding
def handle_starttag(self, tag, attrs):
method = 'start_' + tag
getattr(self, method, lambda x: None)(attrs)
def handle_endtag(self, tag):
method = 'end_' + tag
getattr(self, method, lambda: None)()
def _reset(self):
self.in_title = 0
self.in_body = 0
self.in_topics = 0
self.in_topic_d = 0
self.title = ""
self.body = ""
self.topics = []
self.topic_d = ""
def parse(self, fd):
self.docs = []
for chunk in fd:
self.feed(chunk.decode(self.encoding))
for doc in self.docs:
yield doc
self.docs = []
self.close()
def handle_data(self, data):
if self.in_body:
self.body += data
elif self.in_title:
self.title += data
elif self.in_topic_d:
self.topic_d += data
def start_reuters(self, attributes):
pass
def end_reuters(self):
self.body = re.sub(r'\s+', r' ', self.body)
self.docs.append({'title': self.title,
'body': self.body,
'topics': self.topics})
self._reset()
def start_title(self, attributes):
self.in_title = 1
def end_title(self):
self.in_title = 0
def start_body(self, attributes):
self.in_body = 1
def end_body(self):
self.in_body = 0
def start_topics(self, attributes):
self.in_topics = 1
def end_topics(self):
self.in_topics = 0
def start_d(self, attributes):
self.in_topic_d = 1
def end_d(self):
self.in_topic_d = 0
self.topics.append(self.topic_d)
self.topic_d = ""
def stream_reuters_documents(data_path=None):
"""Iterate over documents of the Reuters dataset.
The Reuters archive will automatically be downloaded and uncompressed if
the `data_path` directory does not exist.
Documents are represented as dictionaries with 'body' (str),
'title' (str), 'topics' (list(str)) keys.
"""
DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
'reuters21578-mld/reuters21578.tar.gz')
ARCHIVE_FILENAME = 'reuters21578.tar.gz'
if data_path is None:
data_path = os.path.join(get_data_home(), "reuters")
if not os.path.exists(data_path):
"""Download the dataset."""
print("downloading dataset (once and for all) into %s" %
data_path)
os.mkdir(data_path)
def progress(blocknum, bs, size):
total_sz_mb = '%.2f MB' % (size / 1e6)
current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
if _not_in_sphinx():
print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb),
end='')
archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
urllib.request.urlretrieve(DOWNLOAD_URL, filename=archive_path,
reporthook=progress)
if _not_in_sphinx():
print('\r', end='')
print("untarring Reuters dataset...")
tarfile.open(archive_path, 'r:gz').extractall(data_path)
print("done.")
parser = ReutersParser()
for filename in glob(os.path.join(data_path, "*.sgm")):
for doc in parser.parse(open(filename, 'rb')):
yield doc
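# Illustrative note (added, not part of the original example): each yielded document is
# a plain dict, e.g. {'title': 'ACME RESULTS', 'body': '...', 'topics': ['earn']}
# (the title/topics values here are made up), so the stream can be inspected with:
#
#     for doc in itertools.islice(stream_reuters_documents(), 3):
#         print(doc['title'], doc['topics'])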
###############################################################################
# Main
###############################################################################
# Create the vectorizer and limit the number of features to a reasonable
# maximum
vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
non_negative=True)
# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()
# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = 'acq'
# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
'SGD': SGDClassifier(),
'Perceptron': Perceptron(),
'NB Multinomial': MultinomialNB(alpha=0.01),
'Passive-Aggressive': PassiveAggressiveClassifier(),
}
def get_minibatch(doc_iter, size, pos_class=positive_class):
"""Extract a minibatch of examples, return a tuple X_text, y.
Note: size is before excluding invalid docs with no topics assigned.
"""
data = [(u'{title}\n\n{body}'.format(**doc), pos_class in doc['topics'])
for doc in itertools.islice(doc_iter, size)
if doc['topics']]
if not len(data):
return np.asarray([], dtype=int), np.asarray([], dtype=int)
X_text, y = zip(*data)
return X_text, np.asarray(y, dtype=int)
def iter_minibatches(doc_iter, minibatch_size):
"""Generator of minibatches."""
X_text, y = get_minibatch(doc_iter, minibatch_size)
while len(X_text):
yield X_text, y
X_text, y = get_minibatch(doc_iter, minibatch_size)
# test data statistics
test_stats = {'n_test': 0, 'n_test_pos': 0}
# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000)
parsing_time = time.time() - tick
tick = time.time()
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats['n_test'] += len(y_test)
test_stats['n_test_pos'] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
"""Report progress information, return a string."""
duration = time.time() - stats['t0']
s = "%20s classifier : \t" % cls_name
s += "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats
s += "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats
s += "accuracy: %(accuracy).3f " % stats
s += "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration)
return s
cls_stats = {}
for cls_name in partial_fit_classifiers:
stats = {'n_train': 0, 'n_train_pos': 0,
'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),
'runtime_history': [(0, 0)], 'total_fit_time': 0.0}
cls_stats[cls_name] = stats
# Discard test set
get_minibatch(data_stream, n_test_documents)
# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time. The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000
# Create the data_stream that parses Reuters SGML files and iterates on
# documents as a stream.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0
# Main loop : iterate on mini-batches of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):
tick = time.time()
X_train = vectorizer.transform(X_train_text)
total_vect_time += time.time() - tick
for cls_name, cls in partial_fit_classifiers.items():
tick = time.time()
# update estimator with examples in the current mini-batch
cls.partial_fit(X_train, y_train, classes=all_classes)
# accumulate test accuracy stats
cls_stats[cls_name]['total_fit_time'] += time.time() - tick
cls_stats[cls_name]['n_train'] += X_train.shape[0]
cls_stats[cls_name]['n_train_pos'] += sum(y_train)
tick = time.time()
cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
cls_stats[cls_name]['prediction_time'] = time.time() - tick
acc_history = (cls_stats[cls_name]['accuracy'],
cls_stats[cls_name]['n_train'])
cls_stats[cls_name]['accuracy_history'].append(acc_history)
run_history = (cls_stats[cls_name]['accuracy'],
total_vect_time + cls_stats[cls_name]['total_fit_time'])
cls_stats[cls_name]['runtime_history'].append(run_history)
if i % 3 == 0:
print(progress(cls_name, cls_stats[cls_name]))
if i % 3 == 0:
print('\n')
###############################################################################
# Plot results
###############################################################################
def plot_accuracy(x, y, x_legend):
"""Plot accuracy as a function of x."""
x = np.array(x)
y = np.array(y)
plt.title('Classification accuracy as a function of %s' % x_legend)
plt.xlabel('%s' % x_legend)
plt.ylabel('Accuracy')
plt.grid(True)
plt.plot(x, y)
rcParams['legend.fontsize'] = 10
cls_names = list(sorted(cls_stats.keys()))
# Plot accuracy evolution
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with #examples
accuracy, n_examples = zip(*stats['accuracy_history'])
plot_accuracy(n_examples, accuracy, "training examples (#)")
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with runtime
accuracy, runtime = zip(*stats['runtime_history'])
plot_accuracy(runtime, accuracy, 'runtime (s)')
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
# Plot fitting times
plt.figure()
fig = plt.gcf()
cls_runtime = []
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['total_fit_time'])
cls_runtime.append(total_vect_time)
cls_names.append('Vectorization')
bar_colors = ['b', 'g', 'r', 'c', 'm', 'y']
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Training Times')
def autolabel(rectangles):
"""attach some text vi autolabel on rectangles."""
for rect in rectangles:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2.,
1.05 * height, '%.4f' % height,
ha='center', va='bottom')
autolabel(rectangles)
plt.show()
# Plot prediction times
plt.figure()
cls_runtime = []
cls_names = list(sorted(cls_stats.keys()))
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['prediction_time'])
cls_runtime.append(parsing_time)
cls_names.append('Read/Parse\n+Feat.Extr.')
cls_runtime.append(vectorizing_time)
cls_names.append('Hashing\n+Vect.')
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=8)
plt.setp(plt.xticks()[1], rotation=30)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Prediction Times (%d instances)' % n_test_documents)
autolabel(rectangles)
plt.show()
| bsd-3-clause |
xyguo/scikit-learn | examples/cluster/plot_cluster_iris.py | 347 | 2593 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
K-means Clustering
=========================================================
The plots display, first, what a K-means algorithm would yield
using three clusters. It is then shown what the effect of a bad
initialization is on the classification process:
by setting n_init to only 1 (default is 10), the number of
times that the algorithm will be run with different centroid
seeds is reduced.
The next plot displays what using eight clusters would deliver,
and finally the ground truth.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
'k_means_iris_8': KMeans(n_clusters=8),
'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1,
init='random')}
fignum = 1
for name, est in estimators.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
est.fit(X)
labels = est.labels_
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float))
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
fignum = fignum + 1
# Plot the ground truth
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
for name, label in [('Setosa', 0),
('Versicolour', 1),
('Virginica', 2)]:
ax.text3D(X[y == label, 3].mean(),
X[y == label, 0].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
plt.show()
| bsd-3-clause |
dsquareindia/scikit-learn | sklearn/ensemble/forest.py | 8 | 67993 | """Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from warnings import warn
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from scipy.sparse import hstack as sparse_hstack
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..exceptions import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import bincount, parallel_helper
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_is_fitted
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
def _generate_sample_indices(random_state, n_samples):
"""Private function used to _parallel_build_trees function."""
random_instance = check_random_state(random_state)
sample_indices = random_instance.randint(0, n_samples, n_samples)
return sample_indices
def _generate_unsampled_indices(random_state, n_samples):
"""Private function used to forest._set_oob_score function."""
sample_indices = _generate_sample_indices(random_state, n_samples)
sample_counts = bincount(sample_indices, minlength=n_samples)
unsampled_mask = sample_counts == 0
indices_range = np.arange(n_samples)
unsampled_indices = indices_range[unsampled_mask]
return unsampled_indices
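# Illustrative sketch (added, not part of the original module): for a given tree the
# bootstrap sample indices and the unsampled ("out-of-bag") indices partition the
# training set -- every index either appears (possibly several times) in the bootstrap
# sample or in the OOB set used by _set_oob_score.
def _example_bootstrap_and_oob_indices(random_state=0, n_samples=10):
    sampled = _generate_sample_indices(random_state, n_samples)
    unsampled = _generate_unsampled_indices(random_state, n_samples)
    assert set(sampled) | set(unsampled) == set(range(n_samples))
    assert not set(sampled) & set(unsampled)
    return sampled, unsampled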
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, n_samples)
sample_counts = bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(BaseForest, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.class_weight = class_weight
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = self._validate_X_predict(X)
results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(tree, 'apply', X, check_input=False)
for tree in self.estimators_)
return np.array(results).T
def decision_path(self, X):
"""Return the decision path in the forest
.. versionadded:: 0.18
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
Return a node indicator matrix where non zero elements
indicates that the samples goes through the nodes.
n_nodes_ptr : array of size (n_estimators + 1, )
The columns from indicator[n_nodes_ptr[i]:n_nodes_ptr[i+1]]
gives the indicator value for the i-th estimator.
"""
X = self._validate_X_predict(X)
indicators = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(tree, 'decision_path', X,
check_input=False)
for tree in self.estimators_)
n_nodes = [0]
n_nodes.extend([i.shape[1] for i in indicators])
n_nodes_ptr = np.array(n_nodes).cumsum()
return sparse_hstack(indicators).tocsr(), n_nodes_ptr
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
# Validate or convert input data
X = check_array(X, accept_sparse="csc", dtype=DTYPE)
y = check_array(y, accept_sparse='csc', ensure_2d=False, dtype=None)
if sample_weight is not None:
sample_weight = check_array(sample_weight, ensure_2d=False)
if issparse(X):
# Pre-sort indices to avoid having each individual tree of the
# ensemble re-sort them.
X.sort_indices()
# Remap output
n_samples, self.n_features_ = X.shape
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples,), for example using ravel().",
DataConversionWarning, stacklevel=2)
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity,
# unlike [:, np.newaxis], which does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y, expanded_class_weight = self._validate_y_class_weight(y)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Check parameters
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
random_state = check_random_state(self.random_state)
if not self.warm_start or not hasattr(self, "estimators_"):
# Free allocated memory, if any
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError('n_estimators=%d must be larger than or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
else:
if self.warm_start and len(self.estimators_) > 0:
# We draw from the random state to get the random state we
# would have got if we hadn't used a warm_start.
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = []
for i in range(n_more_estimators):
tree = self._make_estimator(append=False,
random_state=random_state)
trees.append(tree)
# Parallel loop: we use the threading backend as the Cython code
# for fitting the trees is internally releasing the Python GIL
# making threading always more efficient than multiprocessing in
# that case.
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_build_trees)(
t, self, X, y, sample_weight, i, len(trees),
verbose=self.verbose, class_weight=self.class_weight)
for i, t in enumerate(trees))
# Collect newly grown trees
self.estimators_.extend(trees)
if self.oob_score:
self._set_oob_score(X, y)
# Unwrap classes_ attributes to scalars for the single-output case
if hasattr(self, "classes_") and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
@abstractmethod
def _set_oob_score(self, X, y):
"""Calculate out of bag predictions and score."""
def _validate_y_class_weight(self, y):
# Default implementation
return y, None
def _validate_X_predict(self, X):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
return self.estimators_[0]._validate_X_predict(X, check_input=True)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
check_is_fitted(self, 'estimators_')
all_importances = Parallel(n_jobs=self.n_jobs,
backend="threading")(
delayed(getattr)(tree, 'feature_importances_')
for tree in self.estimators_)
return sum(all_importances) / len(self.estimators_)
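# --- Illustrative sketch (not part of the original module) ------------------
# The feature_importances_ property above is an unweighted mean of the
# per-tree importance vectors. A minimal standalone sketch of that reduction,
# with hypothetical per-tree arrays standing in for fitted trees.
def _sketch_average_importances():
    import numpy as np
    per_tree = [np.array([0.2, 0.8]),
                np.array([0.4, 0.6]),
                np.array([0.3, 0.7])]
    # Same reduction as the property: sum the vectors, divide by the tree count.
    return sum(per_tree) / len(per_tree)   # -> array([0.3, 0.7])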
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
ClassifierMixin)):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ForestClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
def _set_oob_score(self, X, y):
"""Compute out-of-bag score"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_classes_ = self.n_classes_
n_samples = y.shape[0]
oob_decision_function = []
oob_score = 0.0
predictions = []
for k in range(self.n_outputs_):
predictions.append(np.zeros((n_samples, n_classes_[k])))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict_proba(X[unsampled_indices, :],
check_input=False)
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in range(self.n_outputs_):
predictions[k][unsampled_indices, :] += p_estimator[k]
for k in range(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
decision = (predictions[k] /
predictions[k].sum(axis=1)[:, np.newaxis])
oob_decision_function.append(decision)
oob_score += np.mean(y[:, k] ==
np.argmax(predictions[k], axis=1), axis=0)
if self.n_outputs_ == 1:
self.oob_decision_function_ = oob_decision_function[0]
else:
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score / self.n_outputs_
def _validate_y_class_weight(self, y):
check_classification_targets(y)
y = np.copy(y)
expanded_class_weight = None
if self.class_weight is not None:
y_original = np.copy(y)
self.classes_ = []
self.n_classes_ = []
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
valid_presets = ('balanced', 'balanced_subsample')
if isinstance(self.class_weight, six.string_types):
if self.class_weight not in valid_presets:
raise ValueError('Valid presets for class_weight include '
'"balanced" and "balanced_subsample". Given "%s".'
% self.class_weight)
if self.warm_start:
warn('class_weight presets "balanced" or "balanced_subsample" are '
'not recommended for warm_start if the fitted data '
'differs from the full dataset. In order to use '
'"balanced" weights, use compute_class_weight("balanced", '
'classes, y). In place of y you can use a large '
'enough sample of the full training set target to '
'properly estimate the class frequency '
'distributions. Pass the resulting weights as the '
'class_weight parameter.')
if (self.class_weight != 'balanced_subsample' or
not self.bootstrap):
if self.class_weight == "balanced_subsample":
class_weight = "balanced"
else:
class_weight = self.class_weight
expanded_class_weight = compute_sample_weight(class_weight,
y_original)
return y, expanded_class_weight
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the mean predicted class probabilities of the trees in the forest. The
class probability of a single tree is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
check_is_fitted(self, 'estimators_')
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(e, 'predict_proba', X,
check_input=False)
for e in self.estimators_)
# Reduce
proba = all_proba[0]
if self.n_outputs_ == 1:
for j in range(1, len(all_proba)):
proba += all_proba[j]
proba /= len(self.estimators_)
else:
for j in range(1, len(all_proba)):
for k in range(self.n_outputs_):
proba[k] += all_proba[j][k]
for k in range(self.n_outputs_):
proba[k] /= self.n_estimators
return proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample are computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
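# --- Illustrative sketch (not part of the original module) ------------------
# ForestClassifier.predict above is soft voting: average the per-tree class
# probabilities and take the argmax. A standalone sketch with hypothetical
# per-tree probability matrices for 2 samples and 3 classes.
def _sketch_soft_voting():
    import numpy as np
    tree_probas = [np.array([[0.7, 0.2, 0.1],
                             [0.1, 0.3, 0.6]]),
                   np.array([[0.5, 0.4, 0.1],
                             [0.2, 0.2, 0.6]])]
    mean_proba = sum(tree_probas) / len(tree_probas)
    return np.argmax(mean_proba, axis=1)   # -> array([0, 2])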
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ForestRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
check_is_fitted(self, 'estimators_')
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(e, 'predict', X, check_input=False)
for e in self.estimators_)
# Reduce
y_hat = sum(all_y_hat) / len(self.estimators_)
return y_hat
def _set_oob_score(self, X, y):
"""Compute out-of-bag scores"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_samples = y.shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict(
X[unsampled_indices, :], check_input=False)
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[unsampled_indices, :] += p_estimator
n_predictions[unsampled_indices, :] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = \
self.oob_prediction_.reshape((n_samples, ))
self.oob_score_ = 0.0
for k in range(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k],
predictions[:, k])
self.oob_score_ /= self.n_outputs_
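# --- Illustrative sketch (not part of the original module) ------------------
# _set_oob_score above accumulates each tree's predictions on the samples it
# did not see during bootstrapping and divides by how often each sample was
# predicted. A toy sketch of that accumulation with hypothetical per-tree
# (OOB indices, predictions) pairs for 4 training samples.
def _sketch_oob_accumulation():
    import numpy as np
    n_samples = 4
    oob = [(np.array([0, 2]), np.array([1.0, 3.0])),
           (np.array([1, 2]), np.array([2.0, 5.0]))]
    predictions = np.zeros(n_samples)
    counts = np.zeros(n_samples)
    for idx, pred in oob:
        predictions[idx] += pred
        counts[idx] += 1
    counts[counts == 0] = 1           # samples never left out keep prediction 0
    return predictions / counts       # -> array([1., 2., 4., 0.])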
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
A random forest is a meta estimator that fits a number of decision tree
classifiers on various sub-samples of the dataset and uses averaging to
improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced",
"balanced_subsample" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that
weights are computed based on the bootstrap sample for every tree
grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
Notes
-----
The features are always randomly permuted at each split. Therefore,
the best found split may vary, even with the same training data,
``max_features=n_features`` and ``bootstrap=False``, if the improvement
of the criterion is identical for several splits enumerated during the
search of the best split. To obtain a deterministic behaviour during
fitting, ``random_state`` has to be fixed.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(RandomForestClassifier, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
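# --- Illustrative usage sketch (not part of the original module) ------------
# A minimal example of the classifier defined above, fitted on hypothetical
# random data with out-of-bag scoring enabled; parameter values are arbitrary.
def _sketch_random_forest_classifier_usage():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(200, 5)
    y = (X[:, 0] + X[:, 1] > 1.0).astype(int)   # toy target
    clf = RandomForestClassifier(n_estimators=25, oob_score=True,
                                 random_state=0)
    clf.fit(X, y)
    return clf.oob_score_, clf.predict_proba(X[:3])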
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
A random forest is a meta estimator that fits a number of classifying
decision trees on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate
the R^2 on unseen data.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
Notes
-----
The features are always randomly permuted at each split. Therefore,
the best found split may vary, even with the same training data,
``max_features=n_features`` and ``bootstrap=False``, if the improvement
of the criterion is identical for several splits enumerated during the
search of the best split. To obtain a deterministic behaviour during
fitting, ``random_state`` has to be fixed.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomForestRegressor, self).__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
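# --- Illustrative usage sketch (not part of the original module) ------------
# A minimal example of the regressor defined above on hypothetical data; the
# target is a noisy function of the first feature, chosen only for illustration.
def _sketch_random_forest_regressor_usage():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(200, 3)
    y = np.sin(2 * np.pi * X[:, 0]) + 0.1 * rng.randn(200)
    reg = RandomForestRegressor(n_estimators=25, oob_score=True,
                                random_state=0)
    reg.fit(X, y)
    return reg.oob_score_, reg.predict(X[:3])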
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of ExtraTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
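# --- Illustrative usage sketch (not part of the original module) ------------
# Extra-trees pick split thresholds at random instead of searching for the
# best one, which typically trades a little bias for lower variance. A
# hypothetical side-by-side fit of the two classifiers defined in this module.
def _sketch_extra_trees_vs_random_forest():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(300, 4)
    y = (X[:, 0] * X[:, 1] > 0.25).astype(int)
    et = ExtraTreesClassifier(n_estimators=50, random_state=0).fit(X, y)
    rf = RandomForestClassifier(n_estimators=50, random_state=0).fit(X, y)
    return et.feature_importances_, rf.feature_importances_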
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate the R^2 on unseen data.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of ExtraTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features.
n_outputs_ : int
The number of outputs.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
RandomForestRegressor: Ensemble regressor using trees with optimal splits.
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
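# --- Illustrative usage sketch (not part of the original module) ------------
# warm_start=True (documented above) grows an existing forest instead of
# refitting from scratch: raising n_estimators between calls to fit adds
# trees. A hypothetical sketch using the regressor defined above.
def _sketch_warm_start_growth():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(100, 3)
    y = X[:, 0] + 0.05 * rng.randn(100)
    reg = ExtraTreesRegressor(n_estimators=10, warm_start=True, random_state=0)
    reg.fit(X, y)                   # fits 10 trees
    reg.n_estimators = 20
    reg.fit(X, y)                   # fits 10 more, keeping the first 10
    return len(reg.estimators_)     # -> 20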
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as there are trees in
the forest.
The dimensionality of the resulting representation is
``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
Read more in the :ref:`User Guide <random_trees_embedding>`.
Parameters
----------
n_estimators : integer, optional (default=10)
Number of trees in the forest.
max_depth : integer, optional (default=5)
The maximum depth of each tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` is the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` is the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
sparse_output : bool, optional (default=True)
Whether or not to return a sparse CSR matrix, as default behavior,
or to return a dense array compatible with dense pipeline operators.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of ExtraTreeRegressor
The collection of fitted sub-estimators.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_leaf_nodes=None,
min_impurity_split=1e-7,
sparse_output=True,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = 1
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
self.sparse_output = sparse_output
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by tree embedding")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
Returns
-------
self : object
Returns self.
"""
self.fit_transform(X, y, sample_weight=sample_weight)
return self
def fit_transform(self, X, y=None, sample_weight=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data used to build forests. Use ``dtype=np.float32`` for
maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
X = check_array(X, accept_sparse=['csc'])
if issparse(X):
# Pre-sort indices to avoid having each individual tree of the
# ensemble re-sort them.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
super(RandomTreesEmbedding, self).fit(X, y,
sample_weight=sample_weight)
self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""Transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data to be transformed. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csr_matrix`` for maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
return self.one_hot_encoder_.transform(self.apply(X))
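# --- Illustrative usage sketch (not part of the original module) ------------
# RandomTreesEmbedding maps each sample to a one-hot coding of the leaf it
# falls into in every tree, giving a high-dimensional sparse representation.
# A hypothetical fit on random data; sizes and parameters are arbitrary.
def _sketch_random_trees_embedding_usage():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(50, 2)
    embedder = RandomTreesEmbedding(n_estimators=5, max_depth=3, random_state=0)
    X_sparse = embedder.fit_transform(X)
    # One non-zero entry per (sample, tree): each row sums to 5 here.
    return X_sparse.shape, X_sparse.sum(axis=1)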
| bsd-3-clause |
ChanChiChoi/scikit-learn | examples/applications/svm_gui.py | 285 | 11161 | """
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm, mainly intended for didactic
purposes. You can create data points by point-and-click and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhoer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
"""The Model which hold the data. It implements the
observable in the observer pattern and notifies the
registered observers on change event.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
self.cls = None
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print("fit the model")
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print("Accuracy:", clf.score(X, y) * 100)
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
def decision_surface(self, cls):
delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
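# --- Illustrative sketch (not part of the original example) -----------------
# Controller.decision_surface above evaluates the fitted model on a grid so
# that the GUI can draw contours. The same idea as a standalone sketch with a
# hypothetical RBF SVM and toy data; names and values are assumptions.
def _sketch_decision_surface():
    import numpy as np
    from sklearn import svm
    rng = np.random.RandomState(0)
    X = rng.randn(40, 2)
    y = (X[:, 0] * X[:, 1] > 0).astype(int)
    clf = svm.SVC(kernel='rbf', gamma=0.5).fit(X, y)
    xx, yy = np.meshgrid(np.linspace(-3, 3, 50), np.linspace(-3, 3, 50))
    Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
    return Z   # contour Z at level 0.0 to draw the decision boundary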
class View(object):
"""Test docstring. """
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
self.ax.text(-50, -60, "Linear: $u^T v$")
self.ax.text(-20, -60, r"RBF: $\exp (-\gamma \| u-v \|^2)$")
self.ax.text(10, -60, r"Poly: $(\gamma \, u^T v + r)^d$")
def onclick(self, event):
if event.xdata and event.ydata:
if event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
elif event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
corresponding data points and add the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause |
fredhusser/scikit-learn | sklearn/metrics/cluster/unsupervised.py | 228 | 8281 | """ Unsupervised evaluation metrics. """
# Authors: Robert Layton <robertlayton@gmail.com>
#
# License: BSD 3 clause
import numpy as np
from ...utils import check_random_state
from ..pairwise import pairwise_distances
def silhouette_score(X, labels, metric='euclidean', sample_size=None,
random_state=None, **kwds):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
Note that the Silhouette Coefficient is only defined if the number of labels
satisfies 2 <= n_labels <= n_samples - 1.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
Predicted labels for each sample.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`metrics.pairwise.pairwise_distances
<sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance
array itself, use ``metric="precomputed"``.
sample_size : int or None
The size of the sample to use when computing the Silhouette Coefficient
on a random subset of the data.
If ``sample_size is None``, no sampling is used.
random_state : integer or numpy.RandomState, optional
The generator used to randomly select a subset of samples if
``sample_size is not None``. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
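Examples
--------
A minimal illustrative example; the toy data and the expected value below
are made up for demonstration only. Two tight, well-separated two-point
clusters give a score close to 1:

>>> import numpy as np
>>> X = np.array([[0., 0.], [0., 1.], [10., 0.], [10., 1.]])
>>> labels = np.array([0, 0, 1, 1])
>>> round(silhouette_score(X, labels), 2)
0.9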
"""
n_labels = len(np.unique(labels))
n_samples = X.shape[0]
if not 1 < n_labels < n_samples:
raise ValueError("Number of labels is %d. Valid values are 2 "
"to n_samples - 1 (inclusive)" % n_labels)
if sample_size is not None:
random_state = check_random_state(random_state)
indices = random_state.permutation(X.shape[0])[:sample_size]
if metric == "precomputed":
X, labels = X[indices].T[indices].T, labels[indices]
else:
X, labels = X[indices], labels[indices]
return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
def silhouette_samples(X, labels, metric='euclidean', **kwds):
"""Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
Note that the Silhouette Coefficient is only defined if the number of labels
satisfies 2 <= n_labels <= n_samples - 1.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
label values for each sample
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is
the distance array itself, use "precomputed" as the metric.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array, shape = [n_samples]
Silhouette Coefficient for each sample.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
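Examples
--------
A minimal illustration reusing the toy data shown for
:func:`silhouette_score`; the data and expected output are for
demonstration only. Every sample in the two tight, well-separated
clusters scores close to 1:

>>> import numpy as np
>>> X = np.array([[0., 0.], [0., 1.], [10., 0.], [10., 1.]])
>>> labels = np.array([0, 0, 1, 1])
>>> np.round(silhouette_samples(X, labels), 2)
array([ 0.9,  0.9,  0.9,  0.9])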
"""
distances = pairwise_distances(X, metric=metric, **kwds)
n = labels.shape[0]
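# A[i] is the mean distance from sample i to the other members of its own
# cluster; B[i] is the mean distance from sample i to the members of the
# nearest cluster it does not belong to (see the helper functions below).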
A = np.array([_intra_cluster_distance(distances[i], labels, i)
for i in range(n)])
B = np.array([_nearest_cluster_distance(distances[i], labels, i)
for i in range(n)])
sil_samples = (B - A) / np.maximum(A, B)
return sil_samples
def _intra_cluster_distance(distances_row, labels, i):
"""Calculate the mean intra-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Distances between sample i and each other sample (one row of the
pairwise distance matrix).
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is excluded from the calculation and
used to determine the current label.
Returns
-------
a : float
Mean intra-cluster distance for sample i
"""
mask = labels == labels[i]
mask[i] = False
if not np.any(mask):
# cluster of size 1
return 0
a = np.mean(distances_row[mask])
return a
def _nearest_cluster_distance(distances_row, labels, i):
"""Calculate the mean nearest-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Distances between sample i and each other sample (one row of the
pairwise distance matrix).
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is used to determine the current
label.
Returns
-------
b : float
Mean nearest-cluster distance for sample i
"""
label = labels[i]
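# For every other cluster, compute the mean distance from sample i to its
# members, then keep the smallest of these means.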
b = np.min([np.mean(distances_row[labels == cur_label])
for cur_label in set(labels) if not cur_label == label])
return b
| bsd-3-clause |
fredhusser/scikit-learn | sklearn/__check_build/__init__.py | 342 | 1671 | """ Module to give helpful messages to users who did not
compile scikit-learn properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
# Raise a comprehensible error and list the contents of the
# directory to help debugging on the mailing list.
local_dir = os.path.split(__file__)[0]
msg = STANDARD_MSG
if local_dir == "sklearn/__check_build":
# Picking up the local install: this will work only if the
# install is an 'inplace build'
msg = INPLACE_MSG
dir_content = list()
for i, filename in enumerate(os.listdir(local_dir)):
if ((i + 1) % 3):
dir_content.append(filename.ljust(26))
else:
dir_content.append(filename + '\n')
raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
try:
from ._check_build import check_build
except ImportError as e:
raise_build_error(e)
| bsd-3-clause |
ChanChiChoi/scikit-learn | examples/ensemble/plot_voting_decision_regions.py | 228 | 2386 | """
==================================================
Plot the decision boundaries of a VotingClassifier
==================================================
Plot the decision boundaries of a `VotingClassifier` for
two features of the Iris dataset.
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`DecisionTreeClassifier`,
`KNeighborsClassifier`, and `SVC`) and used to initialize a
soft-voting `VotingClassifier` with weights `[2, 1, 2]`, which means that
the predicted probabilities of the `DecisionTreeClassifier` and `SVC`
each count twice as much as those of the `KNeighborsClassifier` classifier
when the averaged probability is calculated.
"""
print(__doc__)
from itertools import product
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
# Loading some example data
iris = datasets.load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
# Training classifiers
clf1 = DecisionTreeClassifier(max_depth=4)
clf2 = KNeighborsClassifier(n_neighbors=7)
clf3 = SVC(kernel='rbf', probability=True)
eclf = VotingClassifier(estimators=[('dt', clf1), ('knn', clf2),
('svc', clf3)],
voting='soft', weights=[2, 1, 2])
clf1.fit(X, y)
clf2.fit(X, y)
clf3.fit(X, y)
eclf.fit(X, y)
# Plotting decision regions
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(2, 2, sharex='col', sharey='row', figsize=(10, 8))
for idx, clf, tt in zip(product([0, 1], [0, 1]),
[clf1, clf2, clf3, eclf],
['Decision Tree (depth=4)', 'KNN (k=7)',
'Kernel SVM', 'Soft Voting']):
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
axarr[idx[0], idx[1]].contourf(xx, yy, Z, alpha=0.4)
axarr[idx[0], idx[1]].scatter(X[:, 0], X[:, 1], c=y, alpha=0.8)
axarr[idx[0], idx[1]].set_title(tt)
plt.show()
| bsd-3-clause |
ChanChiChoi/scikit-learn | sklearn/tests/test_common.py | 126 | 7665 | """
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <amueller@ais.uni-bonn.de>
# Gael Varoquaux gael.varoquaux@normalesup.org
# License: BSD 3 clause
from __future__ import print_function
import os
import warnings
import sys
import pkgutil
from sklearn.externals.six import PY3
from sklearn.utils.testing import assert_false, clean_warning_registry
from sklearn.utils.testing import all_estimators
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import ignore_warnings
import sklearn
from sklearn.cluster.bicluster import BiclusterMixin
from sklearn.linear_model.base import LinearClassifierMixin
from sklearn.utils.estimator_checks import (
_yield_all_checks,
CROSS_DECOMPOSITION,
check_parameters_default_constructible,
check_class_weight_balanced_linear_classifier,
check_transformer_n_iter,
check_non_transformer_estimators_n_iter,
check_get_params_invariance)
def test_all_estimator_no_base_class():
# test that all_estimators doesn't find abstract classes.
for name, Estimator in all_estimators():
msg = ("Base estimators such as {0} should not be included"
" in all_estimators").format(name)
assert_false(name.lower().startswith('base'), msg=msg)
def test_all_estimators():
# Test that estimators are default-constructible, clonable
# and have working repr.
estimators = all_estimators(include_meta_estimators=True)
# Meta sanity-check to make sure that the estimator introspection runs
# properly
assert_greater(len(estimators), 0)
for name, Estimator in estimators:
# some can just not be sensibly default constructed
yield check_parameters_default_constructible, name, Estimator
def test_non_meta_estimators():
# input validation etc for non-meta estimators
estimators = all_estimators()
for name, Estimator in estimators:
if issubclass(Estimator, BiclusterMixin):
continue
if name.startswith("_"):
continue
for check in _yield_all_checks(name, Estimator):
yield check, name, Estimator
def test_configure():
# Smoke test the 'configure' step of setup; this tests all the
# 'configure' functions in the setup.py files in the scikit.
cwd = os.getcwd()
setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
setup_filename = os.path.join(setup_path, 'setup.py')
if not os.path.exists(setup_filename):
return
try:
os.chdir(setup_path)
old_argv = sys.argv
sys.argv = ['setup.py', 'config']
clean_warning_registry()
with warnings.catch_warnings():
# The configuration spits out warnings when not finding
# Blas/Atlas development headers
warnings.simplefilter('ignore', UserWarning)
if PY3:
with open('setup.py') as f:
exec(f.read(), dict(__name__='__main__'))
else:
execfile('setup.py', dict(__name__='__main__'))
finally:
sys.argv = old_argv
os.chdir(cwd)
def test_class_weight_balanced_linear_classifiers():
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
linear_classifiers = [
(name, clazz)
for name, clazz in classifiers
if 'class_weight' in clazz().get_params().keys()
and issubclass(clazz, LinearClassifierMixin)]
for name, Classifier in linear_classifiers:
if name == "LogisticRegressionCV":
# Contrary to RidgeClassifierCV, LogisticRegressionCV uses actual
# CV folds and fit a model for each CV iteration before averaging
# the coef. Therefore it is expected to not behave exactly as the
# other linear model.
continue
yield check_class_weight_balanced_linear_classifier, name, Classifier
@ignore_warnings
def test_import_all_consistency():
# Smoke test to check that any name in a __all__ list is actually defined
# in the namespace of the module or package.
pkgs = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
onerror=lambda _: None)
submods = [modname for _, modname, _ in pkgs]
for modname in submods + ['sklearn']:
if ".tests." in modname:
continue
package = __import__(modname, fromlist="dummy")
for name in getattr(package, '__all__', ()):
if getattr(package, name, None) is None:
raise AttributeError(
"Module '{0}' has no attribute '{1}'".format(
modname, name))
def test_root_import_all_completeness():
EXCEPTIONS = ('utils', 'tests', 'base', 'setup')
for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__,
onerror=lambda _: None):
if '.' in modname or modname.startswith('_') or modname in EXCEPTIONS:
continue
assert_in(modname, sklearn.__all__)
def test_non_transformer_estimators_n_iter():
# Test that all non-transformer estimators that have a max_iter
# attribute expose an n_iter attribute of at least 1.
for est_type in ['regressor', 'classifier', 'cluster']:
regressors = all_estimators(type_filter=est_type)
for name, Estimator in regressors:
# LassoLars stops early for the default alpha=1.0 for
# the iris dataset.
if name == 'LassoLars':
estimator = Estimator(alpha=0.)
else:
estimator = Estimator()
if hasattr(estimator, "max_iter"):
# These models are dependent on external solvers like
# libsvm and accessing the iter parameter is non-trivial.
if name in (['Ridge', 'SVR', 'NuSVR', 'NuSVC',
'RidgeClassifier', 'SVC', 'RandomizedLasso',
'LogisticRegressionCV']):
continue
# Tested in test_transformer_n_iter below
elif (name in CROSS_DECOMPOSITION or
name in ['LinearSVC', 'LogisticRegression']):
continue
else:
# Multitask models related to ENet cannot handle mono-output y.
yield (check_non_transformer_estimators_n_iter,
name, estimator, 'Multi' in name)
def test_transformer_n_iter():
transformers = all_estimators(type_filter='transformer')
for name, Estimator in transformers:
estimator = Estimator()
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if hasattr(estimator, "max_iter") and name not in external_solver:
yield check_transformer_n_iter, name, estimator
def test_get_params_invariance():
# Test for estimators that support get_params, that
# get_params(deep=False) is a subset of get_params(deep=True)
# Related to issue #4465
estimators = all_estimators(include_meta_estimators=False, include_other=True)
for name, Estimator in estimators:
if hasattr(Estimator, 'get_params'):
yield check_get_params_invariance, name, Estimator
| bsd-3-clause |
andrewnc/scikit-learn | sklearn/cluster/tests/test_hierarchical.py | 228 | 19795 | """
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012,
# Matteo Visconti di Oleggio Castello 2014
# License: BSD 3 clause
from tempfile import mkdtemp
import shutil
from functools import partial
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.cluster import ward_tree
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration
from sklearn.cluster.hierarchical import (_hc_cut, _TREE_BUILDERS,
linkage_tree)
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.metrics.pairwise import PAIRED_DISTANCES, cosine_distances,\
manhattan_distances, pairwise_distances
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.neighbors.graph import kneighbors_graph
from sklearn.cluster._hierarchical import average_merge, max_merge
from sklearn.utils.fast_dict import IntFloatDict
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
def test_linkage_misc():
# Misc tests on linkage
rng = np.random.RandomState(42)
X = rng.normal(size=(5, 5))
assert_raises(ValueError, AgglomerativeClustering(linkage='foo').fit, X)
assert_raises(ValueError, linkage_tree, X, linkage='foo')
assert_raises(ValueError, linkage_tree, X, connectivity=np.ones((4, 4)))
# Smoke test FeatureAgglomeration
FeatureAgglomeration().fit(X)
# test hierarchical clustering on a precomputed distance matrix
dis = cosine_distances(X)
res = linkage_tree(dis, affinity="precomputed")
assert_array_equal(res[0], linkage_tree(X, affinity="cosine")[0])
# test hierarchical clustering with a callable affinity
res = linkage_tree(X, affinity=manhattan_distances)
assert_array_equal(res[0], linkage_tree(X, affinity="manhattan")[0])
def test_structured_linkage_tree():
# Check that we obtain the correct solution for structured linkage trees.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
# Avoiding a mask with only 'True' entries
mask[4:7, 4:7] = 0
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for tree_builder in _TREE_BUILDERS.values():
children, n_components, n_leaves, parent = \
tree_builder(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
# Check that ward_tree raises a ValueError with a connectivity matrix
# of the wrong shape
assert_raises(ValueError,
tree_builder, X.T, np.ones((4, 4)))
# Check that fitting with no samples raises an error
assert_raises(ValueError,
tree_builder, X.T[:0], connectivity)
def test_unstructured_linkage_tree():
# Check that we obtain the correct solution for unstructured linkage trees.
rng = np.random.RandomState(0)
X = rng.randn(50, 100)
for this_X in (X, X[0]):
# With a specified number of clusters, just for the sake of
# raising a warning and testing the warning code
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, ward_tree, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
for tree_builder in _TREE_BUILDERS.values():
for this_X in (X, X[0]):
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, tree_builder, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
def test_height_linkage_tree():
# Check that the height of the results of linkage tree is sorted.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for linkage_func in _TREE_BUILDERS.values():
children, n_nodes, n_leaves, parent = linkage_func(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
def test_agglomerative_clustering():
# Check that we obtain the correct number of clusters with
# agglomerative clustering.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
n_samples = 100
X = rng.randn(n_samples, 50)
connectivity = grid_to_graph(*mask.shape)
for linkage in ("ward", "complete", "average"):
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage=linkage)
clustering.fit(X)
# test caching
try:
tempdir = mkdtemp()
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity,
memory=tempdir,
linkage=linkage)
clustering.fit(X)
labels = clustering.labels_
assert_true(np.size(np.unique(labels)) == 10)
finally:
shutil.rmtree(tempdir)
# Turn caching off now
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity, linkage=linkage)
# Check that we obtain the same solution with early-stopping of the
# tree building
clustering.compute_full_tree = False
clustering.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
labels), 1)
clustering.connectivity = None
clustering.fit(X)
assert_true(np.size(np.unique(clustering.labels_)) == 10)
# Check that a connectivity matrix of the wrong shape raises a ValueError
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=sparse.lil_matrix(
connectivity.toarray()[:10, :10]),
linkage=linkage)
assert_raises(ValueError, clustering.fit, X)
# Test that using ward with another metric than euclidean raises an
# exception
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=connectivity.toarray(),
affinity="manhattan",
linkage="ward")
assert_raises(ValueError, clustering.fit, X)
# Test using another metric than euclidean works with linkage complete
for affinity in PAIRED_DISTANCES.keys():
# Compare our (structured) implementation to scipy
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=np.ones((n_samples, n_samples)),
affinity=affinity,
linkage="complete")
clustering.fit(X)
clustering2 = AgglomerativeClustering(
n_clusters=10,
connectivity=None,
affinity=affinity,
linkage="complete")
clustering2.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering2.labels_,
clustering.labels_),
1)
# Test that using a distance matrix (affinity = 'precomputed') has same
# results (with connectivity constraints)
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage="complete")
clustering.fit(X)
X_dist = pairwise_distances(X)
clustering2 = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
affinity='precomputed',
linkage="complete")
clustering2.fit(X_dist)
assert_array_equal(clustering.labels_, clustering2.labels_)
def test_ward_agglomeration():
# Check that we obtain the correct solution in a simplistic case
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)
agglo.fit(X)
assert_true(np.size(np.unique(agglo.labels_)) == 5)
X_red = agglo.transform(X)
assert_true(X_red.shape[1] == 5)
X_full = agglo.inverse_transform(X_red)
assert_true(np.unique(X_full[0]).size == 5)
assert_array_almost_equal(agglo.transform(X_full), X_red)
# Check that fitting with no samples raises a ValueError
assert_raises(ValueError, agglo.fit, X[:0])
def assess_same_labelling(cut1, cut2):
"""Util for comparison with scipy"""
co_clust = []
for cut in [cut1, cut2]:
n = len(cut)
k = cut.max() + 1
ecut = np.zeros((n, k))
ecut[np.arange(n), cut] = 1
co_clust.append(np.dot(ecut, ecut.T))
assert_true((co_clust[0] == co_clust[1]).all())
def test_scikit_vs_scipy():
# Test scikit linkage with full connectivity (i.e. unstructured) vs scipy
n, p, k = 10, 5, 3
rng = np.random.RandomState(0)
# Not using a lil_matrix here, just to check that non-sparse
# matrices are well handled
connectivity = np.ones((n, n))
for linkage in _TREE_BUILDERS.keys():
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out = hierarchy.linkage(X, method=linkage)
children_ = out[:, :2].astype(np.int)
children, _, n_leaves, _ = _TREE_BUILDERS[linkage](X, connectivity)
cut = _hc_cut(k, children, n_leaves)
cut_ = _hc_cut(k, children_, n_leaves)
assess_same_labelling(cut, cut_)
# Test error management in _hc_cut
assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_propagation():
# Check that connectivity in the ward tree is propagated correctly during
# merging.
X = np.array([(.014, .120), (.014, .099), (.014, .097),
(.017, .153), (.017, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .152), (.018, .149), (.018, .144)])
connectivity = kneighbors_graph(X, 10, include_self=False)
ward = AgglomerativeClustering(
n_clusters=4, connectivity=connectivity, linkage='ward')
# If changes are not propagated correctly, fit crashes with an
# IndexError
ward.fit(X)
def test_ward_tree_children_order():
# Check that children are ordered in the same way for both structured and
# unstructured versions of ward_tree.
# test on five random datasets
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X)
out_structured = ward_tree(X, connectivity=connectivity)
assert_array_equal(out_unstructured[0], out_structured[0])
def test_ward_linkage_tree_return_distance():
# Test return_distance option on linkage and ward trees
# test that return_distance when set true, gives same
# output on both structured and unstructured clustering.
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X, return_distance=True)
out_structured = ward_tree(X, connectivity=connectivity,
return_distance=True)
# get children
children_unstructured = out_unstructured[0]
children_structured = out_structured[0]
# check if we got the same clusters
assert_array_equal(children_unstructured, children_structured)
# check if the distances are the same
dist_unstructured = out_unstructured[-1]
dist_structured = out_structured[-1]
assert_array_almost_equal(dist_unstructured, dist_structured)
for linkage in ['average', 'complete']:
structured_items = linkage_tree(
X, connectivity=connectivity, linkage=linkage,
return_distance=True)[-1]
unstructured_items = linkage_tree(
X, linkage=linkage, return_distance=True)[-1]
structured_dist = structured_items[-1]
unstructured_dist = unstructured_items[-1]
structured_children = structured_items[0]
unstructured_children = unstructured_items[0]
assert_array_almost_equal(structured_dist, unstructured_dist)
assert_array_almost_equal(
structured_children, unstructured_children)
# test on the following dataset where we know the truth
# taken from scipy/cluster/tests/hierarchy_test_data.py
X = np.array([[1.43054825, -7.5693489],
[6.95887839, 6.82293382],
[2.87137846, -9.68248579],
[7.87974764, -6.05485803],
[8.24018364, -6.09495602],
[7.39020262, 8.54004355]])
# truth
linkage_X_ward = np.array([[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 9.10208346, 4.],
[7., 9., 24.7784379, 6.]])
linkage_X_complete = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.96742194, 4.],
[7., 9., 18.77445997, 6.]])
linkage_X_average = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.55832839, 4.],
[7., 9., 15.44089605, 6.]])
n_samples, n_features = np.shape(X)
connectivity_X = np.ones((n_samples, n_samples))
out_X_unstructured = ward_tree(X, return_distance=True)
out_X_structured = ward_tree(X, connectivity=connectivity_X,
return_distance=True)
# check that the labels are the same
assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0])
assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4])
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4])
linkage_options = ['complete', 'average']
X_linkage_truth = [linkage_X_complete, linkage_X_average]
for (linkage, X_truth) in zip(linkage_options, X_linkage_truth):
out_X_unstructured = linkage_tree(
X, return_distance=True, linkage=linkage)
out_X_structured = linkage_tree(
X, connectivity=connectivity_X, linkage=linkage,
return_distance=True)
# check that the labels are the same
assert_array_equal(X_truth[:, :2], out_X_unstructured[0])
assert_array_equal(X_truth[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4])
assert_array_almost_equal(X_truth[:, 2], out_X_structured[4])
def test_connectivity_fixing_non_lil():
# Non-regression check for a bug when a connectivity matrix that does not
# support item assignment is provided with more than one connected component.
# create dummy data
x = np.array([[0, 0], [1, 1]])
# create a mask with several components to force connectivity fixing
m = np.array([[True, False], [False, True]])
c = grid_to_graph(n_x=2, n_y=2, mask=m)
w = AgglomerativeClustering(connectivity=c, linkage='ward')
assert_warns(UserWarning, w.fit, x)
def test_int_float_dict():
rng = np.random.RandomState(0)
keys = np.unique(rng.randint(100, size=10).astype(np.intp))
values = rng.rand(len(keys))
d = IntFloatDict(keys, values)
for key, value in zip(keys, values):
assert d[key] == value
other_keys = np.arange(50).astype(np.intp)[::2]
other_values = 0.5 * np.ones(50)[::2]
other = IntFloatDict(other_keys, other_values)
# Complete smoke test
max_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
average_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
def test_connectivity_callable():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(
connectivity=partial(kneighbors_graph, n_neighbors=3, include_self=False))
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_connectivity_ignores_diagonal():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
connectivity_include_self = kneighbors_graph(X, 3, include_self=True)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(connectivity=connectivity_include_self)
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_compute_full_tree():
# Test that the full tree is computed if n_clusters is small
rng = np.random.RandomState(0)
X = rng.randn(10, 2)
connectivity = kneighbors_graph(X, 5, include_self=False)
# When n_clusters is small, the full tree should be built,
# that is, the number of merges should be n_samples - 1
agc = AgglomerativeClustering(n_clusters=2, connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - 1)
# When n_clusters is large (greater than the max of 100 and 0.02 * n_samples),
# we should stop as soon as n_clusters clusters remain.
n_clusters = 101
X = rng.randn(200, 2)
connectivity = kneighbors_graph(X, 10, include_self=False)
agc = AgglomerativeClustering(n_clusters=n_clusters,
connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - n_clusters)
def test_n_components():
# Test n_components returned by linkage, average and ward tree
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Connectivity matrix having five components.
connectivity = np.eye(5)
for linkage_func in _TREE_BUILDERS.values():
assert_equal(ignore_warnings(linkage_func)(X, connectivity)[1], 5)
def test_agg_n_clusters():
# Test that an error is raised when n_clusters <= 0
rng = np.random.RandomState(0)
X = rng.rand(20, 10)
for n_clus in [-1, 0]:
agc = AgglomerativeClustering(n_clusters=n_clus)
msg = ("n_clusters should be an integer greater than 0."
" %s was provided." % str(agc.n_clusters))
assert_raise_message(ValueError, msg, agc.fit, X)
| bsd-3-clause |
ishanic/scikit-learn | sklearn/cluster/tests/test_hierarchical.py | 228 | 19795 | """
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012,
# Matteo Visconti di Oleggio Castello 2014
# License: BSD 3 clause
from tempfile import mkdtemp
import shutil
from functools import partial
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.cluster import ward_tree
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration
from sklearn.cluster.hierarchical import (_hc_cut, _TREE_BUILDERS,
linkage_tree)
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.metrics.pairwise import PAIRED_DISTANCES, cosine_distances,\
manhattan_distances, pairwise_distances
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.neighbors.graph import kneighbors_graph
from sklearn.cluster._hierarchical import average_merge, max_merge
from sklearn.utils.fast_dict import IntFloatDict
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
def test_linkage_misc():
# Misc tests on linkage
rng = np.random.RandomState(42)
X = rng.normal(size=(5, 5))
assert_raises(ValueError, AgglomerativeClustering(linkage='foo').fit, X)
assert_raises(ValueError, linkage_tree, X, linkage='foo')
assert_raises(ValueError, linkage_tree, X, connectivity=np.ones((4, 4)))
# Smoke test FeatureAgglomeration
FeatureAgglomeration().fit(X)
# test hiearchical clustering on a precomputed distances matrix
dis = cosine_distances(X)
res = linkage_tree(dis, affinity="precomputed")
assert_array_equal(res[0], linkage_tree(X, affinity="cosine")[0])
# test hiearchical clustering on a precomputed distances matrix
res = linkage_tree(X, affinity=manhattan_distances)
assert_array_equal(res[0], linkage_tree(X, affinity="manhattan")[0])
def test_structured_linkage_tree():
# Check that we obtain the correct solution for structured linkage trees.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
# Avoiding a mask with only 'True' entries
mask[4:7, 4:7] = 0
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for tree_builder in _TREE_BUILDERS.values():
children, n_components, n_leaves, parent = \
tree_builder(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
# Check that ward_tree raises a ValueError with a connectivity matrix
# of the wrong shape
assert_raises(ValueError,
tree_builder, X.T, np.ones((4, 4)))
# Check that fitting with no samples raises an error
assert_raises(ValueError,
tree_builder, X.T[:0], connectivity)
def test_unstructured_linkage_tree():
# Check that we obtain the correct solution for unstructured linkage trees.
rng = np.random.RandomState(0)
X = rng.randn(50, 100)
for this_X in (X, X[0]):
# With specified a number of clusters just for the sake of
# raising a warning and testing the warning code
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, ward_tree, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
for tree_builder in _TREE_BUILDERS.values():
for this_X in (X, X[0]):
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, tree_builder, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
def test_height_linkage_tree():
# Check that the height of the results of linkage tree is sorted.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for linkage_func in _TREE_BUILDERS.values():
children, n_nodes, n_leaves, parent = linkage_func(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
def test_agglomerative_clustering():
# Check that we obtain the correct number of clusters with
# agglomerative clustering.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
n_samples = 100
X = rng.randn(n_samples, 50)
connectivity = grid_to_graph(*mask.shape)
for linkage in ("ward", "complete", "average"):
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage=linkage)
clustering.fit(X)
# test caching
try:
tempdir = mkdtemp()
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity,
memory=tempdir,
linkage=linkage)
clustering.fit(X)
labels = clustering.labels_
assert_true(np.size(np.unique(labels)) == 10)
finally:
shutil.rmtree(tempdir)
# Turn caching off now
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity, linkage=linkage)
# Check that we obtain the same solution with early-stopping of the
# tree building
clustering.compute_full_tree = False
clustering.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
labels), 1)
clustering.connectivity = None
clustering.fit(X)
assert_true(np.size(np.unique(clustering.labels_)) == 10)
# Check that we raise a TypeError on dense matrices
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=sparse.lil_matrix(
connectivity.toarray()[:10, :10]),
linkage=linkage)
assert_raises(ValueError, clustering.fit, X)
# Test that using ward with another metric than euclidean raises an
# exception
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=connectivity.toarray(),
affinity="manhattan",
linkage="ward")
assert_raises(ValueError, clustering.fit, X)
# Test using another metric than euclidean works with linkage complete
for affinity in PAIRED_DISTANCES.keys():
# Compare our (structured) implementation to scipy
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=np.ones((n_samples, n_samples)),
affinity=affinity,
linkage="complete")
clustering.fit(X)
clustering2 = AgglomerativeClustering(
n_clusters=10,
connectivity=None,
affinity=affinity,
linkage="complete")
clustering2.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering2.labels_,
clustering.labels_),
1)
# Test that using a distance matrix (affinity = 'precomputed') has same
# results (with connectivity constraints)
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage="complete")
clustering.fit(X)
X_dist = pairwise_distances(X)
clustering2 = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
affinity='precomputed',
linkage="complete")
clustering2.fit(X_dist)
assert_array_equal(clustering.labels_, clustering2.labels_)
def test_ward_agglomeration():
# Check that we obtain the correct solution in a simplistic case
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)
agglo.fit(X)
assert_true(np.size(np.unique(agglo.labels_)) == 5)
X_red = agglo.transform(X)
assert_true(X_red.shape[1] == 5)
X_full = agglo.inverse_transform(X_red)
assert_true(np.unique(X_full[0]).size == 5)
assert_array_almost_equal(agglo.transform(X_full), X_red)
# Check that fitting with no samples raises a ValueError
assert_raises(ValueError, agglo.fit, X[:0])
def assess_same_labelling(cut1, cut2):
"""Util for comparison with scipy"""
co_clust = []
for cut in [cut1, cut2]:
n = len(cut)
k = cut.max() + 1
ecut = np.zeros((n, k))
ecut[np.arange(n), cut] = 1
co_clust.append(np.dot(ecut, ecut.T))
assert_true((co_clust[0] == co_clust[1]).all())
def test_scikit_vs_scipy():
# Test scikit linkage with full connectivity (i.e. unstructured) vs scipy
n, p, k = 10, 5, 3
rng = np.random.RandomState(0)
# Not using a lil_matrix here, just to check that non sparse
# matrices are well handled
connectivity = np.ones((n, n))
for linkage in _TREE_BUILDERS.keys():
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out = hierarchy.linkage(X, method=linkage)
children_ = out[:, :2].astype(np.int)
children, _, n_leaves, _ = _TREE_BUILDERS[linkage](X, connectivity)
cut = _hc_cut(k, children, n_leaves)
cut_ = _hc_cut(k, children_, n_leaves)
assess_same_labelling(cut, cut_)
# Test error management in _hc_cut
assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_propagation():
# Check that connectivity in the ward tree is propagated correctly during
# merging.
X = np.array([(.014, .120), (.014, .099), (.014, .097),
(.017, .153), (.017, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .152), (.018, .149), (.018, .144)])
connectivity = kneighbors_graph(X, 10, include_self=False)
ward = AgglomerativeClustering(
n_clusters=4, connectivity=connectivity, linkage='ward')
# If changes are not propagated correctly, fit crashes with an
# IndexError
ward.fit(X)
def test_ward_tree_children_order():
# Check that children are ordered in the same way for both structured and
# unstructured versions of ward_tree.
# test on five random datasets
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X)
out_structured = ward_tree(X, connectivity=connectivity)
assert_array_equal(out_unstructured[0], out_structured[0])
def test_ward_linkage_tree_return_distance():
# Test return_distance option on linkage and ward trees
# test that return_distance when set true, gives same
# output on both structured and unstructured clustering.
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X, return_distance=True)
out_structured = ward_tree(X, connectivity=connectivity,
return_distance=True)
# get children
children_unstructured = out_unstructured[0]
children_structured = out_structured[0]
# check if we got the same clusters
assert_array_equal(children_unstructured, children_structured)
# check if the distances are the same
dist_unstructured = out_unstructured[-1]
dist_structured = out_structured[-1]
assert_array_almost_equal(dist_unstructured, dist_structured)
for linkage in ['average', 'complete']:
structured_items = linkage_tree(
X, connectivity=connectivity, linkage=linkage,
return_distance=True)[-1]
unstructured_items = linkage_tree(
X, linkage=linkage, return_distance=True)[-1]
structured_dist = structured_items[-1]
unstructured_dist = unstructured_items[-1]
structured_children = structured_items[0]
unstructured_children = unstructured_items[0]
assert_array_almost_equal(structured_dist, unstructured_dist)
assert_array_almost_equal(
structured_children, unstructured_children)
# test on the following dataset where we know the truth
# taken from scipy/cluster/tests/hierarchy_test_data.py
X = np.array([[1.43054825, -7.5693489],
[6.95887839, 6.82293382],
[2.87137846, -9.68248579],
[7.87974764, -6.05485803],
[8.24018364, -6.09495602],
[7.39020262, 8.54004355]])
# truth
linkage_X_ward = np.array([[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 9.10208346, 4.],
[7., 9., 24.7784379, 6.]])
linkage_X_complete = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.96742194, 4.],
[7., 9., 18.77445997, 6.]])
linkage_X_average = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.55832839, 4.],
[7., 9., 15.44089605, 6.]])
n_samples, n_features = np.shape(X)
connectivity_X = np.ones((n_samples, n_samples))
out_X_unstructured = ward_tree(X, return_distance=True)
out_X_structured = ward_tree(X, connectivity=connectivity_X,
return_distance=True)
# check that the labels are the same
assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0])
assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4])
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4])
linkage_options = ['complete', 'average']
X_linkage_truth = [linkage_X_complete, linkage_X_average]
for (linkage, X_truth) in zip(linkage_options, X_linkage_truth):
out_X_unstructured = linkage_tree(
X, return_distance=True, linkage=linkage)
out_X_structured = linkage_tree(
X, connectivity=connectivity_X, linkage=linkage,
return_distance=True)
# check that the labels are the same
assert_array_equal(X_truth[:, :2], out_X_unstructured[0])
assert_array_equal(X_truth[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4])
assert_array_almost_equal(X_truth[:, 2], out_X_structured[4])
def test_connectivity_fixing_non_lil():
# Check non regression of a bug if a non item assignable connectivity is
# provided with more than one component.
# create dummy data
x = np.array([[0, 0], [1, 1]])
# create a mask with several components to force connectivity fixing
m = np.array([[True, False], [False, True]])
c = grid_to_graph(n_x=2, n_y=2, mask=m)
w = AgglomerativeClustering(connectivity=c, linkage='ward')
assert_warns(UserWarning, w.fit, x)
def test_int_float_dict():
rng = np.random.RandomState(0)
keys = np.unique(rng.randint(100, size=10).astype(np.intp))
values = rng.rand(len(keys))
d = IntFloatDict(keys, values)
for key, value in zip(keys, values):
assert d[key] == value
other_keys = np.arange(50).astype(np.intp)[::2]
other_values = 0.5 * np.ones(50)[::2]
other = IntFloatDict(other_keys, other_values)
# Complete smoke test
max_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
average_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
def test_connectivity_callable():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(
connectivity=partial(kneighbors_graph, n_neighbors=3, include_self=False))
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_connectivity_ignores_diagonal():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
connectivity_include_self = kneighbors_graph(X, 3, include_self=True)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(connectivity=connectivity_include_self)
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_compute_full_tree():
# Test that the full tree is computed if n_clusters is small
rng = np.random.RandomState(0)
X = rng.randn(10, 2)
connectivity = kneighbors_graph(X, 5, include_self=False)
# When n_clusters is less, the full tree should be built
# that is the number of merges should be n_samples - 1
agc = AgglomerativeClustering(n_clusters=2, connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - 1)
# When n_clusters is large, greater than max of 100 and 0.02 * n_samples.
# we should stop when there are n_clusters.
n_clusters = 101
X = rng.randn(200, 2)
connectivity = kneighbors_graph(X, 10, include_self=False)
agc = AgglomerativeClustering(n_clusters=n_clusters,
connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - n_clusters)
def test_n_components():
# Test n_components returned by linkage, average and ward tree
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Connectivity matrix having five components.
connectivity = np.eye(5)
for linkage_func in _TREE_BUILDERS.values():
assert_equal(ignore_warnings(linkage_func)(X, connectivity)[1], 5)
def test_agg_n_clusters():
# Test that an error is raised when n_clusters <= 0
rng = np.random.RandomState(0)
X = rng.rand(20, 10)
for n_clus in [-1, 0]:
agc = AgglomerativeClustering(n_clusters=n_clus)
msg = ("n_clusters should be an integer greater than 0."
" %s was provided." % str(agc.n_clusters))
assert_raise_message(ValueError, msg, agc.fit, X)
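# Editor's note: a minimal, hedged usage sketch (not part of the original
# test module) of the connectivity-constrained clustering pattern exercised
# above; the sample data and parameter values are illustrative assumptions.
def _example_structured_agglomerative_clustering():
    import numpy as np
    from sklearn.cluster import AgglomerativeClustering
    from sklearn.neighbors import kneighbors_graph
    rng = np.random.RandomState(0)
    X = rng.rand(30, 2)
    # Restrict merges to each sample's 5 nearest neighbours, as in the
    # connectivity-constrained tests above.
    connectivity = kneighbors_graph(X, 5, include_self=False)
    model = AgglomerativeClustering(n_clusters=3, connectivity=connectivity,
                                    linkage='ward')
    return model.fit_predict(X)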
| bsd-3-clause |
ashhher3/pylearn2 | pylearn2/datasets/hdf5_deprecated.py | 30 | 13414 | """
Objects for datasets serialized in HDF5 format (.h5).
"""
__author__ = "Steven Kearnes"
__copyright__ = "Copyright 2014, Stanford University"
__license__ = "3-clause BSD"
try:
import h5py
except ImportError:
h5py = None
import numpy as np
from theano.compat.six.moves import xrange
import warnings
from pylearn2.datasets.dense_design_matrix import (DenseDesignMatrix,
DefaultViewConverter)
from pylearn2.space import CompositeSpace, VectorSpace, IndexSpace
from pylearn2.utils.iteration import FiniteDatasetIterator, safe_izip
from pylearn2.utils import contains_nan
class HDF5DatasetDeprecated(DenseDesignMatrix):
"""
Dense dataset loaded from an HDF5 file.
Parameters
----------
filename : str
HDF5 file name.
X : str, optional
Key into HDF5 file for dataset design matrix.
topo_view: str, optional
Key into HDF5 file for topological view of dataset.
y : str, optional
Key into HDF5 file for dataset targets.
load_all : bool, optional (default False)
If true, datasets are loaded into memory instead of being left
on disk.
    cache_size : int, optional
        Size in bytes of the chunk cache of the HDF5 library. Useful when
        the HDF5 file has large chunks and a sequential iterator is used.
        The chunk cache makes it possible to hit the disk only once per
        chunk and then copy the batches to the GPU from memory, which can
        result in a significant speed-up. Sensible values depend on the
        size of your data and the batch size you wish to use. A rule of
        thumb is to make a chunk contain 100 - 1000 batches and make sure
        chunks encompass complete samples.
kwargs : dict, optional
Keyword arguments passed to `DenseDesignMatrix`.
"""
def __init__(self, filename, X=None, topo_view=None, y=None,
load_all=False, cache_size=None, **kwargs):
self.load_all = load_all
if h5py is None:
raise RuntimeError("Could not import h5py.")
if cache_size:
propfaid = h5py.h5p.create(h5py.h5p.FILE_ACCESS)
settings = list(propfaid.get_cache())
settings[2] = cache_size
propfaid.set_cache(*settings)
fid = h5py.h5f.open(filename, fapl=propfaid)
self._file = h5py.File(fid)
else:
self._file = h5py.File(filename)
if X is not None:
X = self.get_dataset(X, load_all)
if topo_view is not None:
topo_view = self.get_dataset(topo_view, load_all)
if y is not None:
y = self.get_dataset(y, load_all)
super(HDF5DatasetDeprecated, self).__init__(X=X, topo_view=topo_view,
y=y, **kwargs)
def _check_labels(self):
"""
Sanity checks for X_labels and y_labels.
Since the np.all test used for these labels does not work with HDF5
datasets, we issue a warning that those values are not checked.
"""
if self.X_labels is not None:
assert self.X is not None
assert self.view_converter is None
assert self.X.ndim <= 2
if self.load_all:
assert np.all(self.X < self.X_labels)
else:
warnings.warn("HDF5Dataset cannot perform test np.all(X < " +
"X_labels). Use X_labels at your own risk.")
if self.y_labels is not None:
assert self.y is not None
assert self.y.ndim <= 2
if self.load_all:
assert np.all(self.y < self.y_labels)
else:
warnings.warn("HDF5Dataset cannot perform test np.all(y < " +
"y_labels). Use y_labels at your own risk.")
def get_dataset(self, dataset, load_all=False):
"""
Get a handle for an HDF5 dataset, or load the entire dataset into
memory.
Parameters
----------
dataset : str
Name or path of HDF5 dataset.
load_all : bool, optional (default False)
If true, load dataset into memory.
"""
if load_all:
data = self._file[dataset][:]
else:
data = self._file[dataset]
data.ndim = len(data.shape) # hdf5 handle has no ndim
return data
def iterator(self, *args, **kwargs):
"""
Get an iterator for this dataset.
The FiniteDatasetIterator uses indexing that is not supported by
HDF5 datasets, so we change the class to HDF5DatasetIterator to
override the iterator.next method used in dataset iteration.
Parameters
----------
WRITEME
"""
iterator = super(HDF5DatasetDeprecated, self).iterator(*args, **kwargs)
iterator.__class__ = HDF5DatasetIterator
return iterator
def set_topological_view(self, V, axes=('b', 0, 1, 'c')):
"""
Set up dataset topological view, without building an in-memory
design matrix.
This is mostly copied from DenseDesignMatrix, except:
* HDF5ViewConverter is used instead of DefaultViewConverter
* Data specs are derived from topo_view, not X
* NaN checks have been moved to HDF5DatasetIterator.next
Note that y may be loaded into memory for reshaping if y.ndim != 2.
Parameters
----------
V : ndarray
Topological view.
axes : tuple, optional (default ('b', 0, 1, 'c'))
Order of axes in topological view.
"""
shape = [V.shape[axes.index('b')],
V.shape[axes.index(0)],
V.shape[axes.index(1)],
V.shape[axes.index('c')]]
self.view_converter = HDF5ViewConverter(shape[1:], axes=axes)
self.X = self.view_converter.topo_view_to_design_mat(V)
# self.X_topo_space stores a "default" topological space that
# will be used only when self.iterator is called without a
# data_specs, and with "topo=True", which is deprecated.
self.X_topo_space = self.view_converter.topo_space
# Update data specs
X_space = VectorSpace(dim=V.shape[axes.index('b')])
X_source = 'features'
if self.y is None:
space = X_space
source = X_source
else:
if self.y.ndim == 1:
dim = 1
else:
dim = self.y.shape[-1]
# check if y_labels has been specified
if getattr(self, 'y_labels', None) is not None:
y_space = IndexSpace(dim=dim, max_labels=self.y_labels)
elif getattr(self, 'max_labels', None) is not None:
y_space = IndexSpace(dim=dim, max_labels=self.max_labels)
else:
y_space = VectorSpace(dim=dim)
y_source = 'targets'
space = CompositeSpace((X_space, y_space))
source = (X_source, y_source)
self.data_specs = (space, source)
self.X_space = X_space
self._iter_data_specs = (X_space, X_source)
class HDF5DatasetIterator(FiniteDatasetIterator):
"""
Dataset iterator for HDF5 datasets.
FiniteDatasetIterator expects a design matrix to be available, but this
will not always be the case when using HDF5 datasets with topological
views.
Parameters
----------
dataset : Dataset
Dataset over which to iterate.
subset_iterator : object
Iterator that returns slices of the dataset.
data_specs : tuple, optional
A (space, source) tuple.
return_tuple : bool, optional (default False)
Whether to return a tuple even if only one source is used.
convert : list, optional
A list of callables (in the same order as the sources in
data_specs) that will be applied to each slice of the dataset.
"""
def next(self):
"""
Get the next subset of the dataset during dataset iteration.
Converts index selections for batches to boolean selections that
are supported by HDF5 datasets.
"""
next_index = self._subset_iterator.next()
# convert to boolean selection
sel = np.zeros(self.num_examples, dtype=bool)
sel[next_index] = True
next_index = sel
rval = []
for data, fn in safe_izip(self._raw_data, self._convert):
try:
this_data = data[next_index]
except TypeError:
                # FB: Why is this try..except here? I think it is useless.
                # Do not hide the original error if we can't fall back.
                # FV: This is triggered when the shape of next_index is
                # incompatible with the shape of the dataset. See
                # test_hdf5_topo_view() for an example, where
                # next_index.shape = (10,) and data is the HDF5 dataset
                # "y" with shape (10, 3) and dtype "<f8".
                # It would be better to explicitly check whether
                # next_index.shape is incompatible with data.shape, for
                # instance by checking if next_index.ndim == data.ndim.
if data.ndim > 1:
this_data = data[next_index, :]
else:
raise
# Check if the dataset data is a vector and transform it into a
# one-column matrix. This is needed to automatically convert the
# shape of the data later (in the format_as method of the
# Space.)
if fn:
this_data = fn(this_data)
assert not contains_nan(this_data)
rval.append(this_data)
rval = tuple(rval)
if not self._return_tuple and len(rval) == 1:
rval, = rval
return rval
class HDF5ViewConverter(DefaultViewConverter):
"""
View converter that doesn't have to transpose the data.
In order to keep data on disk, does not generate a full design matrix.
Instead, an instance of HDF5TopoViewConverter is returned, which
transforms data from the topological view into the design view for each
batch.
Parameters
----------
shape : tuple
Shape of this view.
axes : tuple, optional (default ('b', 0, 1, 'c'))
Order of axes in topological view.
"""
def topo_view_to_design_mat(self, V):
"""
Generate a design matrix from the topological view.
This override of DefaultViewConverter.topo_view_to_design_mat does
not attempt to transpose the topological view, since transposition
is not supported by HDF5 datasets.
Parameters
----------
WRITEME
"""
v_shape = (V.shape[self.axes.index('b')],
V.shape[self.axes.index(0)],
V.shape[self.axes.index(1)],
V.shape[self.axes.index('c')])
if np.any(np.asarray(self.shape) != np.asarray(v_shape[1:])):
raise ValueError('View converter for views of shape batch size '
'followed by ' + str(self.shape) +
' given tensor of shape ' + str(v_shape))
rval = HDF5TopoViewConverter(V, self.axes)
return rval
class HDF5TopoViewConverter(object):
"""
Class for transforming batches from the topological view to the design
matrix view.
Parameters
----------
topo_view : HDF5 dataset
On-disk topological view.
axes : tuple, optional (default ('b', 0, 1, 'c'))
Order of axes in topological view.
"""
def __init__(self, topo_view, axes=('b', 0, 1, 'c')):
self.topo_view = topo_view
self.axes = axes
self.topo_view_shape = (topo_view.shape[axes.index('b')],
topo_view.shape[axes.index(0)],
topo_view.shape[axes.index(1)],
topo_view.shape[axes.index('c')])
self.pixels_per_channel = (self.topo_view_shape[1] *
self.topo_view_shape[2])
self.n_channels = self.topo_view_shape[3]
self.shape = (self.topo_view_shape[0],
np.product(self.topo_view_shape[1:]))
self.ndim = len(self.shape)
def __getitem__(self, item):
"""
Indexes the design matrix and transforms the requested batch from
the topological view.
Parameters
----------
item : slice or ndarray
Batch selection. Either a slice or a boolean mask.
"""
sel = [slice(None)] * len(self.topo_view_shape)
sel[self.axes.index('b')] = item
sel = tuple(sel)
V = self.topo_view[sel]
batch_size = V.shape[self.axes.index('b')]
rval = np.zeros((batch_size,
self.pixels_per_channel * self.n_channels),
dtype=V.dtype)
for i in xrange(self.n_channels):
ppc = self.pixels_per_channel
sel = [slice(None)] * len(V.shape)
sel[self.axes.index('c')] = i
sel = tuple(sel)
rval[:, i * ppc:(i + 1) * ppc] = V[sel].reshape(batch_size, ppc)
return rval
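# Editor's note: a rough usage sketch (not part of the original module),
# assuming an h5py version compatible with this code. The file name and the
# dataset keys 'X' and 'y' are illustrative assumptions.
def _example_hdf5_dataset(path='example_dataset.h5'):
    import numpy as np
    import h5py
    rng = np.random.RandomState(0)
    with h5py.File(path, 'w') as f:
        f.create_dataset('X', data=rng.rand(100, 10))
        f.create_dataset('y', data=rng.rand(100, 1))
    # load_all=False keeps the design matrix on disk as an h5py handle.
    ds = HDF5DatasetDeprecated(path, X='X', y='y', load_all=False)
    return ds.X.shape, ds.y.shape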
| bsd-3-clause |
zxsted/scipy | scipy/stats/mstats_basic.py | 18 | 82304 | """
An extension of scipy.stats.stats to support masked arrays
"""
# Original author (2007): Pierre GF Gerard-Marchant
# TODO : f_value_wilks_lambda looks botched... what are dfnum & dfden for ?
# TODO : ttest_rel looks botched: what are x1,x2,v1,v2 for ?
# TODO : reimplement ksonesamp
from __future__ import division, print_function, absolute_import
__all__ = ['argstoarray',
'betai',
'count_tied_groups',
'describe',
'f_oneway','f_value_wilks_lambda','find_repeats','friedmanchisquare',
'kendalltau','kendalltau_seasonal','kruskal','kruskalwallis',
'ks_twosamp','ks_2samp','kurtosis','kurtosistest',
'linregress',
'mannwhitneyu', 'meppf','mode','moment','mquantiles','msign',
'normaltest',
'obrientransform',
'pearsonr','plotting_positions','pointbiserialr',
'rankdata',
'scoreatpercentile','sem',
'sen_seasonal_slopes','signaltonoise','skew','skewtest','spearmanr',
'theilslopes','threshold','tmax','tmean','tmin','trim','trimboth',
'trimtail','trima','trimr','trimmed_mean','trimmed_std',
'trimmed_stde','trimmed_var','tsem','ttest_1samp','ttest_onesamp',
'ttest_ind','ttest_rel','tvar',
'variation',
'winsorize',
]
import numpy as np
from numpy import ndarray
import numpy.ma as ma
from numpy.ma import masked, nomask
from scipy._lib.six import iteritems
import itertools
import warnings
from collections import namedtuple
from . import distributions
import scipy.special as special
from . import futil
from ._stats_mstats_common import (
linregress as stats_linregress,
theilslopes as stats_theilslopes
)
genmissingvaldoc = """
Notes
-----
Missing values are considered pair-wise: if a value is missing in x,
the corresponding value in y is masked.
"""
def _chk_asarray(a, axis):
# Always returns a masked array, raveled for axis=None
a = ma.asanyarray(a)
if axis is None:
a = ma.ravel(a)
outaxis = 0
else:
outaxis = axis
return a, outaxis
def _chk2_asarray(a, b, axis):
a = ma.asanyarray(a)
b = ma.asanyarray(b)
if axis is None:
a = ma.ravel(a)
b = ma.ravel(b)
outaxis = 0
else:
outaxis = axis
return a, b, outaxis
def _chk_size(a,b):
a = ma.asanyarray(a)
b = ma.asanyarray(b)
(na, nb) = (a.size, b.size)
if na != nb:
raise ValueError("The size of the input array should match!"
" (%s <> %s)" % (na, nb))
return (a, b, na)
def argstoarray(*args):
"""
Constructs a 2D array from a group of sequences.
Sequences are filled with missing values to match the length of the longest
sequence.
Parameters
----------
args : sequences
Group of sequences.
Returns
-------
argstoarray : MaskedArray
A ( `m` x `n` ) masked array, where `m` is the number of arguments and
`n` the length of the longest argument.
Notes
-----
`numpy.ma.row_stack` has identical behavior, but is called with a sequence
of sequences.
"""
if len(args) == 1 and not isinstance(args[0], ndarray):
output = ma.asarray(args[0])
if output.ndim != 2:
raise ValueError("The input should be 2D")
else:
n = len(args)
m = max([len(k) for k in args])
output = ma.array(np.empty((n,m), dtype=float), mask=True)
for (k,v) in enumerate(args):
output[k,:len(v)] = v
output[np.logical_not(np.isfinite(output._data))] = masked
return output
def find_repeats(arr):
"""Find repeats in arr and return a tuple (repeats, repeat_count).
Masked values are discarded.
Parameters
----------
arr : sequence
Input array. The array is flattened if it is not 1D.
Returns
-------
repeats : ndarray
Array of repeated values.
counts : ndarray
Array of counts.
"""
marr = ma.compressed(arr)
if not marr.size:
return (np.array(0), np.array(0))
(v1, v2, n) = futil.dfreps(ma.array(ma.compressed(arr), copy=True))
return (v1[:n], v2[:n])
def count_tied_groups(x, use_missing=False):
"""
Counts the number of tied values.
Parameters
----------
x : sequence
Sequence of data on which to counts the ties
use_missing : bool, optional
Whether to consider missing values as tied.
Returns
-------
count_tied_groups : dict
Returns a dictionary (nb of ties: nb of groups).
Examples
--------
>>> from scipy.stats import mstats
>>> z = [0, 0, 0, 2, 2, 2, 3, 3, 4, 5, 6]
>>> mstats.count_tied_groups(z)
{2: 1, 3: 2}
In the above example, the ties were 0 (3x), 2 (3x) and 3 (2x).
>>> z = np.ma.array([0, 0, 1, 2, 2, 2, 3, 3, 4, 5, 6])
>>> mstats.count_tied_groups(z)
{2: 2, 3: 1}
>>> z[[1,-1]] = np.ma.masked
>>> mstats.count_tied_groups(z, use_missing=True)
{2: 2, 3: 1}
"""
nmasked = ma.getmask(x).sum()
# We need the copy as find_repeats will overwrite the initial data
data = ma.compressed(x).copy()
(ties, counts) = find_repeats(data)
nties = {}
if len(ties):
nties = dict(zip(np.unique(counts), itertools.repeat(1)))
nties.update(dict(zip(*find_repeats(counts))))
if nmasked and use_missing:
try:
nties[nmasked] += 1
except KeyError:
nties[nmasked] = 1
return nties
def rankdata(data, axis=None, use_missing=False):
"""Returns the rank (also known as order statistics) of each data point
along the given axis.
If some values are tied, their rank is averaged.
If some values are masked, their rank is set to 0 if use_missing is False,
or set to the average rank of the unmasked values if use_missing is True.
Parameters
----------
data : sequence
Input data. The data is transformed to a masked array
axis : {None,int}, optional
Axis along which to perform the ranking.
If None, the array is first flattened. An exception is raised if
the axis is specified for arrays with a dimension larger than 2
use_missing : bool, optional
Whether the masked values have a rank of 0 (False) or equal to the
average rank of the unmasked values (True).
"""
def _rank1d(data, use_missing=False):
n = data.count()
rk = np.empty(data.size, dtype=float)
idx = data.argsort()
rk[idx[:n]] = np.arange(1,n+1)
if use_missing:
rk[idx[n:]] = (n+1)/2.
else:
rk[idx[n:]] = 0
repeats = find_repeats(data.copy())
for r in repeats[0]:
condition = (data == r).filled(False)
rk[condition] = rk[condition].mean()
return rk
data = ma.array(data, copy=False)
if axis is None:
if data.ndim > 1:
return _rank1d(data.ravel(), use_missing).reshape(data.shape)
else:
return _rank1d(data, use_missing)
else:
return ma.apply_along_axis(_rank1d,axis,data,use_missing).view(ndarray)
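# Editor's note: a small illustrative sketch (not part of the original
# module) of how masked entries are ranked; the data values are assumptions.
def _example_rankdata_missing():
    data = ma.array([40., 10., 30., 20., 50.], mask=[0, 0, 1, 0, 0])
    ranks_default = rankdata(data)  # masked entry gets rank 0
    ranks_average = rankdata(data, use_missing=True)  # masked entry gets (n+1)/2 = 2.5
    return ranks_default, ranks_average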
def mode(a, axis=0):
"""
Returns an array of the modal (most common) value in the passed array.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
mode : ndarray
Array of modal values.
count : ndarray
Array of counts for each mode.
Notes
-----
For more details, see `stats.mode`.
"""
a, axis = _chk_asarray(a, axis)
def _mode1D(a):
(rep,cnt) = find_repeats(a)
if not cnt.ndim:
return (0, 0)
elif cnt.size:
return (rep[cnt.argmax()], cnt.max())
else:
not_masked_indices = ma.flatnotmasked_edges(a)
first_not_masked_index = not_masked_indices[0]
return (a[first_not_masked_index], 1)
if axis is None:
output = _mode1D(ma.ravel(a))
output = (ma.array(output[0]), ma.array(output[1]))
else:
output = ma.apply_along_axis(_mode1D, axis, a)
newshape = list(a.shape)
newshape[axis] = 1
slices = [slice(None)] * output.ndim
slices[axis] = 0
modes = output[tuple(slices)].reshape(newshape)
slices[axis] = 1
counts = output[tuple(slices)].reshape(newshape)
output = (modes, counts)
ModeResult = namedtuple('ModeResult', ('mode', 'count'))
return ModeResult(*output)
@np.deprecate(message="mstats.betai is deprecated in scipy 0.17.0; "
"use special.betainc instead.")
def betai(a, b, x):
"""
betai() is deprecated in scipy 0.17.0.
For details about this function, see `stats.betai`.
"""
return _betai(a, b, x)
def _betai(a, b, x):
x = np.asanyarray(x)
x = ma.where(x < 1.0, x, 1.0) # if x > 1 then return 1.0
return special.betainc(a, b, x)
def msign(x):
"""Returns the sign of x, or 0 if x is masked."""
return ma.filled(np.sign(x), 0)
def pearsonr(x,y):
"""
Calculates a Pearson correlation coefficient and the p-value for testing
non-correlation.
The Pearson correlation coefficient measures the linear relationship
between two datasets. Strictly speaking, Pearson's correlation requires
that each dataset be normally distributed. Like other correlation
coefficients, this one varies between -1 and +1 with 0 implying no
correlation. Correlations of -1 or +1 imply an exact linear
relationship. Positive correlations imply that as `x` increases, so does
`y`. Negative correlations imply that as `x` increases, `y` decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Pearson correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
x : 1-D array_like
Input
y : 1-D array_like
Input
Returns
-------
pearsonr : float
Pearson's correlation coefficient, 2-tailed p-value.
References
----------
http://www.statsoft.com/textbook/glosp.html#Pearson%20Correlation
"""
(x, y, n) = _chk_size(x, y)
(x, y) = (x.ravel(), y.ravel())
# Get the common mask and the total nb of unmasked elements
m = ma.mask_or(ma.getmask(x), ma.getmask(y))
n -= m.sum()
df = n-2
if df < 0:
return (masked, masked)
(mx, my) = (x.mean(), y.mean())
(xm, ym) = (x-mx, y-my)
r_num = ma.add.reduce(xm*ym)
r_den = ma.sqrt(ma.dot(xm,xm) * ma.dot(ym,ym))
r = r_num / r_den
# Presumably, if r > 1, then it is only some small artifact of floating
# point arithmetic.
r = min(r, 1.0)
r = max(r, -1.0)
df = n - 2
if r is masked or abs(r) == 1.0:
prob = 0.
else:
t_squared = (df / ((1.0 - r) * (1.0 + r))) * r * r
prob = _betai(0.5*df, 0.5, df/(df + t_squared))
return r, prob
def spearmanr(x, y, use_ties=True):
"""
Calculates a Spearman rank-order correlation coefficient and the p-value
to test for non-correlation.
The Spearman correlation is a nonparametric measure of the linear
relationship between two datasets. Unlike the Pearson correlation, the
Spearman correlation does not assume that both datasets are normally
distributed. Like other correlation coefficients, this one varies
between -1 and +1 with 0 implying no correlation. Correlations of -1 or
+1 imply an exact linear relationship. Positive correlations imply that
as `x` increases, so does `y`. Negative correlations imply that as `x`
increases, `y` decreases.
Missing values are discarded pair-wise: if a value is missing in `x`, the
corresponding value in `y` is masked.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
x : array_like
The length of `x` must be > 2.
y : array_like
The length of `y` must be > 2.
use_ties : bool, optional
Whether the correction for ties should be computed.
Returns
-------
correlation : float
Spearman correlation coefficient
pvalue : float
2-tailed p-value.
References
----------
[CRCProbStat2000] section 14.7
"""
(x, y, n) = _chk_size(x, y)
(x, y) = (x.ravel(), y.ravel())
m = ma.mask_or(ma.getmask(x), ma.getmask(y))
n -= m.sum()
if m is not nomask:
x = ma.array(x, mask=m, copy=True)
y = ma.array(y, mask=m, copy=True)
df = n-2
if df < 0:
raise ValueError("The input must have at least 3 entries!")
# Gets the ranks and rank differences
rankx = rankdata(x)
ranky = rankdata(y)
dsq = np.add.reduce((rankx-ranky)**2)
# Tie correction
if use_ties:
xties = count_tied_groups(x)
yties = count_tied_groups(y)
corr_x = np.sum(v*k*(k**2-1) for (k,v) in iteritems(xties))/12.
corr_y = np.sum(v*k*(k**2-1) for (k,v) in iteritems(yties))/12.
else:
corr_x = corr_y = 0
denom = n*(n**2 - 1)/6.
if corr_x != 0 or corr_y != 0:
rho = denom - dsq - corr_x - corr_y
rho /= ma.sqrt((denom-2*corr_x)*(denom-2*corr_y))
else:
rho = 1. - dsq/denom
t = ma.sqrt(ma.divide(df,(rho+1.0)*(1.0-rho))) * rho
if t is masked:
prob = 0.
else:
prob = _betai(0.5*df, 0.5, df/(df + t * t))
SpearmanrResult = namedtuple('SpearmanrResult', ('correlation', 'pvalue'))
return SpearmanrResult(rho, prob)
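# Editor's note: an illustrative sketch (not part of the original module)
# of the pair-wise handling of missing values; the data are assumptions.
def _example_spearmanr_missing():
    x = ma.array([1., 2., 3., 4., 5., 6.], mask=[0, 0, 0, 0, 0, 1])
    y = ma.array([2., 1., 4., 3., 6., 9.])
    # The last pair is discarded because x[5] is masked.
    return spearmanr(x, y)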
def kendalltau(x, y, use_ties=True, use_missing=False):
"""
Computes Kendall's rank correlation tau on two variables *x* and *y*.
Parameters
----------
x : sequence
First data list (for example, time).
y : sequence
Second data list.
use_ties : {True, False}, optional
Whether ties correction should be performed.
use_missing : {False, True}, optional
Whether missing data should be allocated a rank of 0 (False) or the
average rank (True)
Returns
-------
correlation : float
Kendall tau
pvalue : float
Approximate 2-side p-value.
"""
(x, y, n) = _chk_size(x, y)
(x, y) = (x.flatten(), y.flatten())
m = ma.mask_or(ma.getmask(x), ma.getmask(y))
if m is not nomask:
x = ma.array(x, mask=m, copy=True)
y = ma.array(y, mask=m, copy=True)
n -= m.sum()
KendalltauResult = namedtuple('KendalltauResult', ('correlation', 'pvalue'))
if n < 2:
return KendalltauResult(np.nan, np.nan)
rx = ma.masked_equal(rankdata(x, use_missing=use_missing), 0)
ry = ma.masked_equal(rankdata(y, use_missing=use_missing), 0)
idx = rx.argsort()
(rx, ry) = (rx[idx], ry[idx])
C = np.sum([((ry[i+1:] > ry[i]) * (rx[i+1:] > rx[i])).filled(0).sum()
for i in range(len(ry)-1)], dtype=float)
D = np.sum([((ry[i+1:] < ry[i])*(rx[i+1:] > rx[i])).filled(0).sum()
for i in range(len(ry)-1)], dtype=float)
if use_ties:
xties = count_tied_groups(x)
yties = count_tied_groups(y)
corr_x = np.sum([v*k*(k-1) for (k,v) in iteritems(xties)], dtype=float)
corr_y = np.sum([v*k*(k-1) for (k,v) in iteritems(yties)], dtype=float)
denom = ma.sqrt((n*(n-1)-corr_x)/2. * (n*(n-1)-corr_y)/2.)
else:
denom = n*(n-1)/2.
tau = (C-D) / denom
var_s = n*(n-1)*(2*n+5)
if use_ties:
var_s -= np.sum(v*k*(k-1)*(2*k+5)*1. for (k,v) in iteritems(xties))
var_s -= np.sum(v*k*(k-1)*(2*k+5)*1. for (k,v) in iteritems(yties))
v1 = np.sum([v*k*(k-1) for (k, v) in iteritems(xties)], dtype=float) *\
np.sum([v*k*(k-1) for (k, v) in iteritems(yties)], dtype=float)
v1 /= 2.*n*(n-1)
if n > 2:
v2 = np.sum([v*k*(k-1)*(k-2) for (k,v) in iteritems(xties)],
dtype=float) * \
np.sum([v*k*(k-1)*(k-2) for (k,v) in iteritems(yties)],
dtype=float)
v2 /= 9.*n*(n-1)*(n-2)
else:
v2 = 0
else:
v1 = v2 = 0
var_s /= 18.
var_s += (v1 + v2)
z = (C-D)/np.sqrt(var_s)
prob = special.erfc(abs(z)/np.sqrt(2))
return KendalltauResult(tau, prob)
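# Editor's note: an illustrative sketch (not part of the original module);
# the data are assumptions chosen so tau is easy to verify by hand.
def _example_kendalltau():
    x = ma.array([1., 2., 3., 4., 5.])
    y = ma.array([1., 3., 2., 5., 4.])
    # 8 concordant and 2 discordant pairs out of 10 -> tau = (8 - 2)/10 = 0.6
    return kendalltau(x, y)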
def kendalltau_seasonal(x):
"""
Computes a multivariate Kendall's rank correlation tau, for seasonal data.
Parameters
----------
x : 2-D ndarray
Array of seasonal data, with seasons in columns.
"""
x = ma.array(x, subok=True, copy=False, ndmin=2)
(n,m) = x.shape
n_p = x.count(0)
S_szn = np.sum(msign(x[i:]-x[i]).sum(0) for i in range(n))
S_tot = S_szn.sum()
n_tot = x.count()
ties = count_tied_groups(x.compressed())
corr_ties = np.sum(v*k*(k-1) for (k,v) in iteritems(ties))
denom_tot = ma.sqrt(1.*n_tot*(n_tot-1)*(n_tot*(n_tot-1)-corr_ties))/2.
R = rankdata(x, axis=0, use_missing=True)
K = ma.empty((m,m), dtype=int)
covmat = ma.empty((m,m), dtype=float)
denom_szn = ma.empty(m, dtype=float)
for j in range(m):
ties_j = count_tied_groups(x[:,j].compressed())
corr_j = np.sum(v*k*(k-1) for (k,v) in iteritems(ties_j))
cmb = n_p[j]*(n_p[j]-1)
for k in range(j,m,1):
K[j,k] = np.sum(msign((x[i:,j]-x[i,j])*(x[i:,k]-x[i,k])).sum()
for i in range(n))
covmat[j,k] = (K[j,k] + 4*(R[:,j]*R[:,k]).sum() -
n*(n_p[j]+1)*(n_p[k]+1))/3.
K[k,j] = K[j,k]
covmat[k,j] = covmat[j,k]
denom_szn[j] = ma.sqrt(cmb*(cmb-corr_j)) / 2.
var_szn = covmat.diagonal()
z_szn = msign(S_szn) * (abs(S_szn)-1) / ma.sqrt(var_szn)
z_tot_ind = msign(S_tot) * (abs(S_tot)-1) / ma.sqrt(var_szn.sum())
z_tot_dep = msign(S_tot) * (abs(S_tot)-1) / ma.sqrt(covmat.sum())
prob_szn = special.erfc(abs(z_szn)/np.sqrt(2))
prob_tot_ind = special.erfc(abs(z_tot_ind)/np.sqrt(2))
prob_tot_dep = special.erfc(abs(z_tot_dep)/np.sqrt(2))
chi2_tot = (z_szn*z_szn).sum()
chi2_trd = m * z_szn.mean()**2
output = {'seasonal tau': S_szn/denom_szn,
'global tau': S_tot/denom_tot,
'global tau (alt)': S_tot/denom_szn.sum(),
'seasonal p-value': prob_szn,
'global p-value (indep)': prob_tot_ind,
'global p-value (dep)': prob_tot_dep,
'chi2 total': chi2_tot,
'chi2 trend': chi2_trd,
}
return output
def pointbiserialr(x, y):
"""Calculates a point biserial correlation coefficient and its p-value.
Parameters
----------
x : array_like of bools
Input array.
y : array_like
Input array.
Returns
-------
correlation : float
R value
pvalue : float
2-tailed p-value
Notes
-----
Missing values are considered pair-wise: if a value is missing in x,
the corresponding value in y is masked.
For more details on `pointbiserialr`, see `stats.pointbiserialr`.
"""
x = ma.fix_invalid(x, copy=True).astype(bool)
y = ma.fix_invalid(y, copy=True).astype(float)
# Get rid of the missing data
m = ma.mask_or(ma.getmask(x), ma.getmask(y))
if m is not nomask:
unmask = np.logical_not(m)
x = x[unmask]
y = y[unmask]
n = len(x)
# phat is the fraction of x values that are True
phat = x.sum() / float(n)
y0 = y[~x] # y-values where x is False
y1 = y[x] # y-values where x is True
y0m = y0.mean()
y1m = y1.mean()
rpb = (y1m - y0m)*np.sqrt(phat * (1-phat)) / y.std()
df = n-2
t = rpb*ma.sqrt(df/(1.0-rpb**2))
prob = _betai(0.5*df, 0.5, df/(df+t*t))
PointbiserialrResult = namedtuple('PointbiserialrResult', ('correlation',
'pvalue'))
return PointbiserialrResult(rpb, prob)
def linregress(*args):
"""
Linear regression calculation
Note that the non-masked version is used, and that this docstring is
replaced by the non-masked docstring + some info on missing data.
"""
if len(args) == 1:
# Input is a single 2-D array containing x and y
args = ma.array(args[0], copy=True)
if len(args) == 2:
x = args[0]
y = args[1]
else:
x = args[:, 0]
y = args[:, 1]
else:
# Input is two 1-D arrays
x = ma.array(args[0]).flatten()
y = ma.array(args[1]).flatten()
m = ma.mask_or(ma.getmask(x), ma.getmask(y), shrink=False)
if m is not nomask:
x = ma.array(x, mask=m)
y = ma.array(y, mask=m)
if np.any(~m):
slope, intercept, r, prob, sterrest = stats_linregress(x.data[~m],
y.data[~m])
else:
# All data is masked
return None, None, None, None, None
else:
slope, intercept, r, prob, sterrest = stats_linregress(x.data, y.data)
LinregressResult = namedtuple('LinregressResult', ('slope', 'intercept',
'rvalue', 'pvalue',
'stderr'))
return LinregressResult(slope, intercept, r, prob, sterrest)
if stats_linregress.__doc__:
linregress.__doc__ = stats_linregress.__doc__ + genmissingvaldoc
def theilslopes(y, x=None, alpha=0.95):
r"""
Computes the Theil-Sen estimator for a set of points (x, y).
`theilslopes` implements a method for robust linear regression. It
computes the slope as the median of all slopes between paired values.
Parameters
----------
y : array_like
Dependent variable.
x : array_like or None, optional
Independent variable. If None, use ``arange(len(y))`` instead.
alpha : float, optional
Confidence degree between 0 and 1. Default is 95% confidence.
Note that `alpha` is symmetric around 0.5, i.e. both 0.1 and 0.9 are
interpreted as "find the 90% confidence interval".
Returns
-------
medslope : float
Theil slope.
medintercept : float
Intercept of the Theil line, as ``median(y) - medslope*median(x)``.
lo_slope : float
Lower bound of the confidence interval on `medslope`.
up_slope : float
Upper bound of the confidence interval on `medslope`.
Notes
-----
For more details on `theilslopes`, see `stats.theilslopes`.
"""
y = ma.asarray(y).flatten()
if x is None:
x = ma.arange(len(y), dtype=float)
else:
x = ma.asarray(x).flatten()
if len(x) != len(y):
raise ValueError("Incompatible lengths ! (%s<>%s)" % (len(y),len(x)))
m = ma.mask_or(ma.getmask(x), ma.getmask(y))
y._mask = x._mask = m
# Disregard any masked elements of x or y
y = y.compressed()
x = x.compressed().astype(float)
# We now have unmasked arrays so can use `stats.theilslopes`
return stats_theilslopes(y, x, alpha=alpha)
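# Editor's note: an illustrative sketch (not part of the original module)
# of the robustness of the Theil-Sen estimator; the data are assumptions.
def _example_theilslopes_outlier():
    x = np.arange(10, dtype=float)
    y = 2.0 * x + 1.0
    y[9] = 100.0  # a single gross outlier barely moves the median of slopes
    medslope, medintercept, lo_slope, up_slope = theilslopes(y, x)
    return medslope, medintercept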
def sen_seasonal_slopes(x):
x = ma.array(x, subok=True, copy=False, ndmin=2)
(n,_) = x.shape
# Get list of slopes per season
szn_slopes = ma.vstack([(x[i+1:]-x[i])/np.arange(1,n-i)[:,None]
for i in range(n)])
szn_medslopes = ma.median(szn_slopes, axis=0)
medslope = ma.median(szn_slopes, axis=None)
return szn_medslopes, medslope
def ttest_1samp(a, popmean, axis=0):
"""
Calculates the T-test for the mean of ONE group of scores.
Parameters
----------
a : array_like
sample observation
popmean : float or array_like
        expected value in null hypothesis, if array_like then it must have the
        same shape as `a` excluding the axis dimension
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
array `a`.
Returns
-------
statistic : float or array
t-statistic
pvalue : float or array
two-tailed p-value
Notes
-----
For more details on `ttest_1samp`, see `stats.ttest_1samp`.
"""
a, axis = _chk_asarray(a, axis)
if a.size == 0:
return (np.nan, np.nan)
x = a.mean(axis=axis)
v = a.var(axis=axis, ddof=1)
n = a.count(axis=axis)
df = n - 1.
svar = ((n - 1) * v) / df
t = (x - popmean) / ma.sqrt(svar / n)
prob = _betai(0.5*df, 0.5, df/(df + t*t))
Ttest_1sampResult = namedtuple('Ttest_1sampResult', ('statistic', 'pvalue'))
return Ttest_1sampResult(t, prob)
ttest_onesamp = ttest_1samp
def ttest_ind(a, b, axis=0, equal_var=True):
"""
Calculates the T-test for the means of TWO INDEPENDENT samples of scores.
Parameters
----------
a, b : array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
equal_var : bool, optional
If True, perform a standard independent 2 sample test that assumes equal
population variances.
If False, perform Welch's t-test, which does not assume equal population
variance.
.. versionadded:: 0.17.0
Returns
-------
statistic : float or array
The calculated t-statistic.
pvalue : float or array
The two-tailed p-value.
Notes
-----
For more details on `ttest_ind`, see `stats.ttest_ind`.
"""
a, b, axis = _chk2_asarray(a, b, axis)
Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue'))
if a.size == 0 or b.size == 0:
return Ttest_indResult(np.nan, np.nan)
(x1, x2) = (a.mean(axis), b.mean(axis))
(v1, v2) = (a.var(axis=axis, ddof=1), b.var(axis=axis, ddof=1))
(n1, n2) = (a.count(axis), b.count(axis))
if equal_var:
df = n1 + n2 - 2.
svar = ((n1-1)*v1+(n2-1)*v2) / df
denom = ma.sqrt(svar*(1.0/n1 + 1.0/n2)) # n-D computation here!
else:
vn1 = v1/n1
vn2 = v2/n2
df = ((vn1 + vn2)**2) / ((vn1**2) / (n1 - 1) + (vn2**2) / (n2 - 1))
# If df is undefined, variances are zero.
# It doesn't matter what df is as long as it is not NaN.
df = np.where(np.isnan(df), 1, df)
denom = ma.sqrt(vn1 + vn2)
t = (x1-x2) / denom
t = ma.filled(t, 1) # replace NaN t-values with 1.0
probs = _betai(0.5*df, 0.5, df/(df + t*t)).reshape(t.shape)
return Ttest_indResult(t, probs.squeeze())
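# Editor's note: an illustrative sketch (not part of the original module);
# equal_var=False selects Welch's t-test. The sample data are assumptions.
def _example_ttest_ind_welch():
    rng = np.random.RandomState(0)
    a = ma.array(rng.normal(0.0, 1.0, size=30))
    b = ma.array(rng.normal(0.5, 3.0, size=40))
    return ttest_ind(a, b, equal_var=False)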
def ttest_rel(a, b, axis=0):
"""
Calculates the T-test on TWO RELATED samples of scores, a and b.
Parameters
----------
a, b : array_like
The arrays must have the same shape.
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
Returns
-------
statistic : float or array
t-statistic
pvalue : float or array
two-tailed p-value
Notes
-----
For more details on `ttest_rel`, see `stats.ttest_rel`.
"""
a, b, axis = _chk2_asarray(a, b, axis)
if len(a) != len(b):
raise ValueError('unequal length arrays')
Ttest_relResult = namedtuple('Ttest_relResult', ('statistic', 'pvalue'))
if a.size == 0 or b.size == 0:
return Ttest_relResult(np.nan, np.nan)
n = a.count(axis)
df = (n-1.0)
d = (a-b).astype('d')
denom = ma.sqrt((n*ma.add.reduce(d*d,axis) - ma.add.reduce(d,axis)**2) / df)
t = ma.add.reduce(d, axis) / denom
t = ma.filled(t, 1)
probs = _betai(0.5*df, 0.5, df/(df + t*t)).reshape(t.shape).squeeze()
return Ttest_relResult(t, probs)
def mannwhitneyu(x,y, use_continuity=True):
"""
Computes the Mann-Whitney statistic
Missing values in `x` and/or `y` are discarded.
Parameters
----------
x : sequence
Input
y : sequence
Input
use_continuity : {True, False}, optional
Whether a continuity correction (1/2.) should be taken into account.
Returns
-------
statistic : float
The Mann-Whitney statistics
pvalue : float
Approximate p-value assuming a normal distribution.
"""
x = ma.asarray(x).compressed().view(ndarray)
y = ma.asarray(y).compressed().view(ndarray)
ranks = rankdata(np.concatenate([x,y]))
(nx, ny) = (len(x), len(y))
nt = nx + ny
U = ranks[:nx].sum() - nx*(nx+1)/2.
U = max(U, nx*ny - U)
u = nx*ny - U
mu = (nx*ny)/2.
sigsq = (nt**3 - nt)/12.
ties = count_tied_groups(ranks)
sigsq -= np.sum(v*(k**3-k) for (k,v) in iteritems(ties))/12.
sigsq *= nx*ny/float(nt*(nt-1))
if use_continuity:
z = (U - 1/2. - mu) / ma.sqrt(sigsq)
else:
z = (U - mu) / ma.sqrt(sigsq)
prob = special.erfc(abs(z)/np.sqrt(2))
MannwhitneyuResult = namedtuple('MannwhitneyuResult', ('statistic',
'pvalue'))
return MannwhitneyuResult(u, prob)
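# Editor's note: an illustrative sketch (not part of the original module);
# masked values are dropped before ranking. The data are assumptions.
def _example_mannwhitneyu():
    x = ma.array([1.1, 2.3, 3.5, 4.7, 5.9], mask=[0, 0, 0, 0, 1])
    y = ma.array([2.2, 3.4, 4.6, 5.8])
    return mannwhitneyu(x, y)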
def kruskal(*args):
"""
Compute the Kruskal-Wallis H-test for independent samples
Parameters
----------
sample1, sample2, ... : array_like
Two or more arrays with the sample measurements can be given as
arguments.
Returns
-------
statistic : float
The Kruskal-Wallis H statistic, corrected for ties
pvalue : float
The p-value for the test using the assumption that H has a chi
square distribution
Notes
-----
For more details on `kruskal`, see `stats.kruskal`.
"""
output = argstoarray(*args)
ranks = ma.masked_equal(rankdata(output, use_missing=False), 0)
sumrk = ranks.sum(-1)
ngrp = ranks.count(-1)
ntot = ranks.count()
H = 12./(ntot*(ntot+1)) * (sumrk**2/ngrp).sum() - 3*(ntot+1)
# Tie correction
ties = count_tied_groups(ranks)
T = 1. - np.sum(v*(k**3-k) for (k,v) in iteritems(ties))/float(ntot**3-ntot)
if T == 0:
raise ValueError('All numbers are identical in kruskal')
H /= T
df = len(output) - 1
prob = distributions.chi2.sf(H, df)
KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue'))
return KruskalResult(H, prob)
kruskalwallis = kruskal
def ks_twosamp(data1, data2, alternative="two-sided"):
"""
Computes the Kolmogorov-Smirnov test on two samples.
Missing values are discarded.
Parameters
----------
data1 : array_like
First data set
data2 : array_like
Second data set
alternative : {'two-sided', 'less', 'greater'}, optional
Indicates the alternative hypothesis. Default is 'two-sided'.
Returns
-------
d : float
Value of the Kolmogorov Smirnov test
p : float
Corresponding p-value.
"""
(data1, data2) = (ma.asarray(data1), ma.asarray(data2))
(n1, n2) = (data1.count(), data2.count())
n = (n1*n2/float(n1+n2))
mix = ma.concatenate((data1.compressed(), data2.compressed()))
mixsort = mix.argsort(kind='mergesort')
csum = np.where(mixsort < n1, 1./n1, -1./n2).cumsum()
# Check for ties
if len(np.unique(mix)) < (n1+n2):
csum = csum[np.r_[np.diff(mix[mixsort]).nonzero()[0],-1]]
alternative = str(alternative).lower()[0]
if alternative == 't':
d = ma.abs(csum).max()
prob = special.kolmogorov(np.sqrt(n)*d)
elif alternative == 'l':
d = -csum.min()
prob = np.exp(-2*n*d**2)
elif alternative == 'g':
d = csum.max()
prob = np.exp(-2*n*d**2)
else:
raise ValueError("Invalid value for the alternative hypothesis: "
"should be in 'two-sided', 'less' or 'greater'")
return (d, prob)
ks_2samp = ks_twosamp
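# Editor's note: an illustrative sketch (not part of the original module);
# the samples are assumptions drawn from shifted normal distributions.
def _example_ks_twosamp():
    rng = np.random.RandomState(0)
    d1 = ma.array(rng.normal(size=50))
    d2 = ma.array(rng.normal(loc=0.5, size=60))
    return ks_twosamp(d1, d2, alternative='two-sided')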
@np.deprecate(message="mstats.threshold is deprecated in scipy 0.17.0")
def threshold(a, threshmin=None, threshmax=None, newval=0):
"""
Clip array to a given value.
Similar to numpy.clip(), except that values less than `threshmin` or
greater than `threshmax` are replaced by `newval`, instead of by
`threshmin` and `threshmax` respectively.
Parameters
----------
a : ndarray
Input data
threshmin : {None, float}, optional
Lower threshold. If None, set to the minimum value.
threshmax : {None, float}, optional
Upper threshold. If None, set to the maximum value.
newval : {0, float}, optional
Value outside the thresholds.
Returns
-------
threshold : ndarray
        Returns `a`, with values less than `threshmin` and values greater
        than `threshmax` replaced with `newval`.
"""
a = ma.array(a, copy=True)
mask = np.zeros(a.shape, dtype=bool)
if threshmin is not None:
mask |= (a < threshmin).filled(False)
if threshmax is not None:
mask |= (a > threshmax).filled(False)
a[mask] = newval
return a
def trima(a, limits=None, inclusive=(True,True)):
"""
Trims an array by masking the data outside some given limits.
Returns a masked version of the input array.
Parameters
----------
a : array_like
Input array.
limits : {None, tuple}, optional
Tuple of (lower limit, upper limit) in absolute values.
Values of the input array lower (greater) than the lower (upper) limit
        will be masked. A limit of None indicates an open interval.
inclusive : (bool, bool) tuple, optional
Tuple of (lower flag, upper flag), indicating whether values exactly
equal to the lower (upper) limit are allowed.
"""
a = ma.asarray(a)
a.unshare_mask()
if (limits is None) or (limits == (None, None)):
return a
(lower_lim, upper_lim) = limits
(lower_in, upper_in) = inclusive
condition = False
if lower_lim is not None:
if lower_in:
condition |= (a < lower_lim)
else:
condition |= (a <= lower_lim)
if upper_lim is not None:
if upper_in:
condition |= (a > upper_lim)
else:
condition |= (a >= upper_lim)
a[condition.filled(True)] = masked
return a
def trimr(a, limits=None, inclusive=(True, True), axis=None):
"""
Trims an array by masking some proportion of the data on each end.
Returns a masked version of the input array.
Parameters
----------
a : sequence
Input array.
limits : {None, tuple}, optional
Tuple of the percentages to cut on each side of the array, with respect
to the number of unmasked data, as floats between 0. and 1.
Noting n the number of unmasked data before trimming, the
(n*limits[0])th smallest data and the (n*limits[1])th largest data are
masked, and the total number of unmasked data after trimming is
n*(1.-sum(limits)). The value of one limit can be set to None to
indicate an open interval.
inclusive : {(True,True) tuple}, optional
Tuple of flags indicating whether the number of data being masked on
the left (right) end should be truncated (True) or rounded (False) to
integers.
axis : {None,int}, optional
Axis along which to trim. If None, the whole array is trimmed, but its
shape is maintained.
"""
def _trimr1D(a, low_limit, up_limit, low_inclusive, up_inclusive):
n = a.count()
idx = a.argsort()
if low_limit:
if low_inclusive:
lowidx = int(low_limit*n)
else:
lowidx = np.round(low_limit*n)
a[idx[:lowidx]] = masked
if up_limit is not None:
if up_inclusive:
upidx = n - int(n*up_limit)
else:
upidx = n - np.round(n*up_limit)
a[idx[upidx:]] = masked
return a
a = ma.asarray(a)
a.unshare_mask()
if limits is None:
return a
# Check the limits
(lolim, uplim) = limits
errmsg = "The proportion to cut from the %s should be between 0. and 1."
if lolim is not None:
if lolim > 1. or lolim < 0:
raise ValueError(errmsg % 'beginning' + "(got %s)" % lolim)
if uplim is not None:
if uplim > 1. or uplim < 0:
raise ValueError(errmsg % 'end' + "(got %s)" % uplim)
(loinc, upinc) = inclusive
if axis is None:
shp = a.shape
return _trimr1D(a.ravel(),lolim,uplim,loinc,upinc).reshape(shp)
else:
return ma.apply_along_axis(_trimr1D, axis, a, lolim,uplim,loinc,upinc)
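# Editor's note: an illustrative sketch (not part of the original module)
# of proportional trimming; the data and limits are assumptions.
def _example_trimr():
    z = ma.array([1., 2., 3., 4., 5., 6., 7., 8., 9., 10.])
    # Mask the lowest 10% and the highest 20% of the unmasked values:
    # 1. is masked at the low end, 9. and 10. at the high end.
    return trimr(z, limits=(0.1, 0.2))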
trimdoc = """
Parameters
----------
a : sequence
Input array
limits : {None, tuple}, optional
If `relative` is False, tuple (lower limit, upper limit) in absolute values.
Values of the input array lower (greater) than the lower (upper) limit are
masked.
If `relative` is True, tuple (lower percentage, upper percentage) to cut
on each side of the array, with respect to the number of unmasked data.
Noting n the number of unmasked data before trimming, the (n*limits[0])th
smallest data and the (n*limits[1])th largest data are masked, and the
total number of unmasked data after trimming is n*(1.-sum(limits))
In each case, the value of one limit can be set to None to indicate an
open interval.
If limits is None, no trimming is performed
inclusive : {(bool, bool) tuple}, optional
If `relative` is False, tuple indicating whether values exactly equal
to the absolute limits are allowed.
If `relative` is True, tuple indicating whether the number of data
being masked on each side should be rounded (True) or truncated
(False).
relative : bool, optional
Whether to consider the limits as absolute values (False) or proportions
to cut (True).
axis : int, optional
Axis along which to trim.
"""
def trim(a, limits=None, inclusive=(True,True), relative=False, axis=None):
"""
Trims an array by masking the data outside some given limits.
Returns a masked version of the input array.
%s
Examples
--------
>>> from scipy.stats.mstats import trim
>>> z = [ 1, 2, 3, 4, 5, 6, 7, 8, 9,10]
>>> print(trim(z,(3,8)))
[-- -- 3 4 5 6 7 8 -- --]
>>> print(trim(z,(0.1,0.2),relative=True))
[-- 2 3 4 5 6 7 8 -- --]
"""
if relative:
return trimr(a, limits=limits, inclusive=inclusive, axis=axis)
else:
return trima(a, limits=limits, inclusive=inclusive)
if trim.__doc__ is not None:
trim.__doc__ = trim.__doc__ % trimdoc
def trimboth(data, proportiontocut=0.2, inclusive=(True,True), axis=None):
"""
Trims the smallest and largest data values.
Trims the `data` by masking the ``int(proportiontocut * n)`` smallest and
``int(proportiontocut * n)`` largest values of data along the given axis,
where n is the number of unmasked values before trimming.
Parameters
----------
data : ndarray
Data to trim.
proportiontocut : float, optional
Percentage of trimming (as a float between 0 and 1).
If n is the number of unmasked values before trimming, the number of
values after trimming is ``(1 - 2*proportiontocut) * n``.
Default is 0.2.
inclusive : {(bool, bool) tuple}, optional
Tuple indicating whether the number of data being masked on each side
should be rounded (True) or truncated (False).
axis : int, optional
Axis along which to perform the trimming.
If None, the input array is first flattened.
"""
return trimr(data, limits=(proportiontocut,proportiontocut),
inclusive=inclusive, axis=axis)
def trimtail(data, proportiontocut=0.2, tail='left', inclusive=(True,True),
axis=None):
"""
Trims the data by masking values from one tail.
Parameters
----------
data : array_like
Data to trim.
proportiontocut : float, optional
Percentage of trimming. If n is the number of unmasked values
before trimming, the number of values after trimming is
``(1 - proportiontocut) * n``. Default is 0.2.
tail : {'left','right'}, optional
If 'left' the `proportiontocut` lowest values will be masked.
If 'right' the `proportiontocut` highest values will be masked.
Default is 'left'.
inclusive : {(bool, bool) tuple}, optional
Tuple indicating whether the number of data being masked on each side
should be rounded (True) or truncated (False). Default is
(True, True).
axis : int, optional
Axis along which to perform the trimming.
If None, the input array is first flattened. Default is None.
Returns
-------
trimtail : ndarray
Returned array of same shape as `data` with masked tail values.
"""
tail = str(tail).lower()[0]
if tail == 'l':
limits = (proportiontocut,None)
elif tail == 'r':
limits = (None, proportiontocut)
else:
raise TypeError("The tail argument should be in ('left','right')")
return trimr(data, limits=limits, axis=axis, inclusive=inclusive)
trim1 = trimtail
def trimmed_mean(a, limits=(0.1,0.1), inclusive=(1,1), relative=True,
axis=None):
"""Returns the trimmed mean of the data along the given axis.
%s
""" % trimdoc
if (not isinstance(limits,tuple)) and isinstance(limits,float):
limits = (limits, limits)
if relative:
return trimr(a,limits=limits,inclusive=inclusive,axis=axis).mean(axis=axis)
else:
return trima(a,limits=limits,inclusive=inclusive).mean(axis=axis)
def trimmed_var(a, limits=(0.1,0.1), inclusive=(1,1), relative=True,
axis=None, ddof=0):
"""Returns the trimmed variance of the data along the given axis.
%s
ddof : {0,integer}, optional
Means Delta Degrees of Freedom. The denominator used during computations
        is (n-ddof). DDOF=0 corresponds to a biased estimate, DDOF=1 to an
        unbiased estimate of the variance.
""" % trimdoc
if (not isinstance(limits,tuple)) and isinstance(limits,float):
limits = (limits, limits)
if relative:
out = trimr(a,limits=limits, inclusive=inclusive,axis=axis)
else:
out = trima(a,limits=limits,inclusive=inclusive)
return out.var(axis=axis, ddof=ddof)
def trimmed_std(a, limits=(0.1,0.1), inclusive=(1,1), relative=True,
axis=None, ddof=0):
"""Returns the trimmed standard deviation of the data along the given axis.
%s
ddof : {0,integer}, optional
Means Delta Degrees of Freedom. The denominator used during computations
        is (n-ddof). DDOF=0 corresponds to a biased estimate, DDOF=1 to an
        unbiased estimate of the variance.
""" % trimdoc
if (not isinstance(limits,tuple)) and isinstance(limits,float):
limits = (limits, limits)
if relative:
out = trimr(a,limits=limits,inclusive=inclusive,axis=axis)
else:
out = trima(a,limits=limits,inclusive=inclusive)
return out.std(axis=axis,ddof=ddof)
def trimmed_stde(a, limits=(0.1,0.1), inclusive=(1,1), axis=None):
"""
Returns the standard error of the trimmed mean along the given axis.
Parameters
----------
a : sequence
Input array
limits : {(0.1,0.1), tuple of float}, optional
tuple (lower percentage, upper percentage) to cut on each side of the
array, with respect to the number of unmasked data.
If n is the number of unmasked data before trimming, the values
smaller than ``n * limits[0]`` and the values larger than
        ``n * limits[1]`` are masked, and the total number of unmasked
data after trimming is ``n * (1.-sum(limits))``. In each case,
the value of one limit can be set to None to indicate an open interval.
If `limits` is None, no trimming is performed.
inclusive : {(bool, bool) tuple} optional
Tuple indicating whether the number of data being masked on each side
should be rounded (True) or truncated (False).
axis : int, optional
Axis along which to trim.
Returns
-------
trimmed_stde : scalar or ndarray
"""
def _trimmed_stde_1D(a, low_limit, up_limit, low_inclusive, up_inclusive):
"Returns the standard error of the trimmed mean for a 1D input data."
n = a.count()
idx = a.argsort()
if low_limit:
if low_inclusive:
lowidx = int(low_limit*n)
else:
lowidx = np.round(low_limit*n)
a[idx[:lowidx]] = masked
if up_limit is not None:
if up_inclusive:
upidx = n - int(n*up_limit)
else:
upidx = n - np.round(n*up_limit)
a[idx[upidx:]] = masked
a[idx[:lowidx]] = a[idx[lowidx]]
a[idx[upidx:]] = a[idx[upidx-1]]
winstd = a.std(ddof=1)
return winstd / ((1-low_limit-up_limit)*np.sqrt(len(a)))
a = ma.array(a, copy=True, subok=True)
a.unshare_mask()
if limits is None:
return a.std(axis=axis,ddof=1)/ma.sqrt(a.count(axis))
if (not isinstance(limits,tuple)) and isinstance(limits,float):
limits = (limits, limits)
# Check the limits
(lolim, uplim) = limits
errmsg = "The proportion to cut from the %s should be between 0. and 1."
if lolim is not None:
if lolim > 1. or lolim < 0:
raise ValueError(errmsg % 'beginning' + "(got %s)" % lolim)
if uplim is not None:
if uplim > 1. or uplim < 0:
raise ValueError(errmsg % 'end' + "(got %s)" % uplim)
(loinc, upinc) = inclusive
if (axis is None):
return _trimmed_stde_1D(a.ravel(),lolim,uplim,loinc,upinc)
else:
if a.ndim > 2:
raise ValueError("Array 'a' must be at most two dimensional, but got a.ndim = %d" % a.ndim)
return ma.apply_along_axis(_trimmed_stde_1D, axis, a,
lolim,uplim,loinc,upinc)
def tmean(a, limits=None, inclusive=(True,True)):
"""
Compute the trimmed mean.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None (default), then all
values are used. Either of the limit values in the tuple can also be
None representing a half-open interval.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
Returns
-------
tmean : float
Notes
-----
For more details on `tmean`, see `stats.tmean`.
"""
return trima(a, limits=limits, inclusive=inclusive).mean()
def tvar(a, limits=None, inclusive=(True,True)):
"""
Compute the trimmed variance
This function computes the sample variance of an array of values,
while ignoring values which are outside of given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
Returns
-------
tvar : float
Trimmed variance.
Notes
-----
For more details on `tvar`, see `stats.tvar`.
"""
a = a.astype(float).ravel()
if limits is None:
n = (~a.mask).sum() # todo: better way to do that?
r = trima(a, limits=limits, inclusive=inclusive).var() * (n/(n-1.))
else:
        raise ValueError('mstats.tvar() with limits not implemented yet')
return r
def tmin(a, lowerlimit=None, axis=0, inclusive=True):
"""
Compute the trimmed minimum
Parameters
----------
a : array_like
array of values
lowerlimit : None or float, optional
Values in the input array less than the given limit will be ignored.
When lowerlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the lower limit
are included. The default value is True.
Returns
-------
tmin : float, int or ndarray
Notes
-----
For more details on `tmin`, see `stats.tmin`.
"""
a, axis = _chk_asarray(a, axis)
am = trima(a, (lowerlimit, None), (inclusive, False))
return ma.minimum.reduce(am, axis)
def tmax(a, upperlimit, axis=0, inclusive=True):
"""
Compute the trimmed maximum
This function computes the maximum value of an array along a given axis,
while ignoring values larger than a specified upper limit.
Parameters
----------
a : array_like
array of values
upperlimit : None or float, optional
Values in the input array greater than the given limit will be ignored.
When upperlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the upper limit
are included. The default value is True.
Returns
-------
tmax : float, int or ndarray
Notes
-----
For more details on `tmax`, see `stats.tmax`.
"""
a, axis = _chk_asarray(a, axis)
am = trima(a, (None, upperlimit), (False, inclusive))
return ma.maximum.reduce(am, axis)
def tsem(a, limits=None, inclusive=(True,True)):
"""
Compute the trimmed standard error of the mean.
This function finds the standard error of the mean for given
values, ignoring values outside the given `limits`.
Parameters
----------
a : array_like
array of values
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
Returns
-------
tsem : float
Notes
-----
For more details on `tsem`, see `stats.tsem`.
"""
a = ma.asarray(a).ravel()
if limits is None:
n = float(a.count())
return a.std(ddof=1)/ma.sqrt(n)
am = trima(a.ravel(), limits, inclusive)
sd = np.sqrt(am.var(ddof=1))
return sd / np.sqrt(am.count())
def winsorize(a, limits=None, inclusive=(True, True), inplace=False,
axis=None):
"""Returns a Winsorized version of the input array.
The (limits[0])th lowest values are set to the (limits[0])th percentile,
and the (limits[1])th highest values are set to the (1 - limits[1])th
percentile.
Masked values are skipped.
Parameters
----------
a : sequence
Input array.
limits : {None, tuple of float}, optional
Tuple of the percentages to cut on each side of the array, with respect
to the number of unmasked data, as floats between 0. and 1.
Noting n the number of unmasked data before trimming, the
(n*limits[0])th smallest data and the (n*limits[1])th largest data are
masked, and the total number of unmasked data after trimming
is n*(1.-sum(limits)) The value of one limit can be set to None to
indicate an open interval.
inclusive : {(True, True) tuple}, optional
Tuple indicating whether the number of data being masked on each side
should be rounded (True) or truncated (False).
inplace : {False, True}, optional
Whether to winsorize in place (True) or to use a copy (False)
axis : {None, int}, optional
Axis along which to trim. If None, the whole array is trimmed, but its
shape is maintained.
Notes
-----
This function is applied to reduce the effect of possibly spurious outliers
by limiting the extreme values.
"""
def _winsorize1D(a, low_limit, up_limit, low_include, up_include):
n = a.count()
idx = a.argsort()
if low_limit:
if low_include:
lowidx = int(low_limit * n)
else:
lowidx = np.round(low_limit * n)
a[idx[:lowidx]] = a[idx[lowidx]]
if up_limit is not None:
if up_include:
upidx = n - int(n * up_limit)
else:
upidx = n - np.round(n * up_limit)
a[idx[upidx:]] = a[idx[upidx - 1]]
return a
# We are going to modify a: better make a copy
a = ma.array(a, copy=np.logical_not(inplace))
if limits is None:
return a
if (not isinstance(limits, tuple)) and isinstance(limits, float):
limits = (limits, limits)
# Check the limits
(lolim, uplim) = limits
errmsg = "The proportion to cut from the %s should be between 0. and 1."
if lolim is not None:
if lolim > 1. or lolim < 0:
raise ValueError(errmsg % 'beginning' + "(got %s)" % lolim)
if uplim is not None:
if uplim > 1. or uplim < 0:
raise ValueError(errmsg % 'end' + "(got %s)" % uplim)
(loinc, upinc) = inclusive
if axis is None:
shp = a.shape
return _winsorize1D(a.ravel(), lolim, uplim, loinc, upinc).reshape(shp)
else:
return ma.apply_along_axis(_winsorize1D, axis, a, lolim, uplim, loinc,
upinc)
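# Illustrative usage sketch (not part of the original module); it relies on the
# module-level `ma` (numpy.ma) import used throughout this file.
def _winsorize_example():
    # one extreme value at each end of a 10-element sample
    x = ma.array([0., 1., 2., 3., 4., 5., 6., 7., 8., 100.])
    # limit the most extreme 10% on each side: 100. -> 8. and 0. -> 1.
    return winsorize(x, limits=(0.1, 0.1))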
def moment(a, moment=1, axis=0):
"""
Calculates the nth moment about the mean for a sample.
Parameters
----------
a : array_like
data
moment : int, optional
order of central moment that is returned
axis : int or None, optional
Axis along which the central moment is computed. Default is 0.
If None, compute over the whole array `a`.
Returns
-------
n-th central moment : ndarray or float
The appropriate moment along the given axis or over all values if axis
is None. The denominator for the moment calculation is the number of
observations, no degrees of freedom correction is done.
Notes
-----
For more details about `moment`, see `stats.moment`.
"""
a, axis = _chk_asarray(a, axis)
if moment == 1:
# By definition the first moment about the mean is 0.
shape = list(a.shape)
del shape[axis]
if shape:
# return an actual array of the appropriate shape
return np.zeros(shape, dtype=float)
else:
# the input was 1D, so return a scalar instead of a rank-0 array
return np.float64(0.0)
else:
# Exponentiation by squares: form exponent sequence
n_list = [moment]
current_n = moment
while current_n > 2:
if current_n % 2:
current_n = (current_n-1)/2
else:
current_n /= 2
n_list.append(current_n)
# Starting point for exponentiation by squares
a_zero_mean = a - ma.expand_dims(a.mean(axis), axis)
if n_list[-1] == 1:
s = a_zero_mean.copy()
else:
s = a_zero_mean**2
# Perform multiplications
for n in n_list[-2::-1]:
s = s**2
if n % 2:
s *= a_zero_mean
return s.mean(axis)
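# Illustrative usage sketch (not part of the original module): the second
# central moment equals the biased sample variance (no ddof correction).
def _moment_example():
    x = ma.array([1., 2., 3., 4., 5.])
    return moment(x, moment=2)  # 2.0, identical to x.var()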
def variation(a, axis=0):
"""
Computes the coefficient of variation, the ratio of the biased standard
deviation to the mean.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate the coefficient of variation. Default
is 0. If None, compute over the whole array `a`.
Returns
-------
variation : ndarray
The calculated variation along the requested axis.
Notes
-----
For more details about `variation`, see `stats.variation`.
"""
a, axis = _chk_asarray(a, axis)
return a.std(axis)/a.mean(axis)
def skew(a, axis=0, bias=True):
"""
Computes the skewness of a data set.
Parameters
----------
a : ndarray
data
axis : int or None, optional
Axis along which skewness is calculated. Default is 0.
If None, compute over the whole array `a`.
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
Returns
-------
skewness : ndarray
The skewness of values along an axis, returning 0 where all values are
equal.
Notes
-----
For more details about `skew`, see `stats.skew`.
"""
a, axis = _chk_asarray(a,axis)
n = a.count(axis)
m2 = moment(a, 2, axis)
m3 = moment(a, 3, axis)
olderr = np.seterr(all='ignore')
try:
vals = ma.where(m2 == 0, 0, m3 / m2**1.5)
finally:
np.seterr(**olderr)
if not bias:
can_correct = (n > 2) & (m2 > 0)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m3 = np.extract(can_correct, m3)
nval = ma.sqrt((n-1.0)*n)/(n-2.0)*m3/m2**1.5
np.place(vals, can_correct, nval)
return vals
def kurtosis(a, axis=0, fisher=True, bias=True):
"""
Computes the kurtosis (Fisher or Pearson) of a dataset.
Kurtosis is the fourth central moment divided by the square of the
variance. If Fisher's definition is used, then 3.0 is subtracted from
the result to give 0.0 for a normal distribution.
If bias is False then the kurtosis is calculated using k statistics to
eliminate bias coming from biased moment estimators
Use `kurtosistest` to see if result is close enough to normal.
Parameters
----------
a : array
data for which the kurtosis is calculated
axis : int or None, optional
Axis along which the kurtosis is calculated. Default is 0.
If None, compute over the whole array `a`.
fisher : bool, optional
If True, Fisher's definition is used (normal ==> 0.0). If False,
Pearson's definition is used (normal ==> 3.0).
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
Returns
-------
kurtosis : array
The kurtosis of values along an axis. If all values are equal,
return -3 for Fisher's definition and 0 for Pearson's definition.
Notes
-----
For more details about `kurtosis`, see `stats.kurtosis`.
"""
a, axis = _chk_asarray(a, axis)
m2 = moment(a, 2, axis)
m4 = moment(a, 4, axis)
olderr = np.seterr(all='ignore')
try:
vals = ma.where(m2 == 0, 0, m4 / m2**2.0)
finally:
np.seterr(**olderr)
if not bias:
n = a.count(axis)
can_correct = (n > 3) & (m2 is not ma.masked and m2 > 0)
if can_correct.any():
n = np.extract(can_correct, n)
m2 = np.extract(can_correct, m2)
m4 = np.extract(can_correct, m4)
nval = 1.0/(n-2)/(n-3)*((n*n-1.0)*m4/m2**2.0-3*(n-1)**2.0)
np.place(vals, can_correct, nval+3.0)
if fisher:
return vals - 3
else:
return vals
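# Illustrative usage sketch (not part of the original module): for a large
# normal sample the Fisher kurtosis should be close to 0.
def _kurtosis_example():
    rng = np.random.RandomState(1234)
    sample = ma.array(rng.normal(size=10000))
    return kurtosis(sample)  # approximately 0.0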
def describe(a, axis=0, ddof=0, bias=True):
"""
Computes several descriptive statistics of the passed array.
Parameters
----------
a : array_like
Data array
axis : int or None, optional
Axis along which to calculate statistics. Default 0. If None,
compute over the whole array `a`.
ddof : int, optional
degree of freedom (default 0); note that default ddof is different
from the same routine in stats.describe
bias : bool, optional
If False, then the skewness and kurtosis calculations are corrected for
statistical bias.
Returns
-------
nobs : int
        size of the data (discarding missing values)
minmax : (int, int)
min, max
mean : float
arithmetic mean
variance : float
unbiased variance
skewness : float
biased skewness
kurtosis : float
biased kurtosis
Examples
--------
>>> from scipy.stats.mstats import describe
>>> ma = np.ma.array(range(6), mask=[0, 0, 0, 1, 1, 1])
>>> describe(ma)
DescribeResult(nobs=array(3), minmax=(masked_array(data = 0,
mask = False,
fill_value = 999999)
, masked_array(data = 2,
mask = False,
fill_value = 999999)
), mean=1.0, variance=0.66666666666666663, skewness=masked_array(data = 0.0,
mask = False,
fill_value = 1e+20)
, kurtosis=-1.5)
"""
a, axis = _chk_asarray(a, axis)
n = a.count(axis)
mm = (ma.minimum.reduce(a), ma.maximum.reduce(a))
m = a.mean(axis)
v = a.var(axis, ddof=ddof)
sk = skew(a, axis, bias=bias)
kurt = kurtosis(a, axis, bias=bias)
DescribeResult = namedtuple('DescribeResult', ('nobs', 'minmax', 'mean',
'variance', 'skewness',
'kurtosis'))
return DescribeResult(n, mm, m, v, sk, kurt)
def stde_median(data, axis=None):
"""Returns the McKean-Schrader estimate of the standard error of the sample
    median along the given axis. Masked values are discarded.
Parameters
----------
data : ndarray
        Input data.
axis : {None,int}, optional
Axis along which to perform the trimming.
If None, the input array is first flattened.
"""
def _stdemed_1D(data):
data = np.sort(data.compressed())
n = len(data)
z = 2.5758293035489004
k = int(np.round((n+1)/2. - z * np.sqrt(n/4.),0))
return ((data[n-k] - data[k-1])/(2.*z))
data = ma.array(data, copy=False, subok=True)
if (axis is None):
return _stdemed_1D(data)
else:
if data.ndim > 2:
raise ValueError("Array 'data' must be at most two dimensional, "
"but got data.ndim = %d" % data.ndim)
return ma.apply_along_axis(_stdemed_1D, axis, data)
def skewtest(a, axis=0):
"""
Tests whether the skew is different from the normal distribution.
Parameters
----------
a : array
The data to be tested
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
a 2-sided p-value for the hypothesis test
Notes
-----
For more details about `skewtest`, see `stats.skewtest`.
"""
a, axis = _chk_asarray(a, axis)
if axis is None:
a = a.ravel()
axis = 0
b2 = skew(a,axis)
n = a.count(axis)
if np.min(n) < 8:
raise ValueError(
"skewtest is not valid with less than 8 samples; %i samples"
" were given." % np.min(n))
y = b2 * ma.sqrt(((n+1)*(n+3)) / (6.0*(n-2)))
beta2 = (3.0*(n*n+27*n-70)*(n+1)*(n+3)) / ((n-2.0)*(n+5)*(n+7)*(n+9))
W2 = -1 + ma.sqrt(2*(beta2-1))
delta = 1/ma.sqrt(0.5*ma.log(W2))
alpha = ma.sqrt(2.0/(W2-1))
y = ma.where(y == 0, 1, y)
Z = delta*ma.log(y/alpha + ma.sqrt((y/alpha)**2+1))
SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue'))
return SkewtestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
def kurtosistest(a, axis=0):
"""
Tests whether a dataset has normal kurtosis
Parameters
----------
a : array
array of the sample data
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
The 2-sided p-value for the hypothesis test
Notes
-----
For more details about `kurtosistest`, see `stats.kurtosistest`.
"""
a, axis = _chk_asarray(a, axis)
n = a.count(axis=axis)
if np.min(n) < 5:
raise ValueError(
"kurtosistest requires at least 5 observations; %i observations"
" were given." % np.min(n))
if np.min(n) < 20:
warnings.warn(
"kurtosistest only valid for n>=20 ... continuing anyway, n=%i" %
np.min(n))
b2 = kurtosis(a, axis, fisher=False)
E = 3.0*(n-1) / (n+1)
varb2 = 24.0*n*(n-2.)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5))
x = (b2-E)/ma.sqrt(varb2)
sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) /
(n*(n-2)*(n-3)))
A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2)))
term1 = 1 - 2./(9.0*A)
denom = 1 + x*ma.sqrt(2/(A-4.0))
if np.ma.isMaskedArray(denom):
# For multi-dimensional array input
denom[denom < 0] = masked
elif denom < 0:
denom = masked
term2 = ma.power((1-2.0/A)/denom,1/3.0)
Z = (term1 - term2) / np.sqrt(2/(9.0*A))
KurtosistestResult = namedtuple('KurtosistestResult', ('statistic',
'pvalue'))
return KurtosistestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
def normaltest(a, axis=0):
"""
Tests whether a sample differs from a normal distribution.
Parameters
----------
a : array_like
The array containing the data to be tested.
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
Returns
-------
statistic : float or array
``s^2 + k^2``, where ``s`` is the z-score returned by `skewtest` and
``k`` is the z-score returned by `kurtosistest`.
pvalue : float or array
A 2-sided chi squared probability for the hypothesis test.
Notes
-----
For more details about `normaltest`, see `stats.normaltest`.
"""
a, axis = _chk_asarray(a, axis)
s, _ = skewtest(a, axis)
k, _ = kurtosistest(a, axis)
k2 = s*s + k*k
NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue'))
return NormaltestResult(k2, distributions.chi2.sf(k2, 2))
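# Illustrative usage sketch (not part of the original module); uses at least 20
# observations so that the underlying kurtosistest does not warn.
def _normaltest_example():
    rng = np.random.RandomState(0)
    sample = ma.array(rng.normal(size=100))
    return normaltest(sample)  # NormaltestResult(statistic, pvalue)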
def mquantiles(a, prob=list([.25,.5,.75]), alphap=.4, betap=.4, axis=None,
limit=()):
"""
Computes empirical quantiles for a data array.
Samples quantile are defined by ``Q(p) = (1-gamma)*x[j] + gamma*x[j+1]``,
where ``x[j]`` is the j-th order statistic, and gamma is a function of
``j = floor(n*p + m)``, ``m = alphap + p*(1 - alphap - betap)`` and
``g = n*p + m - j``.
    Reinterpreting the above equations to compare to **R** leads to the
    equation: ``p(k) = (k - alphap)/(n + 1 - alphap - betap)``
Typical values of (alphap,betap) are:
- (0,1) : ``p(k) = k/n`` : linear interpolation of cdf
(**R** type 4)
- (.5,.5) : ``p(k) = (k - 1/2.)/n`` : piecewise linear function
(**R** type 5)
- (0,0) : ``p(k) = k/(n+1)`` :
(**R** type 6)
- (1,1) : ``p(k) = (k-1)/(n-1)``: p(k) = mode[F(x[k])].
(**R** type 7, **R** default)
- (1/3,1/3): ``p(k) = (k-1/3)/(n+1/3)``: Then p(k) ~ median[F(x[k])].
The resulting quantile estimates are approximately median-unbiased
regardless of the distribution of x.
(**R** type 8)
- (3/8,3/8): ``p(k) = (k-3/8)/(n+1/4)``: Blom.
The resulting quantile estimates are approximately unbiased
if x is normally distributed
(**R** type 9)
- (.4,.4) : approximately quantile unbiased (Cunnane)
- (.35,.35): APL, used with PWM
Parameters
----------
a : array_like
Input data, as a sequence or array of dimension at most 2.
prob : array_like, optional
List of quantiles to compute.
alphap : float, optional
Plotting positions parameter, default is 0.4.
betap : float, optional
Plotting positions parameter, default is 0.4.
axis : int, optional
Axis along which to perform the trimming.
If None (default), the input array is first flattened.
limit : tuple, optional
Tuple of (lower, upper) values.
Values of `a` outside this open interval are ignored.
Returns
-------
mquantiles : MaskedArray
An array containing the calculated quantiles.
Notes
-----
This formulation is very similar to **R** except the calculation of
``m`` from ``alphap`` and ``betap``, where in **R** ``m`` is defined
with each type.
References
----------
.. [1] *R* statistical software: http://www.r-project.org/
.. [2] *R* ``quantile`` function:
http://stat.ethz.ch/R-manual/R-devel/library/stats/html/quantile.html
Examples
--------
>>> from scipy.stats.mstats import mquantiles
>>> a = np.array([6., 47., 49., 15., 42., 41., 7., 39., 43., 40., 36.])
>>> mquantiles(a)
array([ 19.2, 40. , 42.8])
Using a 2D array, specifying axis and limit.
>>> data = np.array([[ 6., 7., 1.],
... [ 47., 15., 2.],
... [ 49., 36., 3.],
... [ 15., 39., 4.],
... [ 42., 40., -999.],
... [ 41., 41., -999.],
... [ 7., -999., -999.],
... [ 39., -999., -999.],
... [ 43., -999., -999.],
... [ 40., -999., -999.],
... [ 36., -999., -999.]])
>>> print(mquantiles(data, axis=0, limit=(0, 50)))
[[ 19.2 14.6 1.45]
[ 40. 37.5 2.5 ]
[ 42.8 40.05 3.55]]
>>> data[:, 2] = -999.
>>> print(mquantiles(data, axis=0, limit=(0, 50)))
[[19.200000000000003 14.6 --]
[40.0 37.5 --]
[42.800000000000004 40.05 --]]
"""
def _quantiles1D(data,m,p):
x = np.sort(data.compressed())
n = len(x)
if n == 0:
return ma.array(np.empty(len(p), dtype=float), mask=True)
elif n == 1:
return ma.array(np.resize(x, p.shape), mask=nomask)
aleph = (n*p + m)
k = np.floor(aleph.clip(1, n-1)).astype(int)
gamma = (aleph-k).clip(0,1)
return (1.-gamma)*x[(k-1).tolist()] + gamma*x[k.tolist()]
data = ma.array(a, copy=False)
if data.ndim > 2:
raise TypeError("Array should be 2D at most !")
if limit:
condition = (limit[0] < data) & (data < limit[1])
data[~condition.filled(True)] = masked
p = np.array(prob, copy=False, ndmin=1)
m = alphap + p*(1.-alphap-betap)
# Computes quantiles along axis (or globally)
if (axis is None):
return _quantiles1D(data, m, p)
return ma.apply_along_axis(_quantiles1D, axis, data, m, p)
def scoreatpercentile(data, per, limit=(), alphap=.4, betap=.4):
"""Calculate the score at the given 'per' percentile of the
sequence a. For example, the score at per=50 is the median.
    This function is a shortcut to mquantiles.
"""
if (per < 0) or (per > 100.):
raise ValueError("The percentile should be between 0. and 100. !"
" (got %s)" % per)
return mquantiles(data, prob=[per/100.], alphap=alphap, betap=betap,
limit=limit, axis=0).squeeze()
def plotting_positions(data, alpha=0.4, beta=0.4):
"""
Returns plotting positions (or empirical percentile points) for the data.
Plotting positions are defined as ``(i-alpha)/(n+1-alpha-beta)``, where:
- i is the rank order statistics
- n is the number of unmasked values along the given axis
- `alpha` and `beta` are two parameters.
Typical values for `alpha` and `beta` are:
- (0,1) : ``p(k) = k/n``, linear interpolation of cdf (R, type 4)
- (.5,.5) : ``p(k) = (k-1/2.)/n``, piecewise linear function
(R, type 5)
- (0,0) : ``p(k) = k/(n+1)``, Weibull (R type 6)
- (1,1) : ``p(k) = (k-1)/(n-1)``, in this case,
``p(k) = mode[F(x[k])]``. That's R default (R type 7)
- (1/3,1/3): ``p(k) = (k-1/3)/(n+1/3)``, then
``p(k) ~ median[F(x[k])]``.
The resulting quantile estimates are approximately median-unbiased
regardless of the distribution of x. (R type 8)
- (3/8,3/8): ``p(k) = (k-3/8)/(n+1/4)``, Blom.
The resulting quantile estimates are approximately unbiased
if x is normally distributed (R type 9)
- (.4,.4) : approximately quantile unbiased (Cunnane)
- (.35,.35): APL, used with PWM
- (.3175, .3175): used in scipy.stats.probplot
Parameters
----------
data : array_like
Input data, as a sequence or array of dimension at most 2.
alpha : float, optional
Plotting positions parameter. Default is 0.4.
beta : float, optional
Plotting positions parameter. Default is 0.4.
Returns
-------
positions : MaskedArray
The calculated plotting positions.
"""
data = ma.array(data, copy=False).reshape(1,-1)
n = data.count()
plpos = np.empty(data.size, dtype=float)
plpos[n:] = 0
plpos[data.argsort()[:n]] = ((np.arange(1, n+1) - alpha) /
(n + 1.0 - alpha - beta))
return ma.array(plpos, mask=data._mask)
meppf = plotting_positions
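# Illustrative usage sketch (not part of the original module): with the default
# (alpha, beta) = (0.4, 0.4) the positions are approximately quantile unbiased.
def _plotting_positions_example():
    sample = ma.array([3., 1., 4., 1., 5., 9., 2., 6.])
    return plotting_positions(sample)  # masked array of values in (0, 1)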
def obrientransform(*args):
"""
Computes a transform on input data (any number of columns). Used to
test for homogeneity of variance prior to running one-way stats. Each
    array in ``*args`` is one level of a factor. If an `f_oneway()` run on
    the transformed data is found significant, the variances are unequal. From
    Maxwell and Delaney, p.112.
Returns: transformed data for use in an ANOVA
"""
data = argstoarray(*args).T
v = data.var(axis=0,ddof=1)
m = data.mean(0)
n = data.count(0).astype(float)
# result = ((N-1.5)*N*(a-m)**2 - 0.5*v*(n-1))/((n-1)*(n-2))
data -= m
data **= 2
data *= (n-1.5)*n
data -= 0.5*v*(n-1)
data /= (n-1.)*(n-2.)
if not ma.allclose(v,data.mean(0)):
raise ValueError("Lack of convergence in obrientransform.")
return data
@np.deprecate(message="mstats.signaltonoise is deprecated in scipy 0.16.0")
def signaltonoise(data, axis=0):
"""Calculates the signal-to-noise ratio, as the ratio of the mean over
standard deviation along the given axis.
Parameters
----------
data : sequence
Input data
axis : {0, int}, optional
Axis along which to compute. If None, the computation is performed
on a flat version of the array.
"""
data = ma.array(data, copy=False)
m = data.mean(axis)
sd = data.std(axis, ddof=0)
return m/sd
def sem(a, axis=0, ddof=1):
"""
Calculates the standard error of the mean of the input array.
Also sometimes called standard error of measurement.
Parameters
----------
a : array_like
An array containing the values for which the standard error is
returned.
axis : int or None, optional
If axis is None, ravel `a` first. If axis is an integer, this will be
the axis over which to operate. Defaults to 0.
ddof : int, optional
Delta degrees-of-freedom. How many degrees of freedom to adjust
for bias in limited samples relative to the population estimate
of variance. Defaults to 1.
Returns
-------
s : ndarray or float
The standard error of the mean in the sample(s), along the input axis.
Notes
-----
The default value for `ddof` changed in scipy 0.15.0 to be consistent with
`stats.sem` as well as with the most common definition used (like in the R
documentation).
Examples
--------
Find standard error along the first axis:
>>> from scipy import stats
>>> a = np.arange(20).reshape(5,4)
>>> print(stats.mstats.sem(a))
[2.8284271247461903 2.8284271247461903 2.8284271247461903
2.8284271247461903]
Find standard error across the whole array, using n degrees of freedom:
>>> print(stats.mstats.sem(a, axis=None, ddof=0))
1.2893796958227628
"""
a, axis = _chk_asarray(a, axis)
n = a.count(axis=axis)
s = a.std(axis=axis, ddof=ddof) / ma.sqrt(n)
return s
def f_oneway(*args):
"""
Performs a 1-way ANOVA, returning an F-value and probability given
any number of groups. From Heiman, pp.394-7.
Usage: ``f_oneway(*args)``, where ``*args`` is 2 or more arrays,
one per treatment group.
Returns
-------
statistic : float
The computed F-value of the test.
pvalue : float
The associated p-value from the F-distribution.
"""
# Construct a single array of arguments: each row is a group
data = argstoarray(*args)
ngroups = len(data)
ntot = data.count()
sstot = (data**2).sum() - (data.sum())**2/float(ntot)
ssbg = (data.count(-1) * (data.mean(-1)-data.mean())**2).sum()
sswg = sstot-ssbg
dfbg = ngroups-1
dfwg = ntot - ngroups
msb = ssbg/float(dfbg)
msw = sswg/float(dfwg)
f = msb/msw
prob = special.fdtrc(dfbg, dfwg, f) # equivalent to stats.f.sf
F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue'))
return F_onewayResult(f, prob)
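# Illustrative usage sketch (not part of the original module): a one-way ANOVA
# on three small treatment groups.
def _f_oneway_example():
    g1 = ma.array([6.9, 5.4, 5.8, 4.6, 4.0])
    g2 = ma.array([8.3, 6.8, 7.8, 9.2, 6.5])
    g3 = ma.array([8.0, 10.5, 8.1, 6.9, 9.3])
    return f_oneway(g1, g2, g3)  # F_onewayResult(statistic, pvalue)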
@np.deprecate(message="mstats.f_value_wilks_lambda deprecated in scipy 0.17.0")
def f_value_wilks_lambda(ER, EF, dfnum, dfden, a, b):
"""Calculation of Wilks lambda F-statistic for multivariate data, per
Maxwell & Delaney p.657.
"""
ER = ma.array(ER, copy=False, ndmin=2)
EF = ma.array(EF, copy=False, ndmin=2)
if ma.getmask(ER).any() or ma.getmask(EF).any():
raise NotImplementedError("Not implemented when the inputs "
"have missing data")
lmbda = np.linalg.det(EF) / np.linalg.det(ER)
q = ma.sqrt(((a-1)**2*(b-1)**2 - 2) / ((a-1)**2 + (b-1)**2 - 5))
q = ma.filled(q, 1)
n_um = (1 - lmbda**(1.0/q))*(a-1)*(b-1)
d_en = lmbda**(1.0/q) / (n_um*q - 0.5*(a-1)*(b-1) + 1)
return n_um / d_en
def friedmanchisquare(*args):
"""Friedman Chi-Square is a non-parametric, one-way within-subjects ANOVA.
This function calculates the Friedman Chi-square test for repeated measures
and returns the result, along with the associated probability value.
Each input is considered a given group. Ideally, the number of treatments
among each group should be equal. If this is not the case, only the first
n treatments are taken into account, where n is the number of treatments
of the smallest group.
If a group has some missing values, the corresponding treatments are masked
in the other groups.
The test statistic is corrected for ties.
Masked values in one group are propagated to the other groups.
Returns
-------
statistic : float
the test statistic.
pvalue : float
the associated p-value.
"""
data = argstoarray(*args).astype(float)
k = len(data)
if k < 3:
raise ValueError("Less than 3 groups (%i): " % k +
"the Friedman test is NOT appropriate.")
ranked = ma.masked_values(rankdata(data, axis=0), 0)
if ranked._mask is not nomask:
ranked = ma.mask_cols(ranked)
ranked = ranked.compressed().reshape(k,-1).view(ndarray)
else:
ranked = ranked._data
(k,n) = ranked.shape
# Ties correction
repeats = np.array([find_repeats(_) for _ in ranked.T], dtype=object)
ties = repeats[repeats.nonzero()].reshape(-1,2)[:,-1].astype(int)
tie_correction = 1 - (ties**3-ties).sum()/float(n*(k**3-k))
ssbg = np.sum((ranked.sum(-1) - n*(k+1)/2.)**2)
chisq = ssbg * 12./(n*k*(k+1)) * 1./tie_correction
FriedmanchisquareResult = namedtuple('FriedmanchisquareResult',
('statistic', 'pvalue'))
return FriedmanchisquareResult(chisq,
distributions.chi2.sf(chisq, k-1))
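# Illustrative usage sketch (not part of the original module): three repeated
# measurements on the same seven subjects.
def _friedmanchisquare_example():
    before = ma.array([72, 96, 88, 92, 74, 76, 82])
    after1 = ma.array([120, 120, 132, 120, 101, 96, 112])
    after2 = ma.array([76, 95, 104, 96, 84, 72, 76])
    return friedmanchisquare(before, after1, after2)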
| bsd-3-clause |
ChanChiChoi/scikit-learn | examples/svm/plot_svm_scale_c.py | 222 | 5375 | """
==============================================
Scaling the regularization parameter for SVCs
==============================================
The following example illustrates the effect of scaling the
regularization parameter when using :ref:`svm` for
:ref:`classification <svm_classification>`.
For SVC classification, we are interested in a risk minimization for the
equation:
.. math::
C \sum_{i=1, n} \mathcal{L} (f(x_i), y_i) + \Omega (w)
where
- :math:`C` is used to set the amount of regularization
- :math:`\mathcal{L}` is a `loss` function of our samples
and our model parameters.
- :math:`\Omega` is a `penalty` function of our model parameters
If we consider the loss function to be the individual error per
sample, then the data-fit term, or the sum of the error for each sample, will
increase as we add more samples. The penalization term, however, will not
increase.
When using, for example, :ref:`cross validation <cross_validation>`, to
set the amount of regularization with `C`, there will be a
different number of samples between the main problem and the smaller problems
within the folds of the cross validation.
Since our loss function is dependent on the number of samples, the latter
will influence the selected value of `C`.
The question that arises is `How do we optimally adjust C to
account for the different number of training samples?`
The figures below are used to illustrate the effect of scaling our
`C` to compensate for the change in the number of samples, in the
case of using an `l1` penalty, as well as the `l2` penalty.
l1-penalty case
-----------------
In the `l1` case, theory says that prediction consistency
(i.e. that under given hypothesis, the estimator
learned predicts as well as a model knowing the true distribution)
is not possible because of the bias of the `l1`. It does say, however,
that model consistency, in terms of finding the right set of non-zero
parameters as well as their signs, can be achieved by scaling
`C`.
l2-penalty case
-----------------
The theory says that in order to achieve prediction consistency, the
penalty parameter should be kept constant
as the number of samples grows.
Simulations
------------
The two figures below plot the values of `C` on the `x-axis` and the
corresponding cross-validation scores on the `y-axis`, for several different
fractions of a generated data-set.
In the `l1` penalty case, the cross-validation-error correlates best with
the test-error, when scaling our `C` with the number of samples, `n`,
which can be seen in the first figure.
For the `l2` penalty case, the best result comes from the case where `C`
is not scaled.
.. topic:: Note:
    Two separate datasets are used for the two different plots. The reason
    behind this is that the `l1` case works better on sparse data, while `l2`
is better suited to the non-sparse case.
"""
print(__doc__)
# Author: Andreas Mueller <amueller@ais.uni-bonn.de>
# Jaques Grobler <jaques.grobler@inria.fr>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.cross_validation import ShuffleSplit
from sklearn.grid_search import GridSearchCV
from sklearn.utils import check_random_state
from sklearn import datasets
rnd = check_random_state(1)
# set up dataset
n_samples = 100
n_features = 300
# l1 data (only 5 informative features)
X_1, y_1 = datasets.make_classification(n_samples=n_samples,
n_features=n_features, n_informative=5,
random_state=1)
# l2 data: non sparse, but less features
y_2 = np.sign(.5 - rnd.rand(n_samples))
X_2 = rnd.randn(n_samples, n_features // 5) + y_2[:, np.newaxis]
X_2 += 5 * rnd.randn(n_samples, n_features // 5)
clf_sets = [(LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
tol=1e-3),
np.logspace(-2.3, -1.3, 10), X_1, y_1),
(LinearSVC(penalty='l2', loss='squared_hinge', dual=True,
tol=1e-4),
np.logspace(-4.5, -2, 10), X_2, y_2)]
colors = ['b', 'g', 'r', 'c']
for fignum, (clf, cs, X, y) in enumerate(clf_sets):
    # set up the plot for each classifier
plt.figure(fignum, figsize=(9, 10))
for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]):
param_grid = dict(C=cs)
# To get nice curve, we need a large number of iterations to
# reduce the variance
grid = GridSearchCV(clf, refit=False, param_grid=param_grid,
cv=ShuffleSplit(n=n_samples, train_size=train_size,
n_iter=250, random_state=1))
grid.fit(X, y)
scores = [x[1] for x in grid.grid_scores_]
scales = [(1, 'No scaling'),
((n_samples * train_size), '1/n_samples'),
]
for subplotnum, (scaler, name) in enumerate(scales):
plt.subplot(2, 1, subplotnum + 1)
plt.xlabel('C')
plt.ylabel('CV Score')
grid_cs = cs * float(scaler) # scale the C's
plt.semilogx(grid_cs, scores, label="fraction %.2f" %
train_size)
plt.title('scaling=%s, penalty=%s, loss=%s' %
(name, clf.penalty, clf.loss))
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
google/uncertainty-baselines | experimental/cifar10_resnet20/main.py | 1 | 5433 | # coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""CIFAR-10 ResNet-20 example for Uncertainty Baselines.
"""
import os.path
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow as tf
import uncertainty_baselines as ub
# Flags relating to hyperparameters.
flags.DEFINE_integer('batch_size', 512, 'The training batch size.')
flags.DEFINE_integer('eval_batch_size', 100, 'The evaluation batch size.')
flags.DEFINE_string('optimizer', 'adam', 'The optimizer to train with.')
flags.DEFINE_float('learning_rate', 0.01, 'The learning rate.')
flags.DEFINE_float(
'weight_decay',
None,
'The model decoupled weight decay rate.')
# Flags relating to setting up the job.
flags.DEFINE_bool('use_tpu', False, 'Whether to run on CPU or TPU.')
flags.DEFINE_string('tpu', '', 'Name of the TPU to use.')
# Flags relating to the training/eval loop.
flags.DEFINE_string('output_dir', None, 'Base output directory.')
flags.DEFINE_integer(
'eval_frequency',
100,
'How many steps between evaluating on the (validation and) test set.')
flags.DEFINE_integer('train_steps', 2000, 'How many steps to train for.')
flags.DEFINE_integer('seed', 1337, 'Random seed.')
FLAGS = flags.FLAGS
def _check_batch_replica_divisible(
total_batch_size: int,
strategy: tf.distribute.Strategy):
"""Ensure the batch size is evenly divisible by the number of replicas."""
if total_batch_size % strategy.num_replicas_in_sync != 0:
raise ValueError(
'Batch size must be evenly divisible by the number of replicas in the '
'job. Total batch size: {}, num replicas: {}'.format(
total_batch_size, strategy.num_replicas_in_sync))
def _ds_as_tuple(ds):
return ds.map(lambda d: (d['features'], d['labels']))
def run(trial_dir: str):
"""Run the experiment."""
tf.random.set_seed(FLAGS.seed)
np.random.seed(FLAGS.seed)
strategy = ub.strategy_utils.get_strategy(FLAGS.tpu, FLAGS.use_tpu)
with strategy.scope():
# Setup CIFAR-10 tf.data.Dataset splits.
# Use 5000 validation images.
train_dataset_builder = ub.datasets.Cifar10Dataset(
split='train', validation_percent=0.1)
train_dataset = train_dataset_builder.load(batch_size=FLAGS.batch_size)
train_dataset = _ds_as_tuple(train_dataset)
train_dataset = strategy.experimental_distribute_dataset(train_dataset)
val_dataset_builder = ub.datasets.Cifar10Dataset(
split='validation', validation_percent=0.1)
val_dataset = val_dataset_builder.load(batch_size=FLAGS.eval_batch_size)
val_dataset = _ds_as_tuple(val_dataset)
val_dataset = strategy.experimental_distribute_dataset(val_dataset)
test_dataset_builder = ub.datasets.Cifar10Dataset(split='test')
test_dataset = test_dataset_builder.load(batch_size=FLAGS.eval_batch_size)
test_dataset = _ds_as_tuple(test_dataset)
test_dataset = strategy.experimental_distribute_dataset(test_dataset)
# Setup optimizer.
_check_batch_replica_divisible(FLAGS.batch_size, strategy)
_check_batch_replica_divisible(FLAGS.eval_batch_size, strategy)
optimizer = ub.optimizers.get(
optimizer_name=FLAGS.optimizer,
learning_rate_schedule='constant',
learning_rate=FLAGS.learning_rate,
weight_decay=FLAGS.weight_decay)
# Setup model.
model = ub.models.resnet20(
batch_size=FLAGS.batch_size, l2_weight=None)
model.compile(
optimizer=optimizer,
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['sparse_categorical_accuracy'])
# Train and eval.
steps_per_epoch = train_dataset_builder.num_examples // FLAGS.batch_size
validation_steps = (
val_dataset_builder.num_examples // FLAGS.eval_batch_size)
history = model.fit(
x=train_dataset,
batch_size=FLAGS.batch_size,
epochs=FLAGS.train_steps // steps_per_epoch,
steps_per_epoch=steps_per_epoch,
validation_data=val_dataset,
validation_steps=validation_steps,
validation_freq=FLAGS.eval_frequency,
shuffle=False)
logging.info(history)
test_steps = test_dataset_builder.num_examples // FLAGS.eval_batch_size
test_result = model.evaluate(
x=test_dataset,
batch_size=FLAGS.eval_batch_size,
steps=test_steps)
logging.info(test_result)
# Save a checkpoint after training.
if trial_dir:
model.save_weights(
os.path.join(trial_dir, 'model.ckpt-{}'.format(FLAGS.train_steps)))
def main(argv):
del argv
logging.info('Starting CIFAR-10 ResNet-20 experiment!')
trial_dir = os.path.join(FLAGS.output_dir, '0')
logging.info('Saving to dir: %s', trial_dir)
if not tf.io.gfile.exists(trial_dir):
tf.io.gfile.makedirs(trial_dir)
return run(trial_dir)
if __name__ == '__main__':
app.run(main)
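# Example invocation (illustrative, not part of the original script); the flag
# names below match the definitions above, the output path is hypothetical:
#   python main.py --output_dir=/tmp/cifar10_resnet20 --batch_size=512 \
#     --train_steps=2000 --optimizer=adam --learning_rate=0.01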
| apache-2.0 |
fredhusser/scikit-learn | sklearn/utils/tests/test_sparsefuncs.py | 156 | 13799 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from numpy.testing import assert_array_almost_equal, assert_array_equal
from sklearn.datasets import make_classification
from sklearn.utils.sparsefuncs import (mean_variance_axis,
inplace_column_scale,
inplace_row_scale,
inplace_swap_row, inplace_swap_column,
min_max_axis,
count_nonzero, csc_median_axis_0)
from sklearn.utils.sparsefuncs_fast import assign_rows_csr
from sklearn.utils.testing import assert_raises
def test_mean_variance_axis0():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
    X_csc = X_csc.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_means, X_vars = mean_variance_axis(X_csc, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
def test_mean_variance_illegal_axis():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_csr = sp.csr_matrix(X)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-3)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=2)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-1)
def test_mean_variance_axis1():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
    X_csc = X_csc.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
X_means, X_vars = mean_variance_axis(X_csc, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
def test_densify_rows():
X = sp.csr_matrix([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_rows = np.array([0, 2, 3], dtype=np.intp)
out = np.ones((6, X.shape[1]), dtype=np.float64)
out_rows = np.array([1, 3, 4], dtype=np.intp)
expect = np.ones_like(out)
expect[out_rows] = X[X_rows, :].toarray()
assign_rows_csr(X, X_rows, out_rows, out)
assert_array_equal(out, expect)
def test_inplace_column_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(200)
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_row_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(100)
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_swap_row():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
def test_inplace_swap_column():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
def test_min_max_axis0():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
def test_min_max_axis1():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
def test_min_max_axis_errors():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
assert_raises(TypeError, min_max_axis, X_csr.tolil(), axis=0)
assert_raises(ValueError, min_max_axis, X_csr, axis=2)
assert_raises(ValueError, min_max_axis, X_csc, axis=-3)
def test_count_nonzero():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
X_nonzero = X != 0
sample_weight = [.5, .2, .3, .1, .1]
X_nonzero_weighted = X_nonzero * np.array(sample_weight)[:, None]
for axis in [0, 1, -1, -2, None]:
assert_array_almost_equal(count_nonzero(X_csr, axis=axis),
X_nonzero.sum(axis=axis))
assert_array_almost_equal(count_nonzero(X_csr, axis=axis,
sample_weight=sample_weight),
X_nonzero_weighted.sum(axis=axis))
assert_raises(TypeError, count_nonzero, X_csc)
assert_raises(ValueError, count_nonzero, X_csr, axis=2)
def test_csc_row_median():
# Test csc_row_median actually calculates the median.
# Test that it gives the same output when X is dense.
rng = np.random.RandomState(0)
X = rng.rand(100, 50)
dense_median = np.median(X, axis=0)
csc = sp.csc_matrix(X)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test that it gives the same output when X is sparse
X = rng.rand(51, 100)
X[X < 0.7] = 0.0
ind = rng.randint(0, 50, 10)
X[ind] = -X[ind]
csc = sp.csc_matrix(X)
dense_median = np.median(X, axis=0)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test for toy data.
X = [[0, -2], [-1, -1], [1, 0], [2, 1]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0.5, -0.5]))
X = [[0, -2], [-1, -5], [1, -3]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0., -3]))
# Test that it raises an Error for non-csc matrices.
assert_raises(TypeError, csc_median_axis_0, sp.csr_matrix(X))
| bsd-3-clause |
icoderaven/slytherin_dagger | src/linear_predictor.py | 1 | 4306 | #!/usr/bin/env python
import math
import numpy as np
import scipy
import scipy.linalg as la
import os
from sklearn import linear_model
class LinearPredictor:
def __init__(self):
self.m_w = 0
self.m_mean_x = 0
self.m_mean_y = 0
self.m_std_x = 1
# ----------------------------------------------------------------------
#load predictor from file
#----------------------------------------------------------------------
def load(self, filename):
A = np.load(filename) #load numpy array stored in filename
self.m_w = A[0, :]
self.m_mean_x = A[1, :]
self.m_std_x = A[2, :]
self.m_mean_y = A[3, 0]
#----------------------------------------------------------------------
    #compute output prediction given input features
#----------------------------------------------------------------------
def predict(self, feat_array):
#renormalize features
xtmp = (feat_array - self.m_mean_x) / self.m_std_x
#compute dot product between features and predictor
return np.dot(xtmp, self.m_w) + self.m_mean_y
def to_string(self):
        print(self.m_w)
def load(filename):
pred = LinearPredictor()
pred.load(filename)
return pred
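# Illustrative usage sketch (not part of the original module); the file name is
# hypothetical and is assumed to have been produced by train() below.
def _example_predict(feature_array, predictor_file="predictor-1.000000.npy"):
    pred = load(predictor_file)          # restores weights, means and stds
    return pred.predict(feature_array)   # linear prediction for the features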
def train(X, y, filename, options, feature_weight=np.array([1.0]), sample_weight_type="None", print_flag=0):
mean_x = X.mean(0)
std_x = X.std(0)
mean_y = y.mean(0)
n = mean_x.size
# hack to keep bias feature when removing mean and renormalizing with std
#mean_x[n - 1] = 0;
#std_x[n - 1] = 1;
for index, x in enumerate(std_x):
if x == 0.0:
std_x[index] = 1.0
print "WARNING: Failing index with zero stddev = %d" % index
#renormalize features
X = (X - mean_x) / std_x
(r, c) = X.shape
#solve ridge regression
if options.size == 0:
options = np.array([1])
# compute sample weights
y = y
m = y.size
sample_weights = np.ones(m)
X_sub = np.array([])
y_sub = np.array([])
nonzero_val = 0.01
if sample_weight_type == "weighted":
nb_nonzero = np.sum(abs(y) > nonzero_val)
weight_nonzero = m / (2.0 * nb_nonzero)
weight_zero = m / (2.0 * (m - nb_nonzero))
sample_weights[abs(y) > nonzero_val] = weight_nonzero
sample_weights[abs(y) <= nonzero_val] = weight_zero
elif sample_weight_type == "subsample":
nb_nonzero = np.sum(abs(y) > nonzero_val)
if nb_nonzero < m - nb_nonzero:
X_sub = X[abs(y) > nonzero_val, :]
y_sub = y[abs(y) > nonzero_val]
Xtmp = X[abs(y) <= nonzero_val, :]
ytmp = y[abs(y) <= nonzero_val]
X_sub = np.vstack((X_sub, Xtmp[range(0, m - nb_nonzero, int((m - nb_nonzero) / nb_nonzero)), :]))
y_sub = np.append(y_sub, ytmp[range(0, m - nb_nonzero, int((m - nb_nonzero) / nb_nonzero))])
y_sub = y_sub - mean_y
else:
X_sub = X[abs(y) <= nonzero_val, :]
y_sub = y[abs(y) <= nonzero_val]
Xtmp = X[abs(y) > nonzero_val, :]
ytmp = y[abs(y) > nonzero_val]
X_sub = np.vstack((X_sub, Xtmp[range(0, nb_nonzero, int(nb_nonzero / (m - nb_nonzero))), :]))
y_sub = np.append(y_sub, ytmp[range(0, nb_nonzero, int(nb_nonzero / (m - nb_nonzero)))])
y_sub = y_sub - mean_y
y = y - mean_y
A = np.zeros((4, n))
A[1, :] = mean_x
A[2, :] = std_x
    A[3, :] = mean_y
for i in range(options.size):
#print "[DAgger] Training with Regularizer %f" % (options[i])
reg = math.sqrt(r) * options[i]
outname, outext = os.path.splitext(filename)
fname = "%s-%f%s" % (outname, options[i], outext)
reg_algo = linear_model.Ridge(alpha=reg, fit_intercept=False)
#reg_algo = linear_model.Lasso(alpha=reg/math.sqrt(r), fit_intercept=False)
if sample_weight_type == "None":
reg_algo.fit(X,y)
w = reg_algo.coef_
elif sample_weight_type == "subsample":
reg_algo.fit(X_sub,y_sub)
w = reg_algo.coef_
        if print_flag == 1:
            print("[DAgger] learned weights for reg ", options[i], ": ")
            print(w)
A[0, :] = w
np.save(fname, A)
| bsd-3-clause |
mythsmith/veusz | veusz/plugins/field.py | 1 | 14590 | # Copyright (C) 2010 Jeremy S. Sanders
# Email: Jeremy Sanders <jeremy@jeremysanders.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
##############################################################################
"""Data entry fields for plugins."""
from __future__ import division
from .. import qtall as qt4
from .. import utils
from .. import setting
class Field(object):
"""A class to represent an input field on the dialog or command line."""
def __init__(self, name, descr=None, default=None):
"""name: name of field
descr: description to show to user
default: default value."""
self.name = name
if descr:
self.descr = descr
else:
self.descr = name
self.default = default
def makeControl(self, doc, currentwidget):
"""Create a set of controls for field."""
return None
def setControlVal(self, controls, val):
"""Update control's value to val."""
pass
def getControlResults(self, cntrls):
"""Get result from created contrls."""
return None
class FieldText(Field):
"""Text entry on the dialog."""
def makeControl(self, doc, currentwidget):
l = qt4.QLabel(self.descr)
e = qt4.QLineEdit()
if self.default:
e.setText(self.default)
return (l, e)
def setControlVal(self, controls, val):
controls[1].setText(val)
def getControlResults(self, cntrls):
return cntrls[1].text()
class FieldCombo(Field):
"""Drop-down combobox on dialog."""
def __init__(self, name, descr=None, default=None, items=(),
editable=True):
"""name: name of field
descr: description to show to user
default: default value
items: items in drop-down box
editable: whether user can enter their own value."""
Field.__init__(self, name, descr=descr, default=default)
self.items = items
self.editable = editable
def makeControl(self, doc, currentwidget):
l = qt4.QLabel(self.descr)
c = qt4.QComboBox()
c.addItems(self.items)
c.setEditable(bool(self.editable))
if self.default:
self.setControlVal((l, c), self.default)
return (l, c)
def setControlVal(self, controls, val):
"""Update value to val."""
if self.editable:
controls[1].setEditText(val)
else:
controls[1].setCurrentIndex(controls[1].findText(val))
def getControlResults(self, cntrls):
return cntrls[1].currentText()
class _WidgetCombo(qt4.QComboBox):
"""Combo box for selecting widgets."""
def __init__(self, doc, widgettypes, default):
"""doc: Veusz document
widgettypes: set of allowed widgettypes or empty for all
default: default path."""
qt4.QComboBox.__init__(self)
self.doc = doc
self.widgettypes = widgettypes
self.default = default
self.updateWidgets()
doc.signalModified.connect(self.updateWidgets)
def _iterateWidgets(self, comboitems, paths, widget, level):
"""Walk widget tree recursively.
Adds name onto a list of strings (comboitems)
Adds path to widget onto list of paths (paths)
"""
if not self.widgettypes or widget.typename in self.widgettypes:
comboitems.append(' '*level + widget.name)
paths.append(widget.path)
for w in widget.children:
self._iterateWidgets(comboitems, paths, w, level+1)
@qt4.pyqtSlot()
def updateWidgets(self):
"""Update combo with new widgets."""
self.paths = [] # veusz widget paths of items
comboitems = [] # names of items (with tree spacing)
self._iterateWidgets(comboitems, self.paths, self.doc.basewidget, 0)
if self.count() == 0:
# first time around add default to get it selected, yuck :-(
try:
idx = self.paths.index(self.default)
self.addItem( comboitems[idx] )
except ValueError:
pass
utils.populateCombo(self, comboitems)
def getWidgetPath(self):
"""Get path of selected widget."""
return self.paths[self.currentIndex()]
class FieldWidget(Field):
"""Drop-down combobox for selecting widgets."""
def __init__(self, name, descr=None, default='/', widgettypes=set()):
"""name: name of field
descr: description to show to user
default: default value - set to '' to get current widget."""
Field.__init__(self, name, descr=descr, default=default)
self.widgettypes = widgettypes
def makeControl(self, doc, currentwidget):
default = self.default
if default == '':
default = currentwidget
l = qt4.QLabel(self.descr)
c = _WidgetCombo(doc, self.widgettypes, default)
return (l, c)
def setControlVal(self, controls, val):
controls[1].setCurrentIndex(controls[1].findText(val))
def getControlResults(self, cntrls):
return cntrls[1].getWidgetPath()
class _FieldSetting(Field):
"""Field using a setting internally to avoid code duplication.
Designed to be subclassed."""
def __init__(self, settingkls, name, descr=None, default='',
setnparams = {}):
Field.__init__(self, name, descr=descr, default=default)
self.default = default
self.setn = settingkls(name, default, **setnparams)
def makeControl(self, doc, currentwidget):
"""Use setting makeControl method to make control."""
self.setn.parent = self # setting looks to parent for document
self.setn.set(self.default)
self.document = doc
l = qt4.QLabel(self.descr)
c = self.setn.makeControl(None)
def updateval(cntrl, setn, val):
setn.set(val)
# if control changes setting, update setting
c.sigSettingChanged.connect(updateval)
return (l, c)
def setControlVal(self, cntrls, val):
self.setn.set(val)
def getDocument(self):
"""This is used by settings to get their document."""
return self.document
def getControlResults(self, cntrls):
"""Get result from setting."""
return self.setn.get()
class FieldBool(_FieldSetting):
"""A true/false value using a check box."""
def __init__(self, name, descr=None, default=False):
_FieldSetting.__init__(self, setting.Bool, name,
descr=descr, default=default)
class FieldInt(_FieldSetting):
"""An integer number field."""
def __init__(self, name, descr=None, default=0,
minval=-9999999, maxval=9999999):
"""name: name of field
descr: description to show to user
default: default value.
minval and maxval: minimum and maximum integers
"""
_FieldSetting.__init__(self, setting.Int,
name, descr=descr, default=default,
setnparams={'minval': minval, 'maxval': maxval})
class FieldFloat(_FieldSetting):
"""A floating point number field."""
def __init__(self, name, descr=None, default=None,
minval=-1e99, maxval=1e99):
"""name: name of field
descr: description to show to user
default: default value.
minval and maxval: minimum and maximum values
"""
_FieldSetting.__init__(self, setting.Float,
name, descr=descr, default=default,
setnparams={'minval': minval, 'maxval': maxval})
class FieldFloatOrAuto(_FieldSetting):
"""A floating point value or the text 'Auto'."""
def __init__(self, name, descr=None, default='Auto'):
"""name: name of field
descr: description to show to user
default: default value.
"""
_FieldSetting.__init__(self, setting.FloatOrAuto,
name, descr=descr, default=default)
class FieldColor(_FieldSetting):
"""Field for selecting a color - returns #rrggbb string."""
def __init__(self, name, descr=None, default='black'):
_FieldSetting.__init__(self, setting.Color, name,
descr=descr, default=default)
class FieldFillStyle(_FieldSetting):
"""Field for selecting fill styles - returns a string."""
def __init__(self, name, descr=None, default='solid'):
_FieldSetting.__init__(self, setting.FillStyle, name,
descr=descr, default=default)
class FieldLineStyle(_FieldSetting):
"""Field for selecting line styles - returns a string."""
def __init__(self, name, descr=None, default='solid'):
_FieldSetting.__init__(self, setting.LineStyle, name,
descr=descr, default=default)
class FieldMarker(_FieldSetting):
"""Field for selecting a marker type.
Returns a string
"""
def __init__(self, name, descr=None, default='circle'):
_FieldSetting.__init__(self, setting.Marker, name,
descr=descr, default=default)
class FieldArrow(_FieldSetting):
"""Field for selecting an arrow type.
Returns a string
"""
def __init__(self, name, descr=None, default='none'):
_FieldSetting.__init__(self, setting.Arrow, name,
descr=descr, default=default)
class FieldErrorStyle(_FieldSetting):
"""Field for selecting an error bar style
Returns a string
"""
def __init__(self, name, descr=None, default='bar'):
_FieldSetting.__init__(self, setting.ErrorStyle, name,
descr=descr, default=default)
class FieldDistance(_FieldSetting):
"""Field for selecting a veusz-style distance, e.g. '1pt'.
Returns a string
"""
def __init__(self, name, descr=None, default='1pt'):
_FieldSetting.__init__(self, setting.Distance, name,
descr=descr, default=default)
class FieldFloatList(_FieldSetting):
"""Field for entering multiple numbers, separated by commas or spaces
Returns a list/tuple of floats
"""
def __init__(self, name, descr=None, default=()):
_FieldSetting.__init__(self, setting.FloatList, name,
descr=descr, default=default)
class FieldDataset(_FieldSetting):
"""Field for selecting a datset.
Returns a string.
Note that the validity of dataset names is not checked
Note that a blank string may result
"""
def __init__(self, name, descr=None, default='', dims=1,
datatype='numeric'):
"""name: name of field
descr: description to show to user
default: default value (ignored currently)
dims: dimensions of dataset to show
datatype: type of data: numeric or text
"""
_FieldSetting.__init__(self, setting.Dataset,
name, descr=descr, default=default,
setnparams={'dimensions': dims,
'datatype': datatype})
class FieldTextMulti(_FieldSetting):
"""Field for entering multiple lines of text.
Returns a tuple/list of strings.
"""
def __init__(self, name, descr=None, default=('')):
_FieldSetting.__init__(self, setting.Strings, name,
descr=descr, default=default)
class FieldDatasetMulti(_FieldSetting):
"""Field for entering multiple datasets.
Returns a tuple/list of strings.
"""
def __init__(self, name, descr=None, default=(''), dims=1,
datatype='numeric'):
"""dims is number of dimensions of datasets to show in
drop-down list.
datatype is 'numeric' or 'text'
"""
_FieldSetting.__init__(self, setting.Datasets, name,
descr=descr, default=default,
setnparams={'dimensions': dims,
'datatype': datatype})
class FieldLineMulti(_FieldSetting):
"""A field for holding a set of lines. Consists of tuples
[('dotted', '1pt', 'color', <trans>, False), ...]
These are style, width, color, and hide or
style, width, color, transparency, hide
This is compatible with the contour widget line style
"""
def __init__(self, name, descr=None,
default=(('solid', '1pt', 'black', False),) ):
_FieldSetting.__init__(self, setting.LineSet, name,
descr=descr, default=default)
class FieldFillMulti(_FieldSetting):
"""A field for holding a set of fills. Consists of tuples
[('solid', 'color', <trans>, False), ...]
These are fill style, color, and hide or
fill style, color, transparency and hide
This is compatible with the contour widget fill style
"""
def __init__(self, name, descr=None, default=()):
_FieldSetting.__init__(self, setting.FillSet, name,
descr=descr, default=default)
class FieldFontFamily(_FieldSetting):
"""A field for holding a font family.
Returns a string.
"""
def __init__(self, name, descr=None, default=None):
"""Default None selects the default font."""
if default is None:
default = setting.Text.defaultfamily
_FieldSetting.__init__(self, setting.FontFamily, name,
descr=descr, default=default)
class FieldFilename(_FieldSetting):
"""Select a filename with a browse button."""
def __init__(self, name, descr=None, default=''):
_FieldSetting.__init__(self, setting.Filename, name,
descr=descr, default=default)
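# Editor's sketch (hypothetical usage, not part of this module): a plugin
# would typically declare its user-visible options as a list of these Field
# objects, for example:
#
#   fields = [
#       FieldBool('logscale', descr='Use log scale', default=False),
#       FieldFloat('threshold', descr='Clip threshold', default=1.),
#       FieldDataset('ds_in', descr='Input dataset'),
#       FieldColor('color', descr='Line colour', default='black'),
#   ]
#
# The dialog code then calls makeControl() on each field to build the UI and
# getControlResults() to collect the chosen values.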
| gpl-2.0 |
ChanChiChoi/scikit-learn | examples/decomposition/plot_sparse_coding.py | 246 | 3846 | """
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding different widths of atoms matters, which
motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size; heavier subsampling
is performed in order to stay within the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as pl
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
x = ((2 / ((np.sqrt(3 * width) * np.pi ** 0.25)))
* (1 - ((x - center) ** 2 / width ** 2))
* np.exp((-(x - center) ** 2) / (2 * width ** 2)))
return x
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution // subsampling
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
n_components=n_components // 5)
for w in (10, 50, 100, 500, 1000))]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nonzero_coefs)
estimators = [('OMP', 'omp', None, 15), ('Lasso', 'lasso_cd', 2, None), ]
pl.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
('fixed width', 'multiple widths'))):
pl.subplot(1, 2, subplot + 1)
pl.title('Sparse coding against %s dictionary' % title)
pl.plot(y, ls='dotted', label='Original signal')
# Do a wavelet approximation
for title, algo, alpha, n_nonzero in estimators:
coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha, transform_algorithm=algo)
x = coder.transform(y)
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x, label='%s: %s nonzero coefs,\n%.2f error'
% (title, density, squared_error))
# Soft thresholding debiasing
coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
transform_alpha=20)
x = coder.transform(y)
_, idx = np.where(x != 0)
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x,
label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error' %
(len(idx), squared_error))
pl.axis('tight')
pl.legend()
pl.subplots_adjust(.04, .07, .97, .90, .09, .2)
pl.show()
| bsd-3-clause |
kylerbrown/scikit-learn | sklearn/metrics/metrics.py | 232 | 1262 | import warnings
warnings.warn("sklearn.metrics.metrics is deprecated and will be removed in "
"0.18. Please import from sklearn.metrics",
DeprecationWarning)
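# Illustrative replacement for downstream code (editor's note): import the
# same functions directly from the top-level metrics package, e.g.
#   from sklearn.metrics import accuracy_score, roc_auc_score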
from .ranking import auc
from .ranking import average_precision_score
from .ranking import label_ranking_average_precision_score
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
| bsd-3-clause |
golharam/rgtools | scripts/galaxy/api/load_data_with_metadata.py | 1 | 3466 | #!/usr/bin/env python
"""
This script scans a directory for files with companion '.json' files, then loads
the data from the file, and attaches the .json contents using the 'extended_metadata'
system in the library
Sample call:
python load_data_with_metadata.py <api_key> <api_url> /data/folder "API Imports"
NOTE: The upload method used requires the data library filesystem upload option allow_library_path_paste to be enabled
"""
import os
import shutil
import sys
import json
import time
import argparse
sys.path.insert( 0, os.path.dirname( __file__ ) )
from common import submit, display
def load_file(fullpath, api_key, api_url, library_id, library_folder_id, uuid_field=None):
data = {}
data['folder_id'] = library_folder_id
data['file_type'] = 'auto'
data['dbkey'] = ''
data['upload_option'] = 'upload_paths'
data['filesystem_paths'] = fullpath
data['create_type'] = 'file'
data['link_data_only'] = 'link_to_files'
handle = open( fullpath + ".json" )
smeta = handle.read()
handle.close()
ext_meta = json.loads(smeta)
data['extended_metadata'] = ext_meta
if uuid_field is not None and uuid_field in ext_meta:
data['uuid'] = ext_meta[uuid_field]
libset = submit(api_key, api_url + "libraries/%s/contents" % library_id, data, return_formatted = True)
print libset
def main(api_key, api_url, in_folder, data_library, uuid_field=None):
# Find/Create data library with the above name. Assume we're putting datasets in the root folder '/'
libs = display(api_key, api_url + 'libraries', return_formatted=False)
library_id = None
for library in libs:
if library['name'] == data_library:
library_id = library['id']
if not library_id:
lib_create_data = {'name':data_library}
library = submit(api_key, api_url + 'libraries', lib_create_data, return_formatted=False)
library_id = library['id']
library_folder_id = None
folders = display(api_key, api_url + "libraries/%s/contents" % library_id, return_formatted = False)
for f in folders:
if f['name'] == "/":
library_folder_id = f['id']
if not library_id or not library_folder_id:
print "Failure to configure library destination."
sys.exit(1)
if os.path.isfile(in_folder):
fullpath = in_folder
print "Loading ", fullpath
load_file(fullpath, api_key, api_url, library_id, library_folder_id, uuid_field)
#if os.path.exists(in_folder + ".json"):
# fullpath = os.path.abspath(in_folder)
# print "Loading", fullpath
# load_file(fullpath, api_key, api_url, library_id, library_folder_id, uuid_field)
else:
for fname in os.listdir(in_folder):
fullpath = os.path.join(in_folder, fname)
if os.path.isfile(fullpath) and os.path.exists(fullpath + ".json"):
print "Loading", fullpath
load_file(fullpath, api_key, api_url, library_id, library_folder_id, uuid_field)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("api_key", help="API KEY")
parser.add_argument('api_url', help='API URL')
parser.add_argument("in_folder", help="Input Folder")
parser.add_argument("data_library", help="Data Library")
parser.add_argument("--uuid_field", help="UUID Field", default=None)
args = parser.parse_args()
main(args.api_key, args.api_url, args.in_folder, args.data_library, args.uuid_field)
| lgpl-3.0 |
google/uncertainty-baselines | baselines/jft/active_learning.py | 1 | 32769 | # coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=line-too-long
r"""Active learning loop.
This script implements a basic Active Learning loop using predictive entropy as
the acquisition function.
The below command is for running this script on a TPU-VM.
Execute in `baselines/jft`:
python3 active_learning.py \
--config='experiments/vit_l32_active_learning_cifar.py' \
--config.model_init='gs://ub-checkpoints/ImageNet21k_ViT-L32/1/checkpoint.npz' \
--output_dir active_learning_results
Use `gs://ub-checkpoints/ImageNet21k_BE-L32/baselines-jft-0209_205214/1/checkpoint.npz` for BE
"""
# pylint: enable=line-too-long
from functools import partial # pylint: disable=g-importing-member standard use
import logging
import multiprocessing
from absl import app
from absl import flags
from clu import metric_writers
from clu import parameter_overview
from clu import periodic_actions
from clu import preprocess_spec
import flax
import flax.jax_utils as flax_utils
import jax
import jax.numpy as jnp
from ml_collections.config_flags import config_flags
import numpy as np
import tensorflow_datasets as tfds
import tqdm
import uncertainty_baselines as ub
import al_utils # local file import from baselines.jft
import batchensemble_utils # local file import from baselines.jft
import checkpoint_utils # local file import from baselines.jft
import deterministic_utils # local file import from baselines.jft
import input_utils # local file import from baselines.jft
import ood_utils # local file import from baselines.jft
import preprocess_utils # local file import from baselines.jft
import train_utils # local file import from baselines.jft
config_flags.DEFINE_config_file(
'config', None, 'Training configuration.', lock_config=True)
flags.DEFINE_string('output_dir', default=None, help='Work unit directory.')
flags.DEFINE_integer(
'num_cores', default=None, help='Unused. How many devices being used.')
flags.DEFINE_boolean(
'use_gpu', default=None, help='Unused. Whether or not running on GPU.')
flags.DEFINE_string('tpu', None,
'Unused. Name of the TPU. Only used if use_gpu is False.')
FLAGS = flags.FLAGS
NINF_SCORE = float('-inf')
def get_ids_logits_masks(*,
model,
opt_repl,
ds,
use_pre_logits=False,
average_logits=True,
prefetch_to_device=1,
config=None):
"""Obtain (pre) logits for each datapoint.
This can then be used to compute entropies, and so on.
Args:
model: an initialized model.
opt_repl: an optimizer with parameters.
ds: a dataset.
use_pre_logits: if True, return pre logit instead of logit
average_logits: if True, average the logits.
prefetch_to_device: how many batches to prefetch.
config: experiment config.
Returns:
a tuple of jnp arrays of ids, logits, labels and masks.
"""
@partial(jax.pmap, axis_name='batch')
def compute_batch_outputs(params, images):
logits, out = model.apply({'params': flax.core.freeze(params)},
images,
train=False)
if config and config.model_type == 'batchensemble':
ens_size = config.model.transformer.ens_size
loss_name = config.get('loss', 'sigmoid_xent')
logits = jnp.asarray(jnp.split(logits, ens_size))
if loss_name == 'sigmoid_xent':
if average_logits:
logits = batchensemble_utils.log_average_sigmoid_probs(logits)
elif loss_name == 'softmax_xent':
if average_logits:
logits = batchensemble_utils.log_average_softmax_probs(logits)
else:
raise ValueError(f'Loss name: {loss_name} not supported.')
if use_pre_logits:
# pre_logits [batch_size, hidden_size, ens_size]
pre_logits = jnp.transpose(
jnp.asarray(jnp.split(out['pre_logits'], ens_size)), axes=[1, 2, 0])
output = pre_logits
else:
output = logits
else:
if use_pre_logits:
# pre_logits [batch_size, hidden_size]
output = out['pre_logits']
else:
output = logits
# TODO(joost,andreas): For multi host this requires:
# output = jax.lax.all_gather(output, axis_name='batch')
return output
iter_ds = input_utils.start_input_pipeline(ds, prefetch_to_device)
outputs = []
ids = []
labels = []
masks = []
for _, batch in enumerate(iter_ds):
batch_id = batch['id']
batch_label = batch['labels']
batch_mask = batch['mask']
batch_output = compute_batch_outputs(opt_repl.target, batch['image'])
# This moves the batch_output from TPU to CPU right away.
batch_output = jax.device_put(batch_output, jax.devices('cpu')[0])
# TODO(joost,andreas): if we run on multi host, we need to index
# batch_outputs: batch_outputs[0]
ids.append(batch_id)
outputs.append(batch_output)
labels.append(batch_label)
masks.append(batch_mask)
if average_logits:
# 0 dimension is TPU shard, 1 is batch
outputs = jnp.concatenate(outputs, axis=1)
else:
# 0 dimension is TPU shard, 1 is ensemble, 2 is batch
outputs = jnp.concatenate(outputs, axis=2)
ids = jnp.concatenate(ids, axis=1)
labels = jnp.concatenate(labels, axis=1)
masks = jnp.concatenate(masks, axis=1)
# NOTE(joost,andreas): due to batch padding, entropies/ids will be of size:
# if training set size % batch size > 0:
# (training set size // batch size + 1) * batch size
# else:
# just training set size
return ids, outputs, labels, masks
def get_entropy_scores(logits, masks):
"""Obtain scores using entropy scoring.
Args:
logits: the logits of the pool set.
masks: the masks belonging to the pool set.
Returns:
a list of scores belonging to the pool set.
"""
log_probs = jax.nn.log_softmax(logits)
probs = jax.nn.softmax(logits)
weighted_nats = -probs * log_probs
# One simple trick to avoid NaNs later on.
weighted_nats = jnp.where(jnp.isnan(weighted_nats), 0, weighted_nats)
entropy = jnp.sum(weighted_nats, axis=-1, keepdims=False)
entropy = jnp.where(masks, entropy, NINF_SCORE)
return entropy
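# Editor's sketch (hedged example, not from the original file): a tiny,
# self-contained illustration of how get_entropy_scores is meant to be called.
def _entropy_scores_demo():
  """Editor's sketch, not part of the original pipeline: shows the intended
  shapes for get_entropy_scores on toy inputs (values are assumptions).
  Masked-out entries come back as NINF_SCORE, so they are never acquired."""
  # [num_devices=1, batch=2, classes=3] logits and a matching [1, 2] mask.
  toy_logits = jnp.array([[[4.0, 0.0, 0.0], [0.3, 0.3, 0.3]]])
  toy_masks = jnp.array([[True, False]])
  # First entry: small entropy (confident); second entry: NINF_SCORE (masked).
  return get_entropy_scores(toy_logits, toy_masks)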
def get_bald_scores(logits, masks):
"""Obtain scores using BALD scoring.
Args:
logits: the logits of the pool set, first dimension is the ensemble.
masks: the masks belonging to the pool set.
Returns:
a list of scores belonging to the pool set.
"""
# TPU shard, ensemble size, batch size, logits
_, ens_size, _, _ = logits.shape
log_probs = jax.nn.log_softmax(logits)
probs = jax.nn.softmax(logits)
weighted_nats = -probs * log_probs
weighted_nats = jnp.where(jnp.isnan(weighted_nats), 0, weighted_nats)
marginal_entropy = jnp.mean(jnp.sum(weighted_nats, axis=-1), axis=1)
marginal_log_probs = jax.nn.logsumexp(log_probs, axis=1) - jnp.log(ens_size)
marginal_probs = jnp.mean(probs, axis=1)
weighted_marginal_nats = -marginal_probs * marginal_log_probs
weighted_marginal_nats = jnp.where(
jnp.isnan(weighted_marginal_nats), 0, weighted_marginal_nats)
entropy_marginal = jnp.sum(weighted_marginal_nats, axis=-1)
# Mask results.
bald = entropy_marginal - marginal_entropy
bald = jnp.where(masks, bald, NINF_SCORE)
return bald
def get_margin_scores(logits, masks):
"""Obtain scores using margin scoring.
Args:
logits: the logits of the pool set.
masks: the masks belonging to the pool set.
Returns:
a list of scores belonging to the pool set.
"""
probs = jax.nn.softmax(logits)
top2_probs = jax.lax.top_k(probs, k=2)[0]
# top_k's documentation does not specify whether the top-k are sorted or not.
margins = jnp.abs(top2_probs[..., 0] - top2_probs[..., 1])
# Lower margin means higher uncertainty, so we invert the scores.
# Then a higher margin score means higher uncertainty.
margin_scores = 1.0 - margins
margin_scores = jnp.where(masks, margin_scores, NINF_SCORE)
return margin_scores
def get_msp_scores(logits, masks):
"""Obtain scores using maximum softmax probability scoring.
Args:
logits: the logits of the pool set.
masks: the masks belonging to the pool set.
Returns:
a list of scores belonging to the pool set.
"""
probs = jax.nn.softmax(logits)
max_probs = jnp.max(probs, axis=-1)
# High max prob means low uncertainty, so we invert the value.
msp_scores = 1.0 - max_probs
msp_scores = jnp.where(masks, msp_scores, NINF_SCORE)
return msp_scores
def get_uniform_scores(masks, rng):
"""Obtain scores using uniform sampling.
Args:
masks: the masks belonging to the pool set.
rng: the RNG to use for uniform sampling.
Returns:
a list of scores belonging to the pool set.
"""
uniform_scores = jax.random.uniform(key=rng, shape=masks.shape)
uniform_scores = jnp.where(masks, uniform_scores, NINF_SCORE)
return uniform_scores
def get_density_scores(*,
model,
opt_repl,
train_ds,
pool_pre_logits,
pool_masks,
config=None):
"""Obtain scores using density method.
Args:
model: an initialized model.
opt_repl: the current optimizer.
train_ds: the dataset to fit the density estimator on.
pool_pre_logits: the pre logits (features) of the pool set.
pool_masks: the masks belonging to the pool_pre_logits.
config: experiment config.
Returns:
a list of scores belonging to the pool set.
"""
# Fit LDA
_, train_pre_logits, train_labels, train_masks = get_ids_logits_masks(
model=model,
opt_repl=opt_repl,
ds=train_ds,
use_pre_logits=True,
config=config)
# train_masks_bool [num_cores, per_core_batch_size]
train_masks_bool = train_masks.astype(bool)
# train_pre_logits [num_cores, per_core_batch_size, hidden_size, ens_size]
# train_embeds [batch_size, hidden_size, ens_size]
# batch_size = num_cores * per_core_batch_size
train_embeds = train_pre_logits[train_masks_bool]
train_labels = np.argmax(train_labels[train_masks_bool], axis=-1).ravel()
use_ens = False
if len(train_embeds.shape) == 3:
# The output needs to be ensembled.
# embeds is of the shape [batch_size, hidden_size, ens_size]
use_ens = True
ens_size = train_embeds.shape[-1]
if not use_ens:
# Single model
# train_embeds shape [batch_size, hidden_size]
mean_list, cov = ood_utils.compute_mean_and_cov(train_embeds, train_labels)
else:
# Ensemble models
# train_embeds shape [batch_size, hidden_size, ens_size]
mean_list, cov = [], []
for m in range(ens_size):
mu, sigma = ood_utils.compute_mean_and_cov(train_embeds[..., m],
train_labels)
mean_list.append(mu)
cov.append(sigma)
# Evaluate LDA on pool set
if not use_ens:
# Single model
# pool_pre_logits [num_cores, per_core_batch_size, hidden_size]
pool_pre_logits = pool_pre_logits.reshape(-1, pool_pre_logits.shape[-1])
dists = ood_utils.compute_mahalanobis_distance(pool_pre_logits, mean_list,
cov)
scores = np.array(jax.nn.logsumexp(-dists / 2, axis=-1))
else:
# Ensemble models
# pool_pre_logits [num_cores, per_core_batch_size, hidden_size, ens_size]
pool_pre_logits = pool_pre_logits.reshape(
[-1] + [s for s in pool_pre_logits.shape[2:]])
scores_list = []
for m in range(ens_size):
d = ood_utils.compute_mahalanobis_distance(pool_pre_logits[..., m],
mean_list[m], cov[m])
s = np.array(jax.nn.logsumexp(-d / 2, axis=-1))
scores_list.append(s)
scores = np.mean(np.array(scores_list), axis=0)
# Convert likelihood to AL score
pool_masks_bool = np.array(pool_masks.ravel(), dtype=bool)
scores[pool_masks_bool] = (
scores[pool_masks_bool].max() - scores[pool_masks_bool])
scores[~pool_masks_bool] = NINF_SCORE
return scores
def stochastic_score_acquisition(scores, acquisition_batch_size, beta, rng):
"""Stochastic acquisition method for batch selection https://arxiv.org/abs/2106.12059."""
noise = jax.random.gumbel(rng, [len(scores)])
noised_scores = scores + noise / beta
selected_noised_scores, selected_indices = jax.lax.top_k(
noised_scores, acquisition_batch_size)
selected_scores = scores[selected_indices]
logging.info(msg=f'selected_noised_scores = {selected_noised_scores}; '
f'selected_scores = {selected_scores}')
return selected_scores, selected_indices
def select_acquisition_batch_indices(*,
acquisition_batch_size,
scores,
ids,
ignored_ids,
power_acquisition=True,
rng=None):
"""Select what data points to acquire from the pool set.
Args:
acquisition_batch_size: the number of data point to acquire.
scores: acquisition scores assigned to data points.
ids: the ids belonging to the scores.
ignored_ids: the ids to ignore (previously acquired).
power_acquisition: True if use power method for batch selection.
rng: rng for power acquisition. None if not using power_acquisition.
Returns:
a tuple of lists with the ids to be acquired and their scores.
"""
scores = jnp.array(scores.ravel())
ids = jnp.array(ids.ravel())
# Ignore already acquired ids
# TODO(joost,andreas): vectorize this
ids_list = ids.tolist()
for ignored_id in ignored_ids:
scores = scores.at[ids_list.index(ignored_id)].set(NINF_SCORE)
f_ent = scores[scores > NINF_SCORE]
logging.info(msg=f'Score statistics pool set - '
f'min: {f_ent.min()}, mean: {f_ent.mean()}, max: {f_ent.max()}')
if power_acquisition:
assert rng is not None, ('rng should not be None if power acquisition is '
'used.')
beta = 1
_, selected_indices = stochastic_score_acquisition(
jnp.log(scores), acquisition_batch_size, beta, rng)
else:
# Use top-k otherwise.
selected_scores, selected_indices = jax.lax.top_k(scores,
acquisition_batch_size)
logging.info(msg=f'Top-k scores: {selected_scores}')
selected_ids = ids[selected_indices].tolist()
selected_scores = scores[selected_indices].tolist()
logging.info(
msg=f'Data selected - ids: {selected_ids}, with scores: {selected_scores}'
)
return selected_ids, selected_scores
def acquire_points(model, current_opt_repl, pool_train_ds, train_eval_ds,
train_subset_data_builder, acquisition_method, config,
rng_loop):
"""Acquire ids of the current batch."""
pool_ids, pool_outputs, _, pool_masks = get_ids_logits_masks(
model=model,
opt_repl=current_opt_repl,
ds=pool_train_ds,
use_pre_logits=acquisition_method == 'density',
average_logits=acquisition_method != 'bald',
config=config)
if acquisition_method == 'uniform':
rng_loop, rng_acq = jax.random.split(rng_loop, 2)
pool_scores = get_uniform_scores(pool_masks, rng_acq)
elif acquisition_method == 'entropy':
pool_scores = get_entropy_scores(pool_outputs, pool_masks)
elif acquisition_method == 'margin':
pool_scores = get_margin_scores(pool_outputs, pool_masks)
elif acquisition_method == 'msp':
pool_scores = get_msp_scores(pool_outputs, pool_masks)
elif acquisition_method == 'bald':
pool_scores = get_bald_scores(pool_outputs, pool_masks)
elif acquisition_method == 'density':
if train_subset_data_builder.subset_ids:
pool_scores = get_density_scores(
model=model,
opt_repl=current_opt_repl,
train_ds=train_eval_ds,
pool_pre_logits=pool_outputs,
pool_masks=pool_masks,
config=config)
else:
rng_loop, rng_acq = jax.random.split(rng_loop, 2)
pool_scores = get_uniform_scores(pool_masks, rng_acq)
else:
raise ValueError('Acquisition method not found.')
rng_loop, rng_acq = jax.random.split(rng_loop, 2)
acquisition_batch_ids, _ = select_acquisition_batch_indices(
acquisition_batch_size=config.get('acquisition_batch_size'),
scores=pool_scores,
ids=pool_ids,
ignored_ids=train_subset_data_builder.subset_ids,
power_acquisition=config.get('power_acquisition', True),
rng=rng_acq)
return acquisition_batch_ids, rng_loop
def get_accuracy(*, evaluation_fn, opt_repl, ds, prefetch_to_device=1):
"""A helper function to obtain accuracy over a dataset.
Args:
evaluation_fn: a function that evaluates a forward pass in a model.
opt_repl: an optimizer with parameters.
ds: a dataset.
prefetch_to_device: number of batches to prefetch (default: 1).
Returns:
The accuracy as a float between 0 and 1.
"""
iter_ds = input_utils.start_input_pipeline(ds, prefetch_to_device)
ncorrect, nseen = [], []
for batch in iter_ds:
batch_ncorrect, _, batch_n, _ = evaluation_fn(opt_repl.target,
batch['image'],
batch['labels'],
batch['mask'])
ncorrect += [batch_ncorrect[0]]
nseen += [batch_n[0]]
ncorrect = np.sum(ncorrect)
nseen = np.sum(nseen)
return ncorrect / nseen
def finetune(*,
update_fn,
opt_repl,
lr_fn,
ds,
rngs_loop,
total_steps,
train_eval_ds,
val_ds,
evaluation_fn,
early_stopping_patience,
prefetch_to_device=1,
profiler=None):
"""Finetunes a model on a dataset.
Args:
update_fn: a function that updates the model given relevant inputs.
opt_repl: the optimizer.
lr_fn: a function that returns the learning rate given a step.
ds: the dataset to finetune on.
rngs_loop: the rng for the loop.
total_steps: the total number of fine-tuning steps to take.
train_eval_ds: train dataset in eval mode (no augmentation or shuffling).
val_ds: validation dataset for early stopping.
evaluation_fn: function used for evaluation on validation set.
early_stopping_patience: number of steps to wait before stopping training.
prefetch_to_device: number of batches to prefetch (default: 1).
profiler: periodic_actions.Profile.
Returns:
The optimizer with updated parameters and the updated rng.
"""
iter_ds = input_utils.start_input_pipeline(ds, prefetch_to_device)
lr_iter = train_utils.prefetch_scalar(
map(lr_fn, range(total_steps)), prefetch_to_device)
best_opt_accuracy = -1
best_step = 1
train_val_accuracies = []
for current_step, train_batch, lr_repl in zip(
tqdm.trange(1, total_steps + 1), iter_ds, lr_iter):
opt_repl, rngs_loop, _ = update_fn(opt_repl, lr_repl, train_batch['image'],
train_batch['labels'], rngs_loop)
if jax.process_index() == 0 and profiler is not None:
profiler(current_step)
if current_step % 5 == 0:
train_accuracy = get_accuracy(
evaluation_fn=evaluation_fn, opt_repl=opt_repl, ds=train_eval_ds)
val_accuracy = get_accuracy(
evaluation_fn=evaluation_fn, opt_repl=opt_repl, ds=val_ds)
logging.info(
msg=f'Current accuracy - train:{train_accuracy}, val: {val_accuracy}')
train_val_accuracies.append((current_step, train_accuracy, val_accuracy))
if val_accuracy >= best_opt_accuracy:
best_step = current_step
best_opt_accuracy = val_accuracy
best_opt_repl = jax.device_get(opt_repl)
else:
logging.info(
msg=(f'Current val accuracy {val_accuracy} '
f'(vs {best_opt_accuracy})'))
if current_step - best_step >= early_stopping_patience:
logging.info(msg='Early stopping, returning best opt_repl!')
break
# best_opt_repl could be unassigned, but we should error out then
info = dict(
best_val_accuracy=best_opt_accuracy,
best_step=best_step,
train_val_accuracies=train_val_accuracies)
return best_opt_repl, rngs_loop, info
def main(config, output_dir):
if jax.process_count() > 1:
raise NotImplementedError
# Note: switch to ProfileAllHosts() if you need to profile all hosts.
# (Xprof data become much larger and take longer to load for analysis)
profiler = periodic_actions.Profile(
# Create profile after every restart to analyze pre-emption related
# problems and assure we get similar performance in every run.
logdir=output_dir,
first_profile=10)
logging.info(config)
acquisition_method = config.get('acquisition_method')
if acquisition_method == 'bald':
assert config.model_type == 'batchensemble', 'Bald requires batch ensemble'
# Create an asynchronous multi-metric writer.
writer = metric_writers.create_default_writer(
output_dir, just_logging=jax.process_index() > 0)
writer.write_hparams(dict(config))
# The pool is used to perform misc operations such as logging in async way.
pool = multiprocessing.pool.ThreadPool()
def write_note(note):
if jax.process_index() == 0:
logging.info('NOTE: %s', note)
write_note(f'Initializing for {acquisition_method}')
# Download dataset
data_builder = tfds.builder(config.dataset)
data_builder.download_and_prepare()
seed = config.get('seed', 0)
rng = jax.random.PRNGKey(seed)
batch_size = config.batch_size
batch_size_eval = config.get('batch_size_eval', batch_size)
local_batch_size = batch_size // jax.process_count()
local_batch_size_eval = batch_size_eval // jax.process_count()
pp_eval = preprocess_spec.parse(
spec=config.pp_eval, available_ops=preprocess_utils.all_ops())
val_ds = input_utils.get_data(
dataset=config.dataset,
split=config.val_split,
rng=None,
process_batch_size=local_batch_size_eval,
preprocess_fn=pp_eval,
num_epochs=1,
repeat_after_batching=True,
shuffle=False,
prefetch_size=config.get('prefetch_to_host', 2),
drop_remainder=False,
)
test_ds = input_utils.get_data(
dataset=config.dataset,
split=config.test_split,
rng=None,
process_batch_size=local_batch_size_eval,
preprocess_fn=pp_eval,
num_epochs=1,
repeat_after_batching=True,
shuffle=False,
prefetch_size=config.get('prefetch_to_host', 2),
drop_remainder=False,
)
# Init model
if config.model_type == 'deterministic':
model_utils = deterministic_utils
reinit_params = config.get('model_reinit_params',
('head/kernel', 'head/bias'))
head_prefix = 'head'
model = ub.models.vision_transformer(
num_classes=config.num_classes, **config.get('model', {}))
elif config.model_type == 'batchensemble':
model_utils = batchensemble_utils
reinit_params = ('batchensemble_head/bias', 'batchensemble_head/kernel',
'batchensemble_head/fast_weight_alpha',
'batchensemble_head/fast_weight_gamma')
head_prefix = 'batchensemble_head'
model = ub.models.vision_transformer_be(
num_classes=config.num_classes, **config.model)
else:
raise ValueError('Expect config.model_type to be "deterministic" or '
f'"batchensemble", but received {config.model_type}.')
update_fn = model_utils.create_update_fn(model, config)
evaluation_fn = model_utils.create_evaluation_fn(model, config)
# NOTE: We need this because we need an Id field of type int.
# TODO(andreas): Rename to IdSubsetDatasetBuilder?
# The original tf dataset builder but with int ids.
pool_subset_data_builder = al_utils.SubsetDatasetBuilder(
data_builder, subset_ids=None)
# NOTE: below line is necessary on multi host setup
# pool_ds_rng = jax.random.fold_in(pool_ds_rng, jax.process_index())
rng, pool_ds_rng = jax.random.split(rng)
pool_train_ds = input_utils.get_data(
dataset=pool_subset_data_builder,
split=config.train_split,
rng=pool_ds_rng,
process_batch_size=local_batch_size,
preprocess_fn=pp_eval,
num_epochs=1,
repeat_after_batching=True,
shuffle=False,
prefetch_size=config.get('prefetch_to_host', 2),
drop_remainder=False)
# Potentially acquire an initial training set.
initial_training_set_size = config.get('initial_training_set_size', 10)
if initial_training_set_size > 0:
write_note(f'Creating {initial_training_set_size} initial training ids.')
rng, rng_initial = jax.random.split(rng)
initial_training_set_batch_ids = al_utils.sample_class_balanced_ids(
initial_training_set_size,
pool_train_ds,
config.num_classes,
shuffle_rng=rng_initial)
else:
initial_training_set_batch_ids = []
write_note(f'{len(initial_training_set_batch_ids)} initial training ids '
f'= {initial_training_set_batch_ids}')
train_subset_data_builder = al_utils.SubsetDatasetBuilder(
data_builder, subset_ids=initial_training_set_batch_ids)
init = model_utils.create_init(model, config, test_ds)
rng, rng_init = jax.random.split(rng)
params_cpu = init(rng_init)
if jax.process_index() == 0:
num_params = sum(p.size for p in jax.tree_flatten(params_cpu)[0])
parameter_overview.log_parameter_overview(params_cpu)
writer.write_scalars(step=0, scalars={'num_params': num_params})
# Load the optimizer from flax.
opt_name = config.get('optim_name')
opt_def = getattr(flax.optim, opt_name)(**config.get('optim', {}))
if config.get('finetune_head_only', False):
head_params = flax.traverse_util.ModelParamTraversal(
lambda path, _: head_prefix in path)
opt_def = flax.optim.MultiOptimizer((head_params, opt_def))
# We jit this, such that the arrays that are created on the same
# device as the input is, in this case the CPU. Else they'd be on device[0].
opt_cpu = jax.jit(opt_def.create)(params_cpu)
if config.model_init:
write_note('Loading the model checkpoint...')
loaded_params = checkpoint_utils.load_checkpoint(
tree=None, path=config.model_init)
loaded_params = checkpoint_utils.restore_from_pretrained_params(
params_cpu,
loaded_params,
config.model.representation_size,
config.model.classifier,
reinit_params,
)
else:
write_note('Use random model initialization.')
loaded_params = params_cpu
opt_cpu = opt_cpu.replace(target=loaded_params)
del loaded_params, params_cpu # Free up memory.
# TODO(dusenberrymw): Remove this once checkpoint_utils is fixed to return
# only CPU arrays.
opt_cpu = jax.device_get(opt_cpu)
test_accuracies = []
training_sizes = []
rng, rng_loop = jax.random.split(rng)
rngs_loop = flax_utils.replicate(rng_loop)
# TODO(joost,andreas): double check if below is still necessary
# (train_split is independent of this)
# NOTE: train_ds_rng is re-used for all train_ds creations
rng, train_ds_rng = jax.random.split(rng)
measurements = {}
accumulated_steps = 0
current_train_ds_length = len(train_subset_data_builder.subset_ids)
write_note(f'Initial training set size: {current_train_ds_length}')
while current_train_ds_length <= config.get('max_training_set_size'):
current_opt_repl = flax_utils.replicate(opt_cpu)
# Only fine-tune if there is anything to fine-tune with.
if current_train_ds_length > 0:
# We repeat the dataset several times, such that we can obtain batches
# of size batch_size, even at start of training. These batches will be
# effectively 'bootstrap' sampled, meaning they are sampled with
# replacement from the original training set.
repeated_train_ds = input_utils.get_data(
dataset=train_subset_data_builder,
split=config.train_split,
rng=train_ds_rng,
process_batch_size=local_batch_size,
preprocess_fn=preprocess_spec.parse(
spec=config.pp_train, available_ops=preprocess_utils.all_ops()),
cache='loaded',
shuffle_buffer_size=config.shuffle_buffer_size,
prefetch_size=config.get('prefetch_to_host', 2),
)
# We use this dataset to evaluate how well we perform on the training set,
# and for fitting the feature density method.
# We need training set accuracy to evaluate if we fit well within
# max_steps budget.
train_eval_ds = input_utils.get_data(
dataset=train_subset_data_builder,
split=config.train_split,
rng=train_ds_rng,
process_batch_size=local_batch_size,
preprocess_fn=pp_eval,
cache='loaded',
num_epochs=1,
repeat_after_batching=True,
shuffle=False,
prefetch_size=config.get('prefetch_to_host', 2),
drop_remainder=False,
)
# NOTE: warmup and decay are not a good fit for the small training set
# lr_fn = train_utils.create_learning_rate_schedule(config.total_steps,
# **config.get('lr', {})
# )
lr_fn = lambda x: config.lr.base
early_stopping_patience = config.get('early_stopping_patience', 15)
current_opt_repl, rngs_loop, measurements = finetune(
update_fn=update_fn,
opt_repl=current_opt_repl,
lr_fn=lr_fn,
ds=repeated_train_ds,
rngs_loop=rngs_loop,
total_steps=config.total_steps,
train_eval_ds=train_eval_ds,
val_ds=val_ds,
evaluation_fn=evaluation_fn,
early_stopping_patience=early_stopping_patience,
profiler=profiler)
train_val_accuracies = measurements.pop('train_val_accuracies')
current_steps = 0
for step, train_acc, val_acc in train_val_accuracies:
writer.write_scalars(accumulated_steps + step, {
'train_accuracy': train_acc,
'val_accuracy': val_acc
})
current_steps = step
accumulated_steps += current_steps + 10
else:
train_eval_ds = None
test_accuracy = get_accuracy(
evaluation_fn=evaluation_fn, opt_repl=current_opt_repl, ds=test_ds)
write_note(f'Accuracy at {current_train_ds_length}: {test_accuracy}')
test_accuracies.append(test_accuracy)
measurements.update({'test_accuracy': test_accuracy})
writer.write_scalars(current_train_ds_length, measurements)
training_subset_ids = train_subset_data_builder.subset_ids
# Start picking the next training points.
training_sizes.append(current_train_ds_length)
acquisition_batch_ids, rng_loop = acquire_points(
model, current_opt_repl, pool_train_ds, train_eval_ds,
train_subset_data_builder, acquisition_method, config, rng_loop)
train_subset_data_builder.subset_ids.update(acquisition_batch_ids)
write_note(f'Training set ids at train set size {current_train_ds_length}:'
f'{training_subset_ids}')
write_note(f'Selected ids at train set size {current_train_ds_length}:'
f'{acquisition_batch_ids}')
current_train_ds_length = len(train_subset_data_builder.subset_ids)
write_note(
f'Training set size after acquisition: {current_train_ds_length}')
write_note(f'Final acquired training ids: '
f'{train_subset_data_builder.subset_ids}; '
f'Accuracies: {test_accuracies}')
pool.close()
pool.join()
writer.close()
# TODO(joost,andreas): save the final checkpoint
return (train_subset_data_builder.subset_ids, test_accuracies)
if __name__ == '__main__':
jax.config.config_with_absl()
def _main(argv):
del argv
main(FLAGS.config, FLAGS.output_dir)
app.run(_main) # Ignore the returned values from `main`.
| apache-2.0 |
xyguo/scikit-learn | sklearn/tree/export.py | 14 | 16020 | """
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Trevor Stephens <trev.stephens@gmail.com>
# Licence: BSD 3 clause
import numpy as np
from ..externals import six
from . import _criterion
from . import _tree
def _color_brew(n):
"""Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
"""
color_list = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in np.arange(25, 385, 360. / n).astype(int):
# Calculate some intermediate values
h_bar = h / 60.
x = c * (1 - abs((h_bar % 2) - 1))
# Initialize RGB with same hue & chroma as our color
rgb = [(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0)]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
rgb = [(int(255 * (r + m))),
(int(255 * (g + m))),
(int(255 * (b + m)))]
color_list.append(rgb)
return color_list
def export_graphviz(decision_tree, out_file="tree.dot", max_depth=None,
feature_names=None, class_names=None, label='all',
filled=False, leaves_parallel=False, impurity=True,
node_ids=False, proportion=False, rotate=False,
rounded=False, special_characters=False):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
The sample counts that are shown are weighted with any sample_weights that
might be present.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
decision_tree : decision tree classifier
The decision tree to be exported to GraphViz.
out_file : file object or string, optional (default="tree.dot")
Handle or name of the output file.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
feature_names : list of strings, optional (default=None)
Names of each of the features.
class_names : list of strings, bool or None, optional (default=None)
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
If ``True``, shows a symbolic representation of the class name.
label : {'all', 'root', 'none'}, optional (default='all')
Whether to show informative labels for impurity, etc.
Options include 'all' to show at every node, 'root' to show only at
the top root node, or 'none' to not show at any node.
filled : bool, optional (default=False)
When set to ``True``, paint nodes to indicate majority class for
classification, extremity of values for regression, or purity of node
for multi-output.
leaves_parallel : bool, optional (default=False)
When set to ``True``, draw all leaf nodes at the bottom of the tree.
impurity : bool, optional (default=True)
When set to ``True``, show the impurity at each node.
node_ids : bool, optional (default=False)
When set to ``True``, show the ID number on each node.
proportion : bool, optional (default=False)
When set to ``True``, change the display of 'values' and/or 'samples'
to be proportions and percentages respectively.
rotate : bool, optional (default=False)
When set to ``True``, orient tree left to right rather than top-down.
rounded : bool, optional (default=False)
When set to ``True``, draw node boxes with rounded corners and use
Helvetica fonts instead of Times-Roman.
special_characters : bool, optional (default=False)
When set to ``False``, ignore special characters for PostScript
compatibility.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf,
... out_file='tree.dot') # doctest: +SKIP
"""
def get_color(value):
# Find the appropriate color & intensity for a node
if colors['bounds'] is None:
# Classification tree
color = list(colors['rgb'][np.argmax(value)])
sorted_values = sorted(value, reverse=True)
if len(sorted_values) == 1:
alpha = 0
else:
alpha = int(np.round(255 * (sorted_values[0] - sorted_values[1]) /
(1 - sorted_values[1]), 0))
else:
# Regression tree or multi-output
color = list(colors['rgb'][0])
alpha = int(np.round(255 * ((value - colors['bounds'][0]) /
(colors['bounds'][1] -
colors['bounds'][0])), 0))
# Return html color code in #RRGGBBAA format
color.append(alpha)
hex_codes = [str(i) for i in range(10)]
hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f'])
color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color]
return '#' + ''.join(color)
def node_to_str(tree, node_id, criterion):
# Generate the node content string
if tree.n_outputs == 1:
value = tree.value[node_id][0, :]
else:
value = tree.value[node_id]
# Should labels be shown?
labels = (label == 'root' and node_id == 0) or label == 'all'
# PostScript compatibility for special characters
if special_characters:
characters = ['#', '<SUB>', '</SUB>', '≤', '<br/>', '>']
node_string = '<'
else:
characters = ['#', '[', ']', '<=', '\\n', '"']
node_string = '"'
# Write node ID
if node_ids:
if labels:
node_string += 'node '
node_string += characters[0] + str(node_id) + characters[4]
# Write decision criteria
if tree.children_left[node_id] != _tree.TREE_LEAF:
# Always write node decision criteria, except for leaves
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X%s%s%s" % (characters[1],
tree.feature[node_id],
characters[2])
node_string += '%s %s %s%s' % (feature,
characters[3],
round(tree.threshold[node_id], 4),
characters[4])
# Write impurity
if impurity:
if isinstance(criterion, _criterion.FriedmanMSE):
criterion = "friedman_mse"
elif not isinstance(criterion, six.string_types):
criterion = "impurity"
if labels:
node_string += '%s = ' % criterion
node_string += (str(round(tree.impurity[node_id], 4)) +
characters[4])
# Write node sample count
if labels:
node_string += 'samples = '
if proportion:
percent = (100. * tree.n_node_samples[node_id] /
float(tree.n_node_samples[0]))
node_string += (str(round(percent, 1)) + '%' +
characters[4])
else:
node_string += (str(tree.n_node_samples[node_id]) +
characters[4])
# Write node class distribution / regression value
if proportion and tree.n_classes[0] != 1:
# For classification this will show the proportion of samples
value = value / tree.weighted_n_node_samples[node_id]
if labels:
node_string += 'value = '
if tree.n_classes[0] == 1:
# Regression
value_text = np.around(value, 4)
elif proportion:
# Classification
value_text = np.around(value, 2)
elif np.all(np.equal(np.mod(value, 1), 0)):
# Classification without floating-point weights
value_text = value.astype(int)
else:
# Classification with floating-point weights
value_text = np.around(value, 4)
# Strip whitespace
value_text = str(value_text.astype('S32')).replace("b'", "'")
value_text = value_text.replace("' '", ", ").replace("'", "")
if tree.n_classes[0] == 1 and tree.n_outputs == 1:
value_text = value_text.replace("[", "").replace("]", "")
value_text = value_text.replace("\n ", characters[4])
node_string += value_text + characters[4]
# Write node majority class
if (class_names is not None and
tree.n_classes[0] != 1 and
tree.n_outputs == 1):
# Only done for single-output classification trees
if labels:
node_string += 'class = '
if class_names is not True:
class_name = class_names[np.argmax(value)]
else:
class_name = "y%s%s%s" % (characters[1],
np.argmax(value),
characters[2])
node_string += class_name
# Clean up any trailing newlines
if node_string[-2:] == '\\n':
node_string = node_string[:-2]
if node_string[-5:] == '<br/>':
node_string = node_string[:-5]
return node_string + characters[5]
def recurse(tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if max_depth is None or depth <= max_depth:
# Collect ranks for 'leaf' option in plot_options
if left_child == _tree.TREE_LEAF:
ranks['leaves'].append(str(node_id))
elif str(depth) not in ranks:
ranks[str(depth)] = [str(node_id)]
else:
ranks[str(depth)].append(str(node_id))
out_file.write('%d [label=%s'
% (node_id,
node_to_str(tree, node_id, criterion)))
if filled:
# Fetch appropriate color for node
if 'rgb' not in colors:
# Initialize colors and bounds if required
colors['rgb'] = _color_brew(tree.n_classes[0])
if tree.n_outputs != 1:
# Find max and min impurities for multi-output
colors['bounds'] = (np.min(-tree.impurity),
np.max(-tree.impurity))
elif tree.n_classes[0] == 1 and len(np.unique(tree.value)) != 1:
# Find max and min values in leaf nodes for regression
colors['bounds'] = (np.min(tree.value),
np.max(tree.value))
if tree.n_outputs == 1:
node_val = (tree.value[node_id][0, :] /
tree.weighted_n_node_samples[node_id])
if tree.n_classes[0] == 1:
# Regression
node_val = tree.value[node_id][0, :]
else:
# If multi-output color node by impurity
node_val = -tree.impurity[node_id]
out_file.write(', fillcolor="%s"' % get_color(node_val))
out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d' % (parent, node_id))
if parent == 0:
# Draw True/False labels if parent is root node
angles = np.array([45, -45]) * ((rotate - .5) * -2)
out_file.write(' [labeldistance=2.5, labelangle=')
if node_id == 1:
out_file.write('%d, headlabel="True"]' % angles[0])
else:
out_file.write('%d, headlabel="False"]' % angles[1])
out_file.write(' ;\n')
if left_child != _tree.TREE_LEAF:
recurse(tree, left_child, criterion=criterion, parent=node_id,
depth=depth + 1)
recurse(tree, right_child, criterion=criterion, parent=node_id,
depth=depth + 1)
else:
ranks['leaves'].append(str(node_id))
out_file.write('%d [label="(...)"' % node_id)
if filled:
# color cropped nodes grey
out_file.write(', fillcolor="#C0C0C0"')
out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
own_file = False
try:
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
own_file = True
# The depth of each node for plotting with 'leaf' option
ranks = {'leaves': []}
# The colors to render each node with
colors = {'bounds': None}
out_file.write('digraph Tree {\n')
# Specify node aesthetics
out_file.write('node [shape=box')
rounded_filled = []
if filled:
rounded_filled.append('filled')
if rounded:
rounded_filled.append('rounded')
if len(rounded_filled) > 0:
out_file.write(', style="%s", color="black"'
% ", ".join(rounded_filled))
if rounded:
out_file.write(', fontname=helvetica')
out_file.write('] ;\n')
# Specify graph & edge aesthetics
if leaves_parallel:
out_file.write('graph [ranksep=equally, splines=polyline] ;\n')
if rounded:
out_file.write('edge [fontname=helvetica] ;\n')
if rotate:
out_file.write('rankdir=LR ;\n')
# Now recurse the tree and add node & edge attributes
if isinstance(decision_tree, _tree.Tree):
recurse(decision_tree, 0, criterion="impurity")
else:
recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
# If required, draw leaf nodes at same depth as each other
if leaves_parallel:
for rank in sorted(ranks):
out_file.write("{rank=same ; " +
"; ".join(r for r in ranks[rank]) + "} ;\n")
out_file.write("}")
finally:
if own_file:
out_file.close()
| bsd-3-clause |
xyguo/scikit-learn | sklearn/utils/tests/test_multiclass.py | 33 | 13405 |
from __future__ import division
import numpy as np
import scipy.sparse as sp
from itertools import product
from sklearn.externals.six.moves import xrange
from sklearn.externals.six import iteritems
from scipy.sparse import issparse
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.multiclass import is_multilabel
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.multiclass import class_distribution
from sklearn.utils.multiclass import check_classification_targets
class NotAnArray(object):
"""An object that is convertable to an array. This is useful to
simulate a Pandas timeseries."""
def __init__(self, data):
self.data = data
def __array__(self):
return self.data
EXAMPLES = {
'multilabel-indicator': [
# valid when the data is formatted as sparse or dense, identified
# by CSR format when the testing takes place
csr_matrix(np.random.RandomState(42).randint(2, size=(10, 10))),
csr_matrix(np.array([[0, 1], [1, 0]])),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.bool)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.int8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.uint8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float32)),
csr_matrix(np.array([[0, 0], [0, 0]])),
csr_matrix(np.array([[0, 1]])),
# Only valid when data is dense
np.array([[-1, 1], [1, -1]]),
np.array([[-3, 3], [3, -3]]),
NotAnArray(np.array([[-3, 3], [3, -3]])),
],
'multiclass': [
[1, 0, 2, 2, 1, 4, 2, 4, 4, 4],
np.array([1, 0, 2]),
np.array([1, 0, 2], dtype=np.int8),
np.array([1, 0, 2], dtype=np.uint8),
np.array([1, 0, 2], dtype=np.float),
np.array([1, 0, 2], dtype=np.float32),
np.array([[1], [0], [2]]),
NotAnArray(np.array([1, 0, 2])),
[0, 1, 2],
['a', 'b', 'c'],
np.array([u'a', u'b', u'c']),
np.array([u'a', u'b', u'c'], dtype=object),
np.array(['a', 'b', 'c'], dtype=object),
],
'multiclass-multioutput': [
np.array([[1, 0, 2, 2], [1, 4, 2, 4]]),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.int8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.uint8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float32),
np.array([['a', 'b'], ['c', 'd']]),
np.array([[u'a', u'b'], [u'c', u'd']]),
np.array([[u'a', u'b'], [u'c', u'd']], dtype=object),
np.array([[1, 0, 2]]),
NotAnArray(np.array([[1, 0, 2]])),
],
'binary': [
[0, 1],
[1, 1],
[],
[0],
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1]),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.bool),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.int8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.uint8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float32),
np.array([[0], [1]]),
NotAnArray(np.array([[0], [1]])),
[1, -1],
[3, 5],
['a'],
['a', 'b'],
['abc', 'def'],
np.array(['abc', 'def']),
[u'a', u'b'],
np.array(['abc', 'def'], dtype=object),
],
'continuous': [
[1e-5],
[0, .5],
np.array([[0], [.5]]),
np.array([[0], [.5]], dtype=np.float32),
],
'continuous-multioutput': [
np.array([[0, .5], [.5, 0]]),
np.array([[0, .5], [.5, 0]], dtype=np.float32),
np.array([[0, .5]]),
],
'unknown': [
[[]],
[()],
# sequence of sequences that weren't supported even before deprecation
np.array([np.array([]), np.array([1, 2, 3])], dtype=object),
[np.array([]), np.array([1, 2, 3])],
[set([1, 2, 3]), set([1, 2])],
[frozenset([1, 2, 3]), frozenset([1, 2])],
# and also confusable as sequences of sequences
[{0: 'a', 1: 'b'}, {0: 'a'}],
# empty second dimension
np.array([[], []]),
# 3d
np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),
]
}
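# Editor's note (illustrative expectations, mirroring the groups above):
#   type_of_target([0, 1, 1]) == 'binary'
#   type_of_target([0.2, 1.5]) == 'continuous'
#   type_of_target(np.array([[0, 1], [1, 0]])) == 'multilabel-indicator'
#   type_of_target([[1, 2], [3, 1]]) == 'multiclass-multioutput'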
NON_ARRAY_LIKE_EXAMPLES = [
set([1, 2, 3]),
{0: 'a', 1: 'b'},
{0: [5], 1: [5]},
'abc',
frozenset([1, 2, 3]),
None,
]
MULTILABEL_SEQUENCES = [
[[1], [2], [0, 1]],
[(), (2), (0, 1)],
np.array([[], [1, 2]], dtype='object'),
NotAnArray(np.array([[], [1, 2]], dtype='object'))
]
def test_unique_labels():
# Empty iterable
assert_raises(ValueError, unique_labels)
# Multiclass problem
assert_array_equal(unique_labels(xrange(10)), np.arange(10))
assert_array_equal(unique_labels(np.arange(10)), np.arange(10))
assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4]))
# Multilabel indicator
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[1, 0, 1],
[0, 0, 0]])),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[0, 0, 0]])),
np.arange(3))
# Several arrays passed
assert_array_equal(unique_labels([4, 0, 2], xrange(5)),
np.arange(5))
assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)),
np.arange(3))
# Border line case with binary indicator matrix
assert_raises(ValueError, unique_labels, [4, 0, 2], np.ones((5, 5)))
assert_raises(ValueError, unique_labels, np.ones((5, 4)), np.ones((5, 5)))
assert_array_equal(unique_labels(np.ones((4, 5)), np.ones((5, 5))),
np.arange(5))
def test_unique_labels_non_specific():
# Test unique_labels with a variety of collected examples
    # Smoke test for all supported formats
for format in ["binary", "multiclass", "multilabel-indicator"]:
for y in EXAMPLES[format]:
unique_labels(y)
    # We don't support those formats at the moment
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, unique_labels, example)
for y_type in ["unknown", "continuous", 'continuous-multioutput',
'multiclass-multioutput']:
for example in EXAMPLES[y_type]:
assert_raises(ValueError, unique_labels, example)
def test_unique_labels_mixed_types():
# Mix with binary or multiclass and multilabel
mix_clf_format = product(EXAMPLES["multilabel-indicator"],
EXAMPLES["multiclass"] +
EXAMPLES["binary"])
for y_multilabel, y_multiclass in mix_clf_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
assert_raises(ValueError, unique_labels, [[1, 2]], [["a", "d"]])
assert_raises(ValueError, unique_labels, ["1", 2])
assert_raises(ValueError, unique_labels, [["1", 2], [1, 3]])
assert_raises(ValueError, unique_labels, [["1", "2"], [2, 3]])
def test_is_multilabel():
for group, group_examples in iteritems(EXAMPLES):
if group in ['multilabel-indicator']:
dense_assert_, dense_exp = assert_true, 'True'
else:
dense_assert_, dense_exp = assert_false, 'False'
for example in group_examples:
# Only mark explicitly defined sparse examples as valid sparse
# multilabel-indicators
if group == 'multilabel-indicator' and issparse(example):
sparse_assert_, sparse_exp = assert_true, 'True'
else:
sparse_assert_, sparse_exp = assert_false, 'False'
if (issparse(example) or
(hasattr(example, '__array__') and
np.asarray(example).ndim == 2 and
np.asarray(example).dtype.kind in 'biuf' and
np.asarray(example).shape[1] > 0)):
examples_sparse = [sparse_matrix(example)
for sparse_matrix in [coo_matrix,
csc_matrix,
csr_matrix,
dok_matrix,
lil_matrix]]
for exmpl_sparse in examples_sparse:
sparse_assert_(is_multilabel(exmpl_sparse),
msg=('is_multilabel(%r)'
' should be %s')
% (exmpl_sparse, sparse_exp))
# Densify sparse examples before testing
if issparse(example):
example = example.toarray()
dense_assert_(is_multilabel(example),
msg='is_multilabel(%r) should be %s'
% (example, dense_exp))
def test_check_classification_targets():
for y_type in EXAMPLES.keys():
if y_type in ["unknown", "continuous", 'continuous-multioutput']:
for example in EXAMPLES[y_type]:
msg = 'Unknown label type: '
assert_raises_regex(ValueError, msg,
check_classification_targets, example)
else:
for example in EXAMPLES[y_type]:
check_classification_targets(example)
# @ignore_warnings
def test_type_of_target():
for group, group_examples in iteritems(EXAMPLES):
for example in group_examples:
assert_equal(type_of_target(example), group,
msg=('type_of_target(%r) should be %r, got %r'
% (example, group, type_of_target(example))))
for example in NON_ARRAY_LIKE_EXAMPLES:
        msg_regex = r'Expected array-like \(array or non-string sequence\).*'
assert_raises_regex(ValueError, msg_regex, type_of_target, example)
for example in MULTILABEL_SEQUENCES:
msg = ('You appear to be using a legacy multi-label data '
'representation. Sequence of sequences are no longer supported;'
' use a binary array or sparse matrix instead.')
assert_raises_regex(ValueError, msg, type_of_target, example)
def test_class_distribution():
y = np.array([[1, 0, 0, 1],
[2, 2, 0, 1],
[1, 3, 0, 1],
[4, 2, 0, 1],
[2, 0, 0, 1],
[1, 3, 0, 1]])
# Define the sparse matrix with a mix of implicit and explicit zeros
data = np.array([1, 2, 1, 4, 2, 1, 0, 2, 3, 2, 3, 1, 1, 1, 1, 1, 1])
indices = np.array([0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 5, 0, 1, 2, 3, 4, 5])
indptr = np.array([0, 6, 11, 11, 17])
y_sp = sp.csc_matrix((data, indices, indptr), shape=(6, 4))
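    # In CSC terms: column 0 stores entries 0-5 of `data`, column 1 stores
    # entries 6-10, column 2 stores nothing (indptr jumps 11 -> 11, so it holds
    # only implicit zeros) and column 3 stores entries 11-16.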
classes, n_classes, class_prior = class_distribution(y)
classes_sp, n_classes_sp, class_prior_sp = class_distribution(y_sp)
classes_expected = [[1, 2, 4],
[0, 2, 3],
[0],
[1]]
n_classes_expected = [3, 3, 1, 1]
class_prior_expected = [[3/6, 2/6, 1/6],
[1/3, 1/3, 1/3],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
# Test again with explicit sample weights
(classes,
n_classes,
class_prior) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
(classes_sp,
n_classes_sp,
class_prior_sp) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
class_prior_expected = [[4/9, 3/9, 2/9],
[2/9, 4/9, 3/9],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
| bsd-3-clause |
google/uncertainty-baselines | uncertainty_baselines/datasets/toxic_comments_test.py | 1 | 7250 | # coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for toxicity classification datasets."""
from absl.testing import parameterized
import pandas as pd
import tensorflow as tf
import tensorflow_datasets as tfds
from uncertainty_baselines.datasets import toxic_comments
HUB_PREPROCESS_URL = 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3'
CCDataClass = toxic_comments.CivilCommentsDataset
CCIdentitiesDataClass = toxic_comments.CivilCommentsIdentitiesDataset
WTDataClass = toxic_comments.WikipediaToxicityDataset
def _create_fake_signals(dataset_name, is_train_signals):
is_train_signals = is_train_signals[:5]
is_train_signals += [0] * (5 - len(is_train_signals))
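  # Truncate or zero-pad the signal list so that it lines up with the five
  # example ids hard-coded for each dataset below.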
fake_signals = {
'civil_comments_identities':
pd.DataFrame({
'id': [b'876617', b'688889', b'5769682', b'4997434', b'5489660'],
'is_train': is_train_signals
}),
'civil_comments':
pd.DataFrame({
'id': [b'634903', b'5977874', b'5390534', b'871483', b'825427'],
'is_train': is_train_signals
}),
'wikipedia_toxicity':
pd.DataFrame({
'id': [
b'ee9697785fe41ff8', b'29fec512f2ee929e', b'88944b29dde50648',
b'c7bf1f59096102f3', b'7d71ee0e8ea0794a'
],
'is_train': is_train_signals
})
}
return fake_signals[dataset_name]
class ToxicCommentsDatasetTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
('civil_train', tfds.Split.TRAIN, CCDataClass),
('civil_valid', tfds.Split.VALIDATION, CCDataClass),
('civil_test', tfds.Split.TEST, CCDataClass),
('civil_identities_train', tfds.Split.TRAIN, CCIdentitiesDataClass),
('civil_identities_valid', tfds.Split.VALIDATION, CCIdentitiesDataClass),
('civil_identities_test', tfds.Split.TEST, CCIdentitiesDataClass),
('wiki_train', tfds.Split.TRAIN, WTDataClass),
('wiki_valid', tfds.Split.VALIDATION, WTDataClass),
('wiki_test', tfds.Split.TEST, WTDataClass))
def testDatasetSize(self, split, dataset_class):
batch_size = 9 if split == tfds.Split.TRAIN else 5
dataset_builder = dataset_class(
split=split, dataset_type='tfds', shuffle_buffer_size=20)
dataset = dataset_builder.load(batch_size=batch_size).take(1)
element = next(iter(dataset))
features = element['features']
labels = element['labels']
ids = element['id']
self.assertEqual(features.shape[0], batch_size)
self.assertEqual(labels.shape[0], batch_size)
self.assertEqual(ids.shape[0], batch_size)
@parameterized.named_parameters(
('civil_train', tfds.Split.TRAIN, CCDataClass),
('civil_valid', tfds.Split.VALIDATION, CCDataClass),
('civil_test', tfds.Split.TEST, CCDataClass),
('civil_identities_train', tfds.Split.TRAIN, CCIdentitiesDataClass),
('civil_identities_valid', tfds.Split.VALIDATION, CCIdentitiesDataClass),
('civil_identities_test', tfds.Split.TEST, CCIdentitiesDataClass),
('wiki_train', tfds.Split.TRAIN, WTDataClass),
('wiki_valid', tfds.Split.VALIDATION, WTDataClass),
('wiki_test', tfds.Split.TEST, WTDataClass))
def testTFHubProcessor(self, split, dataset_class):
batch_size = 9 if split == tfds.Split.TRAIN else 5
dataset_builder = dataset_class(
split=split,
dataset_type='tfds',
shuffle_buffer_size=20,
tf_hub_preprocessor_url=HUB_PREPROCESS_URL)
dataset = dataset_builder.load(batch_size=batch_size).take(1)
element = next(iter(dataset))
input_ids = element['input_ids']
input_mask = element['input_mask']
segment_ids = element['segment_ids']
self.assertEqual(input_ids.shape[0], batch_size)
self.assertEqual(input_mask.shape[0], batch_size)
self.assertEqual(segment_ids.shape[0], batch_size)
@parameterized.named_parameters(
('civil_comments', CCDataClass),
('civil_comments_identities', CCIdentitiesDataClass),
('wikipedia_toxicity', WTDataClass))
def testSubType(self, dataset_class):
"""Test if toxicity subtype is available from the example."""
batch_size = 9
dataset_builder = dataset_class(
split=tfds.Split.TRAIN, dataset_type='tfds', shuffle_buffer_size=20)
dataset = dataset_builder.load(batch_size=batch_size).take(1)
element = next(iter(dataset))
for subtype_name in dataset_builder.additional_labels:
self.assertEqual(element[subtype_name].shape[0], batch_size)
self.assertEqual(element[subtype_name].dtype, tf.float32)
@parameterized.named_parameters(
('civil_comments', 'civil_comments', CCDataClass),
('civil_comments_identities', 'civil_comments_identities',
CCIdentitiesDataClass),
('wikipedia_toxicity', 'wikipedia_toxicity', WTDataClass))
def testAppendSignals(self, dataset_name, dataset_class):
"""Test if toxicity subtype is available from the example."""
batch_size = 5
is_train_signals = [1, 0, 0, 1, 0]
signals = _create_fake_signals(dataset_name, is_train_signals)
dataset_builder = dataset_class(
split=tfds.Split.TRAIN,
dataset_type='tfds',
is_training=False, # Fix the order.
signals=signals,
shuffle_buffer_size=20)
dataset = dataset_builder.load(batch_size=batch_size).take(1)
element = next(iter(dataset))
self.assertEqual(element['is_train'].numpy().tolist(), is_train_signals)
@parameterized.named_parameters(
('civil_comments', 'civil_comments', CCDataClass),
('civil_comments_identities', 'civil_comments_identities',
CCIdentitiesDataClass),
('wikipedia_toxicity', 'wikipedia_toxicity', WTDataClass))
def testOnlyKeepTrainExamples(self, dataset_name, dataset_class):
"""Test if toxicity subtype is available from the example."""
batch_size = 3
is_train_signals = [1, 1, 0, 1, 1]
signals = _create_fake_signals(dataset_name, is_train_signals)
dataset_builder = dataset_class(
split=tfds.Split.TRAIN,
dataset_type='tfds',
is_training=False, # Fix the order.
signals=signals,
only_keep_train_examples=True,
shuffle_buffer_size=20)
dataset = dataset_builder.load(batch_size=batch_size).take(1)
element = next(iter(dataset))
self.assertEqual(dataset_builder.num_examples, sum(is_train_signals))
expected_is_train_ids = signals[signals['is_train'] ==
1]['id'].values.tolist()
self.assertEqual(element['id'].numpy().tolist(),
expected_is_train_ids[:batch_size])
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
ishanic/scikit-learn | examples/ensemble/plot_gradient_boosting_quantile.py | 385 | 2114 | """
=====================================================
Prediction Intervals for Gradient Boosting Regression
=====================================================
This example shows how quantile regression can be used
to create prediction intervals.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingRegressor
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d(np.random.uniform(0, 10.0, size=100)).T
X = X.astype(np.float32)
# Observations
y = f(X).ravel()
dy = 1.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
y = y.astype(np.float32)
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
xx = xx.astype(np.float32)
alpha = 0.95
clf = GradientBoostingRegressor(loss='quantile', alpha=alpha,
n_estimators=250, max_depth=3,
learning_rate=.1, min_samples_leaf=9,
min_samples_split=9)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_upper = clf.predict(xx)
clf.set_params(alpha=1.0 - alpha)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_lower = clf.predict(xx)
clf.set_params(loss='ls')
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_pred = clf.predict(xx)
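# Optional, purely illustrative sanity check (an addition, not part of the
# original example): estimate the empirical coverage of the 90% interval on
# the training points by interpolating the predicted quantile curves back onto
# X. The number is only indicative, since the quantile models are evaluated on
# the same data they were fitted on.
coverage = np.mean((y >= np.interp(X.ravel(), xx.ravel(), y_lower)) &
                   (y <= np.interp(X.ravel(), xx.ravel(), y_upper)))
print("Empirical coverage of the 90%% prediction interval: %.3f" % coverage)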
# Plot the function, the prediction and the 90% confidence interval based on
# the MSE
fig = plt.figure()
plt.plot(xx, f(xx), 'g:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'b.', markersize=10, label=u'Observations')
plt.plot(xx, y_pred, 'r-', label=u'Prediction')
plt.plot(xx, y_upper, 'k-')
plt.plot(xx, y_lower, 'k-')
plt.fill(np.concatenate([xx, xx[::-1]]),
np.concatenate([y_upper, y_lower[::-1]]),
alpha=.5, fc='b', ec='None', label='90% prediction interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
xyguo/scikit-learn | examples/applications/topics_extraction_with_nmf_lda.py | 37 | 3869 | """
=======================================================================================
Topic extraction with Non-negative Matrix Factorization and Latent Dirichlet Allocation
=======================================================================================
This is an example of applying Non-negative Matrix Factorization
and Latent Dirichlet Allocation to a corpus of documents to
extract additive models of the topic structure of the corpus.
The output is a list of topics, each represented as a list of terms
(weights are not shown).
The default parameters (n_samples / n_features / n_topics) should make
the example runnable in a couple of tens of seconds. You can try to
increase the dimensions of the problem, but be aware that the time
complexity is polynomial in NMF. In LDA, the time complexity is
proportional to (n_samples * iterations).
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# Lars Buitinck
# Chyi-Kwei Yau <chyikwei.yau@gmail.com>
# License: BSD 3 clause
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
from sklearn.datasets import fetch_20newsgroups
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
def print_top_words(model, feature_names, n_top_words):
for topic_idx, topic in enumerate(model.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies, and common English words, words occurring in
# only one document or in at least 95% of the documents are removed.
print("Loading dataset...")
t0 = time()
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
data_samples = dataset.data[:n_samples]
print("done in %0.3fs." % (time() - t0))
# Use tf-idf features for NMF.
print("Extracting tf-idf features for NMF...")
tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2,
max_features=n_features,
stop_words='english')
t0 = time()
tfidf = tfidf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
# Use tf (raw term count) features for LDA.
print("Extracting tf features for LDA...")
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2,
max_features=n_features,
stop_words='english')
t0 = time()
tf = tf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
# Fit the NMF model
print("Fitting the NMF model with tf-idf features, "
"n_samples=%d and n_features=%d..."
% (n_samples, n_features))
t0 = time()
nmf = NMF(n_components=n_topics, random_state=1,
alpha=.1, l1_ratio=.5).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in NMF model:")
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
print_top_words(nmf, tfidf_feature_names, n_top_words)
print("Fitting LDA models with tf features, "
"n_samples=%d and n_features=%d..."
% (n_samples, n_features))
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=5,
learning_method='online',
learning_offset=50.,
random_state=0)
t0 = time()
lda.fit(tf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in LDA model:")
tf_feature_names = tf_vectorizer.get_feature_names()
print_top_words(lda, tf_feature_names, n_top_words)
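# A possible follow-up (illustrative, not part of the original example): the
# fitted LDA model can also return per-document topic distributions.
doc_topic = lda.transform(tf)
print("\nShape of the document-topic distribution matrix: %d x %d" % doc_topic.shape)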
| bsd-3-clause |
MichaelChatzidakis/Mn_Classifier_CNNs | test_digitized_spectra.py | 1 | 4604 | import glob
from keras.models import load_model
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import glob
import os
import sys
from train_crossval import load_data
from sklearn.model_selection import StratifiedKFold
from keras.utils import np_utils
def main(argv):
data_path = '/home/mike/Mn_Valences/Mn_Classifier_CV_Good_Copy/Data/Digitized_Mn_Usecases.pkl'
data = pd.read_pickle(data_path)
valence_names = ['Mn4', 'Mn2', 'Mn3', 'mixed23', 'mixed34', 'mixed']
    spectra_set, key = [], []
for valence in range(len(data)):
for spectra in range(len(data[valence])):
bins = 300
lower_bound=600
upper_bound=700
x = data[valence].iloc[spectra]['Energy']
y = data[valence].iloc[spectra]['Intensity']
x = x[(x>lower_bound)]
x = x[(x<upper_bound)]
y = y[x.index]
min_energy = np.min(x)
max_energy = np.max(x)
new_energy=np.linspace(min_energy,max_energy,bins)
new_intensity = np.interp(new_energy, x, y)
spectra_set.append(new_intensity)
key.append(spectra)
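    # The loop above crops every digitized spectrum to the 600-700 energy
    # window and resamples it onto a common 300-point grid, so that all inputs
    # share the same length.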
labels = [2]*12+[0]*10+[1]*9
spectra_set=spectra_set[:len(labels)]
x,y = load_data()
cv = StratifiedKFold(n_splits=10, random_state=13, shuffle=False)
X_train = [x[train_index] for train_index, test_index in cv.split(x, y)]
X_test = [x[test_index] for train_index, test_index in cv.split(x, y)]
y_train = [y[train_index] for train_index, test_index in cv.split(x, y)]
y_test = [y[test_index] for train_index, test_index in cv.split(x, y)]
spectra_set=np.array(spectra_set).astype('float32')
spectra_set -= np.mean(x)
spectra_set /= np.max(x)
spectra_set = spectra_set.reshape(spectra_set.shape + (1,))
labels = np.array(labels)
labels = np_utils.to_categorical(labels)
neural_network_name=['mlp_500epochs_Shift_dataaug-x0',
'cnn_500epochs_Shift_dataaug-x0',
'cnnmlp_500epochs_Shift_dataaug-x0',
'mlp_500epochs_Shift_dataaug-x1',
'cnn_500epochs_Shift_dataaug-x1',
'cnnmlp_500epochs_Shift_dataaug-x1',
'mlp_500epochs_Shift_dataaug-x10',
'cnn_500epochs_Shift_dataaug-x10',
'cnnmlp_500epochs_Shift_dataaug-x10']
scores = np.zeros((9,10))
for j,name in enumerate(neural_network_name):
print(name)
weights_paths=load_best_weights(name)
for i in range(10):
print("fold", i)
model=load_model(weights_paths[i])
scores[j][i]=model.evaluate(spectra_set, labels, verbose=0)[1]
print(scores[j][i])
import pdb; pdb.set_trace()
pd.DataFrame(scores).to_csv('{}_scores_dig_ref_spec.csv'.format(neural_network_name))
def load_best_weights(model):
root_path = os.path.join('/home/mike/Mn_Valences/Mn_Classifier_Reviewer_edits/weights/cross_validation_results/', model)
try:
weight_folds=sorted(next(os.walk(root_path))[1])
except StopIteration:
pass
weights=[]
for fold in weight_folds:
files_path = os.path.join(root_path, fold, '*.h5')
cv_weights = sorted(glob.iglob(files_path), key=os.path.getctime, reverse=True)
weights.append(cv_weights[0])
return weights
def get_pred_score(spectra_set, models, X_train, labels):
pred_score_mlp1=[]
for j, unknown_spectra in enumerate(spectra_set):
pred_label=[]
print("loading spectra: {}.".format( round(unknown_spectra.mean(),4) ) )
for i in range(10):
print(j, i)
data_set = X_train[i]
unknown_spectra -= np.mean(data_set)
unknown_spectra /= np.max(data_set)
            pred = models[i].predict(unknown_spectra.reshape(1, 300, 1))
pred_label.append(np.argmax(pred))
pred_label=pred_label
pred_score_mlp1.append(pred_label)
print("Real label is {}, predicted label is {}".format(labels[j], pred_score_mlp1[j]))
return pred_score_mlp1
def get_acc_scores(pred_score_cnnmlp, labels):
ss=pd.DataFrame([(pred_score_cnnmlp)]).transpose()
ss['round'] =np.round(pred_score_cnnmlp).astype('int')
ss['labels']=labels
ss['correct'] = ss['round']==ss['labels']
acc=[]
for valence in [0,1,2]:
acc.append(ss[ss['labels']==valence]['correct'].sum()/len( ss[ss['labels']==valence]['correct'])*1.0)
return acc
if __name__ == "__main__":
main(sys.argv)
| mit |
ChanChiChoi/scikit-learn | examples/ensemble/plot_ensemble_oob.py | 257 | 3265 | """
=============================
OOB Errors for Random Forests
=============================
The ``RandomForestClassifier`` is trained using *bootstrap aggregation*, where
each new tree is fit from a bootstrap sample of the training observations
:math:`z_i = (x_i, y_i)`. The *out-of-bag* (OOB) error is the average error for
each :math:`z_i` calculated using predictions from the trees that do not
contain :math:`z_i` in their respective bootstrap sample. This allows the
``RandomForestClassifier`` to be fit and validated whilst being trained [1].
The example below demonstrates how the OOB error can be measured at the
addition of each new tree during training. The resulting plot allows a
practitioner to approximate a suitable value of ``n_estimators`` at which the
error stabilizes.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", p592-593, Springer, 2009.
"""
import matplotlib.pyplot as plt
from collections import OrderedDict
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
# Author: Kian Ho <hui.kian.ho@gmail.com>
# Gilles Louppe <g.louppe@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 Clause
print(__doc__)
RANDOM_STATE = 123
# Generate a binary classification dataset.
X, y = make_classification(n_samples=500, n_features=25,
n_clusters_per_class=1, n_informative=15,
random_state=RANDOM_STATE)
# NOTE: Setting the `warm_start` construction parameter to `True` disables
# support for parallelised ensembles but is necessary for tracking the OOB
# error trajectory during training.
ensemble_clfs = [
("RandomForestClassifier, max_features='sqrt'",
RandomForestClassifier(warm_start=True, oob_score=True,
max_features="sqrt",
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features='log2'",
RandomForestClassifier(warm_start=True, max_features='log2',
oob_score=True,
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features=None",
RandomForestClassifier(warm_start=True, max_features=None,
oob_score=True,
random_state=RANDOM_STATE))
]
# Map a classifier name to a list of (<n_estimators>, <error rate>) pairs.
error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)
# Range of `n_estimators` values to explore.
min_estimators = 15
max_estimators = 175
for label, clf in ensemble_clfs:
for i in range(min_estimators, max_estimators + 1):
clf.set_params(n_estimators=i)
clf.fit(X, y)
# Record the OOB error for each `n_estimators=i` setting.
oob_error = 1 - clf.oob_score_
error_rate[label].append((i, oob_error))
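# For reference (an illustrative addition): report the OOB error reached by
# each ensemble at the largest number of trees.
for label, clf_err in error_rate.items():
    print("%s: final OOB error = %.4f" % (label, clf_err[-1][1]))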
# Generate the "OOB error rate" vs. "n_estimators" plot.
for label, clf_err in error_rate.items():
xs, ys = zip(*clf_err)
plt.plot(xs, ys, label=label)
plt.xlim(min_estimators, max_estimators)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.legend(loc="upper right")
plt.show()
| bsd-3-clause |
rigdenlab/conkit | conkit/misc/tests/test___init__.py | 1 | 4394 | """Testing facility for conkit.misc.__init__"""
__author__ = "Felix Simkovic"
__date__ = "10 Jan 2018"
import unittest
from conkit.misc import *
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
class TestMiscInit(unittest.TestCase):
def test_load_validation_model_1(self):
classifier, scaler = load_validation_model()
self.assertIsInstance(classifier, SVC)
self.assertIsInstance(scaler, StandardScaler)
def test_load_validation_model_2(self):
classifier, scaler = load_validation_model()
self.assertEqual(classifier.n_features_in_, len(SELECTED_VALIDATION_FEATURES))
self.assertEqual(scaler.n_features_in_, len(SELECTED_VALIDATION_FEATURES))
def test_load_validation_model_3(self):
classifier, scaler = load_validation_model()
self.assertTrue(hasattr(classifier, 'predict_proba'))
self.assertTrue(hasattr(scaler, 'transform'))
def test_normalize_1(self):
self.assertListEqual([0.0, 0.5, 1.0], normalize([1, 2, 3]))
def test_normalize_2(self):
self.assertListEqual([0.0, 0.5, 1.0], normalize([0.0, 0.5, 1.0]))
def test_normalize_3(self):
self.assertListEqual([0.0, 0.5, 1.0], normalize([-3, -2, -1]))
def test_normalize_4(self):
self.assertListEqual([0.0, 1.0], normalize([1, 2]))
def test_normalize_5(self):
self.assertListEqual([-1.0, 1.0], normalize([1, 2], vmin=-1))
def test_normalize_6(self):
self.assertListEqual([0.0, 2.0], normalize([1, 2], vmax=2))
def test_normalize_7(self):
self.assertListEqual([0.0, -1.0], normalize([1, 2], vmax=-1))
def test_normalize_8(self):
self.assertListEqual([0.2, 0.8], normalize([1, 2], vmin=0.2, vmax=0.8))
def test_normalize_9(self):
self.assertListEqual([0.2, 0.5, 0.8], normalize([1, 2, 3], vmin=0.2, vmax=0.8))
def test_normalize_10(self):
self.assertListEqual([1.0, 1.0, 1.0], normalize([2, 2, 2]))
def test_normalize_11(self):
self.assertListEqual([0.8, 0.8, 0.8], normalize([2, 2, 2], vmin=0.2, vmax=0.8))
def test_deprecated_1(self):
@deprecate("0.0.0")
def f():
return True
self.assertTrue(f())
def test_deprecated_2(self):
@deprecate("0.0.0", msg="hello world")
def f():
return True
self.assertTrue(f())
def test_deprecated_3(self):
@deprecate("0.0.0")
def f(a, b):
return a + b
self.assertEqual(2, f(1, 1))
def test_deprecated_4(self):
@deprecate("0.0.0")
class Obj(object):
pass
self.assertTrue(Obj())
def test_deprecated_5(self):
class Obj(object):
@deprecate("0.0.0")
def f(self, a, b):
return a + b
self.assertEqual(2, Obj().f(1, 1))
def test_deprecated_6(self):
class Obj(object):
@staticmethod
@deprecate("0.0.0")
def f(a, b):
return a + b
self.assertEqual(2, Obj.f(1, 1))
def test_deprecated_7(self):
class Obj(object):
@classmethod
@deprecate("0.0.0")
def f(cls, a, b):
return a + b
self.assertEqual(2, Obj().f(1, 1))
def test_deprecated_8(self):
class Obj(object):
@property
@deprecate("0.0.0")
def x(self):
return 1
self.assertEqual(1, Obj().x)
def test_deprecated_9(self):
class Obj(object):
@property
def x(self):
return self._x
@x.setter
@deprecate("0.0.0")
def x(self, x):
self._x = x
o = Obj()
o.x = 2
self.assertEqual(2, o.x)
def test_deprecated_10(self):
class Obj(object):
@deprecate("0.0.0")
@staticmethod
def f(a, b):
return a + b
with self.assertRaises(Exception):
Obj.f(1, 1)
def test_deprecated_11(self):
class Obj(object):
@deprecate("0.0.0")
@classmethod
def f(cls, a, b):
return a + b
with self.assertRaises(AttributeError):
Obj().f(1, 1)
if __name__ == "__main__":
unittest.main(verbosity=2)
| bsd-3-clause |
swethasubramanian/LungCancerDetection | src/models/predict_model.py | 2 | 6291 | """
A script to predict nodules using a conv net model and to analyse the results.
"""
import tflearn
from cnn_model import CNNModel
import tensorflow as tf
import pickle
import pandas as pd
import numpy as np
import h5py
from sklearn.metrics import roc_curve, auc, confusion_matrix
import itertools
import matplotlib.pyplot as plt
hdfs_file = '../data/test.h5'
def create_mosaic(image, nrows, ncols):
"""
Tiles all the layers in nrows x ncols
Args:
------
    image = 3d numpy array of (number of filters) x M x N dimensions
    nrows = integer number of rows of tiles in the mosaic
    ncols = integer number of columns of tiles in the mosaic
    returns the tiled 2d image
"""
M = image.shape[1]
N = image.shape[2]
npad = ((0,0), (1,1), (1,1))
image = np.pad(image, pad_width = npad, mode = 'constant',\
constant_values = 0)
M += 2
N += 2
image = image.reshape(nrows, ncols, M, N)
image = np.transpose(image, (0,2,1,3))
image = image.reshape(M*nrows, N*ncols)
return image
def format_image(image, num_images):
"""
    Randomly selects `num_images` feature maps from `image` and squeezes out singleton axes.
"""
idxs = np.random.choice(image.shape[0], num_images)
M = image.shape[1]
N = image.shape[2]
imagex = np.squeeze(image[idxs, :, :, :])
    print(imagex.shape)
return imagex
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Purples):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
#plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
#plt.grid('off')
plt.ylabel('True label')
plt.xlabel('Predicted label')
def load_images(filename):
"""
    Loads images contained in an HDF5 file
"""
h5f2 = h5py.File(filename, 'r')
X_test_images = h5f2['X']
Y_test_labels = h5f2['Y']
return X_test_images, Y_test_labels
def plot_predictions(images, filename):
"""
Plots the predictions mosaic
"""
imagex = format_image(images, 4)
mosaic = create_mosaic(imagex, 2, 2)
plt.figure(figsize = (12, 12))
plt.imshow(mosaic, cmap = 'gray')
plt.axis('off')
plt.savefig(filename + '.png', bbox_inches='tight')
def get_predictions(X_test_images, Y_test_labels):
"""
Args:
------
    Given HDF5 datasets X_test_images and Y_test_labels
returns:
--------
predictions: probability values for each class
label_predictions: returns predicted classes
"""
## Model definition
convnet = CNNModel()
network = convnet.define_network(X_test_images)
model = tflearn.DNN(network, tensorboard_verbose=0,\
checkpoint_path='nodule3-classifier.tfl.ckpt')
model.load("nodule3-classifier.tfl")
predictions = np.vstack(model.predict(X_test_images[:,:,:,:]))
#label_predictions = np.vstack(model.predict_label(X_test_images[:,:,:,:]))
score = model.evaluate(X_test_images, Y_test_labels)
label_predictions = np.zeros_like(predictions)
label_predictions[np.arange(len(predictions)), predictions.argmax(1)] = 1
return predictions, label_predictions
def get_roc_curve(Y_test_labels, predictions):
"""
Args:
-------
    HDF5 datasets: Y_test_labels and predictions
Returns:
--------
    fpr: false positive rate
    tpr: true positive rate
roc_auc: area under the curve value
"""
fpr, tpr, thresholds = roc_curve(Y_test_labels[:,1], predictions[:,1], pos_label=1)
roc_auc = auc(fpr, tpr)
return fpr, tpr, roc_auc
def get_metrics(Y_test_labels, label_predictions):
"""
Args:
-----
Y_test_labels, label_predictions
Returns:
--------
precision, recall and specificity values and cm
"""
cm = confusion_matrix(Y_test_labels[:,1], label_predictions[:,1])
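    # sklearn's confusion_matrix returns C[i, j] = number of samples with true
    # label i predicted as label j, so for the binary labels used here the
    # layout is [[TN, FP], [FN, TP]].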
TN = cm[0][0]
FP = cm[0][1]
FN = cm[1][0]
TP = cm[1][1]
precision = TP*1.0/(TP+FP)
recall = TP*1.0/(TP+FN)
specificity = TN*1.0/(TN+FP)
return precision, recall, specificity, cm
def plot_roc_curve(fpr, tpr, roc_auc):
"""
Plots ROC curve
Args:
-----
FPR, TPR and AUC
"""
plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='(AUC = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.axis('equal')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend(loc="lower right")
plt.savefig('roc1.png', bbox_inches='tight')
def main():
X_test_images, Y_test_labels = load_images(hdfs_file)
predictions, label_predictions = \
get_predictions(X_test_images, Y_test_labels)
fpr, tpr, roc_auc = get_roc_curve(Y_test_labels, predictions)
plot_roc_curve(fpr, tpr, roc_auc)
precision, recall, specificity, cm =\
get_metrics(Y_test_labels, label_predictions)
    print(precision, recall, specificity)
# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cm, classes=['no-nodule', 'nodule'], \
title='Confusion matrix')
plt.savefig('confusion_matrix.png', bbox_inches='tight')
# Plot all inputs representing True Positives, FP, FN, TN
TP_images = X_test_images[(Y_test_labels[:,1] == 1) & (label_predictions[:,1] == 1), :,:,:]
FP_images = X_test_images[(Y_test_labels[:,1] == 0) & (label_predictions[:,1] == 1), :,:,:]
TN_images = X_test_images[(Y_test_labels[:,1] == 0) & (label_predictions[:,1] == 0), :,:,:]
FN_images = X_test_images[(Y_test_labels[:,1] == 1) & (label_predictions[:,1] == 0), :,:,:]
## Choose 16 images randomly
plot_predictions(TP_images, 'preds_tps')
plot_predictions(TN_images, 'preds_tns')
plot_predictions(FN_images, 'preds_fns')
plot_predictions(FP_images, 'preds_fps')
if __name__ == "__main__":
main()
| mit |
zachmayer/gensim | gensim/corpora/sharded_corpus.py | 63 | 35097 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Original author: Jan Hajic jr.
# Copyright (C) 2015 Radim Rehurek and gensim team.
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
This module implements a corpus class that stores its data in separate files called
"shards". This is a compromise between speed (keeping the whole dataset
in memory) and memory footprint (keeping the data on disk and reading from it
on demand).
The corpus is intended for situations where you need to use your data
as numpy arrays for some iterative processing (like training something
using SGD, which usually involves heavy matrix multiplication).
"""
from __future__ import print_function
import logging
import os
import math
import numpy
import scipy.sparse as sparse
import time
logger = logging.getLogger(__name__)
#: Specifies which dtype should be used for serializing the shards.
_default_dtype = float
try:
import theano
_default_dtype = theano.config.floatX
except ImportError:
logger.info('Could not import Theano, will use standard float for default ShardedCorpus dtype.')
from six.moves import xrange
import gensim
from gensim.corpora import IndexedCorpus
from gensim.interfaces import TransformedCorpus
class ShardedCorpus(IndexedCorpus):
"""
This corpus is designed for situations where you need to train a model
on matrices, with a large number of iterations. (It should be faster than
gensim's other IndexedCorpus implementations for this use case; check the
`benchmark_datasets.py` script. It should also serialize faster.)
The corpus stores its data in separate files called
"shards". This is a compromise between speed (keeping the whole dataset
in memory) and memory footprint (keeping the data on disk and reading from
it on demand). Persistence is done using the standard gensim load/save methods.
.. note::
    The dataset is **read-only**; there is - as opposed to gensim's Similarity
class, which works similarly - no way of adding documents to the dataset
(for now).
You can use ShardedCorpus to serialize your data just like any other gensim
corpus that implements serialization. However, because the data is saved
as numpy 2-dimensional ndarrays (or scipy sparse matrices), you need to
supply the dimension of your data to the corpus. (The dimension of word
frequency vectors will typically be the size of the vocabulary, etc.)
>>> corpus = gensim.utils.mock_data()
>>> output_prefix = 'mydata.shdat'
>>> ShardedCorpus.serialize(output_prefix, corpus, dim=1000)
The `output_prefix` tells the ShardedCorpus where to put the data.
Shards are saved as `output_prefix.0`, `output_prefix.1`, etc.
All shards must be of the same size. The shards can be re-sized (which
is essentially a re-serialization into new-size shards), but note that
this operation will temporarily take twice as much disk space, because
the old shards are not deleted until the new shards are safely in place.
After serializing the data, the corpus will then save itself to the file
`output_prefix`.
On further initialization with the same `output_prefix`, the corpus
will load the already built dataset unless the `overwrite` option is
given. (A new object is "cloned" from the one saved to `output_prefix`
previously.)
To retrieve data, you can load the corpus and use it like a list:
>>> sh_corpus = ShardedCorpus.load(output_prefix)
>>> batch = sh_corpus[100:150]
This will retrieve a numpy 2-dimensional array of 50 rows and 1000
columns (1000 was the dimension of the data we supplied to the corpus).
To retrieve gensim-style sparse vectors, set the `gensim` property:
>>> sh_corpus.gensim = True
>>> batch = sh_corpus[100:150]
The batch now will be a generator of gensim vectors.
Since the corpus needs the data serialized in order to be able to operate,
it will serialize data right away on initialization. Instead of calling
`ShardedCorpus.serialize()`, you can just initialize and use the corpus
right away:
>>> corpus = ShardedCorpus(output_prefix, corpus, dim=1000)
>>> batch = corpus[100:150]
ShardedCorpus also supports working with scipy sparse matrices, both
during retrieval and during serialization. If you want to serialize your
data as sparse matrices, set the `sparse_serialization` flag. For
retrieving your data as sparse matrices, use the `sparse_retrieval`
flag. (You can also retrieve densely serialized data as sparse matrices,
for the sake of completeness, and vice versa.) By default, the corpus
will retrieve numpy ndarrays even if it was serialized into sparse
matrices.
>>> sparse_prefix = 'mydata.sparse.shdat'
>>> ShardedCorpus.serialize(sparse_prefix, corpus, dim=1000, sparse_serialization=True)
>>> sparse_corpus = ShardedCorpus.load(sparse_prefix)
>>> batch = sparse_corpus[100:150]
>>> type(batch)
<type 'numpy.ndarray'>
>>> sparse_corpus.sparse_retrieval = True
>>> batch = sparse_corpus[100:150]
<class 'scipy.sparse.csr.csr_matrix'>
While you *can* touch the `sparse_retrieval` attribute during the life
of a ShardedCorpus object, you should definitely not touch `
`sharded_serialization`! Changing the attribute will not miraculously
re-serialize the data in the requested format.
The CSR format is used for sparse data throughout.
Internally, to retrieve data, the dataset keeps track of which shard is
currently open and on a `__getitem__` request, either returns an item from
the current shard, or opens a new one. The shard size is constant, except
for the last shard.
"""
def __init__(self, output_prefix, corpus, dim=None,
shardsize=4096, overwrite=False, sparse_serialization=False,
sparse_retrieval=False, gensim=False):
"""Initializes the dataset. If `output_prefix` is not found,
builds the shards.
:type output_prefix: str
:param output_prefix: The absolute path to the file from which shard
filenames should be derived. The individual shards will be saved
as `output_prefix.0`, `output_prefix.1`, etc.
The `output_prefix` path then works as the filename to which
the ShardedCorpus object itself will be automatically saved.
Normally, gensim corpora do not do this, but ShardedCorpus needs
to remember several serialization settings: namely the shard
size and whether it was serialized in dense or sparse format. By
saving automatically, any new ShardedCorpus with the same
`output_prefix` will be able to find the information about the
data serialized with the given prefix.
If you want to *overwrite* your data serialized with some output
prefix, set the `overwrite` flag to True.
Of course, you can save your corpus separately as well using
the `save()` method.
:type corpus: gensim.interfaces.CorpusABC
:param corpus: The source corpus from which to build the dataset.
:type dim: int
:param dim: Specify beforehand what the dimension of a dataset item
should be. This is useful when initializing from a corpus that
doesn't advertise its dimension, or when it does and you want to
check that the corpus matches the expected dimension. **If `dim`
is left unused and `corpus` does not provide its dimension in
an expected manner, initialization will fail.**
:type shardsize: int
:param shardsize: How many data points should be in one shard. More
data per shard means less shard reloading but higher memory usage
and vice versa.
:type overwrite: bool
:param overwrite: If set, will build dataset from given corpus even
if `output_prefix` already exists.
:type sparse_serialization: bool
:param sparse_serialization: If set, will save the data in a sparse
form (as csr matrices). This is to speed up retrieval when you
know you will be using sparse matrices.
..note::
This property **should not change** during the lifetime of
the dataset. (If you find out you need to change from a sparse
to a dense representation, the best practice is to create
another ShardedCorpus object.)
:type sparse_retrieval: bool
:param sparse_retrieval: If set, will retrieve data as sparse vectors
(numpy csr matrices). If unset, will return ndarrays.
Note that retrieval speed for this option depends on how the dataset
was serialized. If `sparse_serialization` was set, then setting
`sparse_retrieval` will be faster. However, if the two settings
do not correspond, the conversion on the fly will slow the dataset
down.
:type gensim: bool
:param gensim: If set, will convert the output to gensim
sparse vectors (list of tuples (id, value)) to make it behave like
any other gensim corpus. This **will** slow the dataset down.
"""
self.output_prefix = output_prefix
self.shardsize = shardsize
self.n_docs = 0
self.offsets = []
self.n_shards = 0
self.dim = dim # This number may change during initialization/loading.
# Sparse vs. dense serialization and retrieval.
self.sparse_serialization = sparse_serialization
self.sparse_retrieval = sparse_retrieval
self.gensim = gensim
# The "state" of the dataset.
self.current_shard = None # The current shard itself (numpy ndarray)
self.current_shard_n = None # Current shard is the current_shard_n-th
self.current_offset = None # The index into the dataset which
# corresponds to index 0 of current shard
logger.info('Initializing sharded corpus with prefix '
'{0}'.format(output_prefix))
if (not os.path.isfile(output_prefix)) or overwrite:
logger.info('Building from corpus...')
self.init_shards(output_prefix, corpus, shardsize)
# Save automatically, to facilitate re-loading
# and retain information about how the corpus
# was serialized.
logger.info('Saving ShardedCorpus object to '
'{0}'.format(self.output_prefix))
self.save()
else:
logger.info('Cloning existing...')
self.init_by_clone()
def init_shards(self, output_prefix, corpus, shardsize=4096, dtype=_default_dtype):
"""Initialize shards from the corpus."""
if not gensim.utils.is_corpus(corpus):
raise ValueError('Cannot initialize shards without a corpus to read'
' from! (Got corpus type: {0})'.format(type(corpus)))
proposed_dim = self._guess_n_features(corpus)
if proposed_dim != self.dim:
if self.dim is None:
logger.info('Deriving dataset dimension from corpus: '
'{0}'.format(proposed_dim))
else:
logger.warn('Dataset dimension derived from input corpus diffe'
'rs from initialization argument, using corpus.'
'(corpus {0}, init arg {1})'.format(proposed_dim,
self.dim))
self.dim = proposed_dim
self.offsets = [0]
start_time = time.clock()
logger.info('Running init from corpus.')
for n, doc_chunk in enumerate(gensim.utils.grouper(corpus, chunksize=shardsize)):
logger.info('Chunk no. {0} at {1} s'.format(n, time.clock() - start_time))
current_shard = numpy.zeros((len(doc_chunk), self.dim), dtype=dtype)
logger.debug('Current chunk dimension: '
'{0} x {1}'.format(len(doc_chunk), self.dim))
for i, doc in enumerate(doc_chunk):
doc = dict(doc)
current_shard[i][list(doc)] = list(gensim.matutils.itervalues(doc))
# Handles the updating as well.
if self.sparse_serialization:
current_shard = sparse.csr_matrix(current_shard)
self.save_shard(current_shard)
end_time = time.clock()
logger.info('Built {0} shards in {1} s.'.format(self.n_shards, end_time - start_time))
def init_by_clone(self):
"""
Initialize by copying over attributes of another ShardedCorpus
instance saved to the output_prefix given at __init__().
"""
temp = self.__class__.load(self.output_prefix)
self.n_shards = temp.n_shards
self.n_docs = temp.n_docs
self.offsets = temp.offsets
if temp.dim != self.dim:
if self.dim is None:
logger.info('Loaded dataset dimension: {0}'.format(temp.dim))
else:
logger.warn('Loaded dataset dimension differs from init arg '
'dimension, using loaded dim. '
'(loaded {0}, init {1})'.format(temp.dim, self.dim))
self.dim = temp.dim # To be consistent with the loaded data!
def save_shard(self, shard, n=None, filename=None):
"""
Pickle the given shard. If `n` is not given, will consider the shard
a new one.
If `filename` is given, will use that file name instead of generating
one.
"""
new_shard = False
if n is None:
n = self.n_shards # Saving the *next* one by default.
new_shard = True
if not filename:
filename = self._shard_name(n)
gensim.utils.pickle(shard, filename)
if new_shard:
self.offsets.append(self.offsets[-1] + shard.shape[0])
self.n_docs += shard.shape[0]
self.n_shards += 1
def load_shard(self, n):
"""
Load (unpickle) the n-th shard as the "live" part of the dataset
into the Dataset object."""
#logger.debug('ShardedCorpus loading shard {0}, '
# 'current shard: {1}'.format(n, self.current_shard_n))
# No-op if the shard is already open.
if self.current_shard_n == n:
return
filename = self._shard_name(n)
if not os.path.isfile(filename):
raise ValueError('Attempting to load nonexistent shard no. {0}'.format(n))
shard = gensim.utils.unpickle(filename)
self.current_shard = shard
self.current_shard_n = n
self.current_offset = self.offsets[n]
def reset(self):
"""
Reset to no shard at all. Used for saving.
"""
self.current_shard = None
self.current_shard_n = None
self.current_offset = None
def shard_by_offset(self, offset):
"""
Determine which shard the given offset belongs to. If the offset
is greater than the number of available documents, raises a
`ValueError`.
Assumes that all shards have the same size.
"""
k = int(offset / self.shardsize)
if offset >= self.n_docs:
raise ValueError('Too high offset specified ({0}), available '
'docs: {1}'.format(offset, self.n_docs))
if offset < 0:
raise ValueError('Negative offset {0} currently not'
' supported.'.format(offset))
return k
k = -1
for i, o in enumerate(self.offsets):
if o > offset: # Condition should fire for every valid offset,
# since the last offset is n_docs (one-past-end).
k = i - 1 # First offset is always 0, so i is at least 1.
break
return k
def in_current(self, offset):
"""
Determine whether the given offset falls within the current shard.
"""
return (self.current_offset <= offset) \
and (offset < self.offsets[self.current_shard_n + 1])
def in_next(self, offset):
"""
Determine whether the given offset falls within the next shard.
This is a very small speedup: typically, we will be iterating through
the data forward. Could save considerable time with a very large number
of smaller shards.
"""
if self.current_shard_n == self.n_shards:
return False # There's no next shard.
return (self.offsets[self.current_shard_n + 1] <= offset) \
and (offset < self.offsets[self.current_shard_n + 2])
def resize_shards(self, shardsize):
"""
Re-process the dataset to new shard size. This may take pretty long.
Also, note that you need some space on disk for this one (we're
assuming there is enough disk space for double the size of the dataset
and that there is enough memory for old + new shardsize).
:type shardsize: int
:param shardsize: The new shard size.
"""
# Determine how many new shards there will be
n_new_shards = int(math.floor(self.n_docs / float(shardsize)))
if self.n_docs % shardsize != 0:
n_new_shards += 1
new_shard_names = []
new_offsets = [0]
for new_shard_idx in xrange(n_new_shards):
new_start = shardsize * new_shard_idx
new_stop = new_start + shardsize
# Last shard?
if new_stop > self.n_docs:
# Sanity check
assert new_shard_idx == n_new_shards - 1, \
'Shard no. {0} that ends at {1} over last document' \
' ({2}) is not the last projected shard ({3})???' \
''.format(new_shard_idx, new_stop, self.n_docs, n_new_shards)
new_stop = self.n_docs
new_shard = self[new_start:new_stop]
new_shard_name = self._resized_shard_name(new_shard_idx)
new_shard_names.append(new_shard_name)
try:
self.save_shard(new_shard, new_shard_idx, new_shard_name)
except Exception:
# Clean up on unsuccessful resize.
for new_shard_name in new_shard_names:
os.remove(new_shard_name)
raise
new_offsets.append(new_stop)
# Move old shard files out, new ones in. Complicated due to possibility
# of exceptions.
old_shard_names = [self._shard_name(n) for n in xrange(self.n_shards)]
try:
for old_shard_n, old_shard_name in enumerate(old_shard_names):
os.remove(old_shard_name)
except Exception as e:
logger.error('Exception occurred during old shard no. {0} '
'removal: {1}.\nAttempting to at least move '
'new shards in.'.format(old_shard_n, str(e)))
finally:
# If something happens with cleaning up - try to at least get the
# new guys in.
try:
for shard_n, new_shard_name in enumerate(new_shard_names):
os.rename(new_shard_name, self._shard_name(shard_n))
# If something happens when we're in this stage, we're screwed.
except Exception as e:
print(e)
raise RuntimeError('Resizing completely failed for some reason.'
' Sorry, dataset is probably ruined...')
finally:
# Sets the new shard stats.
self.n_shards = n_new_shards
self.offsets = new_offsets
self.shardsize = shardsize
self.reset()
def _shard_name(self, n):
"""Generate the name for the n-th shard."""
return self.output_prefix + '.' + str(n)
def _resized_shard_name(self, n):
"""
Generate the name for the n-th new shard temporary file when
resizing dataset. The file will then be re-named to standard shard name.
"""
return self.output_prefix + '.resize-temp.' + str(n)
def _guess_n_features(self, corpus):
"""Attempt to guess number of features in `corpus`."""
n_features = None
if hasattr(corpus, 'dim'):
# print 'Guessing from \'dim\' attribute.'
n_features = corpus.dim
elif hasattr(corpus, 'dictionary'):
            # print 'Guessing from dictionary.'
n_features = len(corpus.dictionary)
elif hasattr(corpus, 'n_out'):
# print 'Guessing from \'n_out\' attribute.'
n_features = corpus.n_out
elif hasattr(corpus, 'num_terms'):
# print 'Guessing from \'num_terms\' attribute.'
n_features = corpus.num_terms
elif isinstance(corpus, TransformedCorpus):
# TransformedCorpus: first check if the transformer object
# defines some output dimension; if it doesn't, relegate guessing
# to the corpus that is being transformed. This may easily fail!
try:
return self._guess_n_features(corpus.obj)
except TypeError:
return self._guess_n_features(corpus.corpus)
else:
if not self.dim:
raise TypeError('Couldn\'t find number of features, '
                                'refusing to guess (dimension set to {0}, '
'type of corpus: {1}).'.format(self.dim, type(corpus)))
else:
logger.warn('Couldn\'t find number of features, trusting '
'supplied dimension ({0})'.format(self.dim))
n_features = self.dim
if self.dim and n_features != self.dim:
logger.warn('Discovered inconsistent dataset dim ({0}) and '
'feature count from corpus ({1}). Coercing to dimension'
' given by argument.'.format(self.dim, n_features))
return n_features
def __len__(self):
return self.n_docs
def _ensure_shard(self, offset):
# No shard loaded
if self.current_shard is None:
shard_n = self.shard_by_offset(offset)
self.load_shard(shard_n)
# Find appropriate shard, if necessary
elif not self.in_current(offset):
if self.in_next(offset):
self.load_shard(self.current_shard_n + 1)
else:
shard_n = self.shard_by_offset(offset)
self.load_shard(shard_n)
def get_by_offset(self, offset):
"""As opposed to getitem, this one only accepts ints as offsets."""
self._ensure_shard(offset)
result = self.current_shard[offset - self.current_offset]
return result
def __getitem__(self, offset):
"""
Retrieve the given row of the dataset. Supports slice notation.
"""
if isinstance(offset, list):
# Handle all serialization & retrieval options.
if self.sparse_serialization:
l_result = sparse.vstack([self.get_by_offset(i)
for i in offset])
if self.gensim:
l_result = self._getitem_sparse2gensim(l_result)
elif not self.sparse_retrieval:
l_result = numpy.array(l_result.todense())
else:
l_result = numpy.array([self.get_by_offset(i) for i in offset])
if self.gensim:
l_result = self._getitem_dense2gensim(l_result)
elif self.sparse_retrieval:
l_result = sparse.csr_matrix(l_result)
return l_result
elif isinstance(offset, slice):
start = offset.start
stop = offset.stop
if stop > self.n_docs:
raise IndexError('Requested slice offset {0} out of range'
' ({1} docs)'.format(stop, self.n_docs))
# - get range of shards over which to iterate
first_shard = self.shard_by_offset(start)
last_shard = self.n_shards - 1
if not stop == self.n_docs:
last_shard = self.shard_by_offset(stop)
# This fails on one-past
# slice indexing; that's why there's a code branch here.
#logger.debug('ShardedCorpus: Retrieving slice {0}: '
# 'shard {1}'.format((offset.start, offset.stop),
# (first_shard, last_shard)))
self.load_shard(first_shard)
# The easy case: both in one shard.
if first_shard == last_shard:
s_result = self.current_shard[start - self.current_offset:
stop - self.current_offset]
# Handle different sparsity settings:
s_result = self._getitem_format(s_result)
return s_result
# The hard case: the slice is distributed across multiple shards
# - initialize numpy.zeros()
s_result = numpy.zeros((stop - start, self.dim),
dtype=self.current_shard.dtype)
if self.sparse_serialization:
s_result = sparse.csr_matrix((0, self.dim),
dtype=self.current_shard.dtype)
# - gradually build it up. We will be using three set of start:stop
# indexes:
# - into the dataset (these are the indexes the caller works with)
# - into the current shard
# - into the result
# Indexes into current result rows. These are always smaller than
# the dataset indexes by `start` (as we move over the shards,
# we're moving by the same number of rows through the result).
result_start = 0
result_stop = self.offsets[self.current_shard_n + 1] - start
# Indexes into current shard. These are trickiest:
# - if in starting shard, these are from (start - current_offset)
# to self.shardsize
# - if in intermediate shard, these are from 0 to self.shardsize
# - if in ending shard, these are from 0
# to (stop - current_offset)
shard_start = start - self.current_offset
shard_stop = self.offsets[self.current_shard_n + 1] - \
self.current_offset
#s_result[result_start:result_stop] = self.current_shard[
# shard_start:shard_stop]
s_result = self.__add_to_slice(s_result, result_start, result_stop,
shard_start, shard_stop)
            # The first and last shards get special treatment; the loop below
            # handles the shards in between.
for shard_n in xrange(first_shard+1, last_shard):
self.load_shard(shard_n)
result_start = result_stop
result_stop += self.shardsize
shard_start = 0
shard_stop = self.shardsize
s_result = self.__add_to_slice(s_result, result_start,
result_stop, shard_start,
shard_stop)
# Last shard
self.load_shard(last_shard)
result_start = result_stop
result_stop += stop - self.current_offset
shard_start = 0
shard_stop = stop - self.current_offset
s_result = self.__add_to_slice(s_result, result_start, result_stop,
shard_start, shard_stop)
s_result = self._getitem_format(s_result)
return s_result
else:
s_result = self.get_by_offset(offset)
s_result = self._getitem_format(s_result)
return s_result
def __add_to_slice(self, s_result, result_start, result_stop, start, stop):
"""
Add the rows of the current shard from `start` to `stop`
into rows `result_start` to `result_stop` of `s_result`.
        Operation is based on the self.sparse_serialization setting. If the shard
        contents are dense, then s_result is assumed to be an ndarray that
        already supports row indices `result_start:result_stop`. If the shard
        contents are sparse, s_result is assumed to already have `result_start`
        rows, and the new rows are stacked on until it has `result_stop` rows.
Returns the resulting s_result.
"""
        if (result_stop - result_start) != (stop - start):
            raise ValueError('Result start/stop range different than stop/start '
                             'range ({0} - {1} vs. {2} - {3})'.format(result_start,
                                                                      result_stop,
                                                                      start, stop))
# Dense data: just copy using numpy's slice notation
if not self.sparse_serialization:
s_result[result_start:result_stop] = self.current_shard[start:stop]
return s_result
# A bit more difficult, we're using a different structure to build the
# result.
else:
if s_result.shape != (result_start, self.dim):
                raise ValueError('Assumption about sparse s_result shape '
'invalid: {0} expected rows, {1} real '
'rows.'.format(result_start,
s_result.shape[0]))
tmp_matrix = self.current_shard[start:stop]
s_result = sparse.vstack([s_result, tmp_matrix])
return s_result
def _getitem_format(self, s_result):
if self.sparse_serialization:
if self.gensim:
s_result = self._getitem_sparse2gensim(s_result)
elif not self.sparse_retrieval:
s_result = numpy.array(s_result.todense())
else:
if self.gensim:
s_result = self._getitem_dense2gensim(s_result)
elif self.sparse_retrieval:
s_result = sparse.csr_matrix(s_result)
return s_result
def _getitem_sparse2gensim(self, result):
"""
Change given sparse result matrix to gensim sparse vectors.
Uses the internals of the sparse matrix to make this fast.
"""
def row_sparse2gensim(row_idx, csr_matrix):
indices = csr_matrix.indices[csr_matrix.indptr[row_idx]:csr_matrix.indptr[row_idx+1]]
g_row = [(col_idx, csr_matrix[row_idx, col_idx]) for col_idx in indices]
return g_row
output = (row_sparse2gensim(i, result) for i in xrange(result.shape[0]))
return output
def _getitem_dense2gensim(self, result):
"""Change given dense result matrix to gensim sparse vectors."""
if len(result.shape) == 1:
output = gensim.matutils.full2sparse(result)
else:
output = (gensim.matutils.full2sparse(result[i])
for i in xrange(result.shape[0]))
return output
# Overriding the IndexedCorpus and other corpus superclass methods
def __iter__(self):
"""
Yield dataset items one by one (generator).
"""
for i in xrange(len(self)):
yield self[i]
def save(self, *args, **kwargs):
"""
Save itself (the wrapper) in clean state (after calling `reset()`)
to the output_prefix file. If you wish to save to a different file,
use the `fname` argument as the first positional arg.
"""
# Can we save to a different file than output_prefix? Well, why not?
if len(args) == 0:
args = tuple([self.output_prefix])
attrs_to_ignore = ['current_shard',
'current_shard_n',
'current_offset']
if 'ignore' not in kwargs:
kwargs['ignore'] = frozenset(attrs_to_ignore)
else:
kwargs['ignore'] = frozenset([v for v in kwargs['ignore']]
+ attrs_to_ignore)
super(ShardedCorpus, self).save(*args, **kwargs)
#
# self.reset()
# with smart_open(self.output_prefix, 'wb') as pickle_handle:
# cPickle.dump(self, pickle_handle)
@classmethod
def load(cls, fname, mmap=None):
"""
Load itself in clean state. `mmap` has no effect here.
"""
return super(ShardedCorpus, cls).load(fname, mmap)
@staticmethod
def save_corpus(fname, corpus, id2word=None, progress_cnt=1000,
metadata=False, **kwargs):
"""
Implement a serialization interface. Do not call directly;
use the `serialize` method instead.
Note that you might need some ShardedCorpus init parameters, most
likely the dimension (`dim`). Again, pass these as `kwargs` to the
`serialize` method.
All this thing does is initialize a ShardedCorpus from a corpus
with the `output_prefix` argument set to the `fname` parameter
of this method. The initialization of a ShardedCorpus takes care of
serializing the data (in dense form) to shards.
Ignore the parameters id2word, progress_cnt and metadata. They
        currently do nothing and are here only to provide a method
        signature compatible with the superclass.
"""
ShardedCorpus(fname, corpus, **kwargs)
@classmethod
def serialize(serializer, fname, corpus, id2word=None,
index_fname=None, progress_cnt=None, labels=None,
metadata=False, **kwargs):
"""
Iterate through the document stream `corpus`, saving the documents
as a ShardedCorpus to `fname`.
Use this method instead of calling `save_corpus` directly.
You may need to supply some kwargs that are used upon dataset creation
(namely: `dim`, unless the dataset can infer the dimension from the
given corpus).
Ignore the parameters id2word, index_fname, progress_cnt, labels
and metadata. They currently do nothing and are here only to
        provide a method signature compatible with the superclass."""
serializer.save_corpus(fname, corpus, id2word=id2word,
progress_cnt=progress_cnt, metadata=metadata,
**kwargs)
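    # Illustrative usage sketch (`my_corpus`, the output prefix and the `dim`
    # value below are placeholders, not prescribed values):
    #
    #     ShardedCorpus.serialize('/tmp/shard_corpus', my_corpus, dim=1000,
    #                             shardsize=4096)
    #     loaded = ShardedCorpus.load('/tmp/shard_corpus')
    #     vectors = loaded[100:150]   # slicing may span several shards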
| lgpl-2.1 |
fredhusser/scikit-learn | sklearn/tests/test_metaestimators.py | 225 | 4954 | """Common tests for metaestimators"""
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.six import iterkeys
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_true, assert_false, assert_raises
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
from sklearn.ensemble import BaggingClassifier
class DelegatorData(object):
def __init__(self, name, construct, skip_methods=(),
fit_args=make_classification()):
self.name = name
self.construct = construct
self.fit_args = fit_args
self.skip_methods = skip_methods
DELEGATING_METAESTIMATORS = [
DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
DelegatorData('GridSearchCV',
lambda est: GridSearchCV(
est, param_grid={'param': [5]}, cv=2),
skip_methods=['score']),
DelegatorData('RandomizedSearchCV',
lambda est: RandomizedSearchCV(
est, param_distributions={'param': [5]}, cv=2, n_iter=1),
skip_methods=['score']),
DelegatorData('RFE', RFE,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('RFECV', RFECV,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('BaggingClassifier', BaggingClassifier,
skip_methods=['transform', 'inverse_transform', 'score',
'predict_proba', 'predict_log_proba', 'predict'])
]
def test_metaestimator_delegation():
# Ensures specified metaestimators have methods iff subestimator does
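    # `hides` wraps a method in a property that raises AttributeError whenever
    # the SubEstimator instance was built with `hidden_method` set to that
    # method's name, simulating a sub-estimator that lacks the method.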
def hides(method):
@property
def wrapper(obj):
if obj.hidden_method == method.__name__:
raise AttributeError('%r is hidden' % obj.hidden_method)
return functools.partial(method, obj)
return wrapper
class SubEstimator(BaseEstimator):
def __init__(self, param=1, hidden_method=None):
self.param = param
self.hidden_method = hidden_method
def fit(self, X, y=None, *args, **kwargs):
self.coef_ = np.arange(X.shape[1])
return True
def _check_fit(self):
if not hasattr(self, 'coef_'):
raise RuntimeError('Estimator is not fit')
@hides
def inverse_transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def predict(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_log_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def decision_function(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def score(self, X, *args, **kwargs):
self._check_fit()
return 1.0
methods = [k for k in iterkeys(SubEstimator.__dict__)
if not k.startswith('_') and not k.startswith('fit')]
methods.sort()
for delegator_data in DELEGATING_METAESTIMATORS:
delegate = SubEstimator()
delegator = delegator_data.construct(delegate)
for method in methods:
if method in delegator_data.skip_methods:
continue
assert_true(hasattr(delegate, method))
assert_true(hasattr(delegator, method),
msg="%s does not have method %r when its delegate does"
% (delegator_data.name, method))
# delegation before fit raises an exception
assert_raises(Exception, getattr(delegator, method),
delegator_data.fit_args[0])
delegator.fit(*delegator_data.fit_args)
for method in methods:
if method in delegator_data.skip_methods:
continue
# smoke test delegation
getattr(delegator, method)(delegator_data.fit_args[0])
for method in methods:
if method in delegator_data.skip_methods:
continue
delegate = SubEstimator(hidden_method=method)
delegator = delegator_data.construct(delegate)
assert_false(hasattr(delegate, method))
assert_false(hasattr(delegator, method),
msg="%s has method %r when its delegate does not"
% (delegator_data.name, method))
| bsd-3-clause |
Averroes/statsmodels | statsmodels/formula/tests/test_formula.py | 29 | 4647 | from statsmodels.compat.python import iteritems, StringIO
import warnings
from statsmodels.formula.api import ols
from statsmodels.formula.formulatools import make_hypotheses_matrices
from statsmodels.tools import add_constant
from statsmodels.datasets.longley import load, load_pandas
import numpy.testing as npt
from statsmodels.tools.testing import assert_equal
from numpy.testing.utils import WarningManager
longley_formula = 'TOTEMP ~ GNPDEFL + GNP + UNEMP + ARMED + POP + YEAR'
class CheckFormulaOLS(object):
@classmethod
def setupClass(cls):
cls.data = load()
def test_endog_names(self):
assert self.model.endog_names == 'TOTEMP'
def test_exog_names(self):
assert self.model.exog_names == ['Intercept', 'GNPDEFL', 'GNP',
'UNEMP', 'ARMED', 'POP', 'YEAR']
def test_design(self):
npt.assert_equal(self.model.exog,
add_constant(self.data.exog, prepend=True))
def test_endog(self):
npt.assert_equal(self.model.endog, self.data.endog)
def test_summary(self):
# smoke test
warn_ctx = WarningManager()
warn_ctx.__enter__()
try:
warnings.filterwarnings("ignore",
"kurtosistest only valid for n>=20")
self.model.fit().summary()
finally:
warn_ctx.__exit__()
class TestFormulaPandas(CheckFormulaOLS):
@classmethod
def setupClass(cls):
data = load_pandas().data
cls.model = ols(longley_formula, data)
super(TestFormulaPandas, cls).setupClass()
class TestFormulaDict(CheckFormulaOLS):
@classmethod
def setupClass(cls):
data = dict((k, v.tolist()) for k, v in iteritems(load_pandas().data))
cls.model = ols(longley_formula, data)
super(TestFormulaDict, cls).setupClass()
class TestFormulaRecArray(CheckFormulaOLS):
@classmethod
def setupClass(cls):
data = load().data
cls.model = ols(longley_formula, data)
super(TestFormulaRecArray, cls).setupClass()
def test_tests():
formula = 'TOTEMP ~ GNPDEFL + GNP + UNEMP + ARMED + POP + YEAR'
dta = load_pandas().data
results = ols(formula, dta).fit()
test_formula = '(GNPDEFL = GNP), (UNEMP = 2), (YEAR/1829 = 1)'
LC = make_hypotheses_matrices(results, test_formula)
R = LC.coefs
Q = LC.constants
npt.assert_almost_equal(R, [[0, 1, -1, 0, 0, 0, 0],
                                [0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1./1829]], 8)
npt.assert_array_equal(Q, [[0],[2],[1]])
def test_formula_labels():
# make sure labels pass through patsy as expected
# data(Duncan) from car in R
dta = StringIO(""""type" "income" "education" "prestige"\n"accountant" "prof" 62 86 82\n"pilot" "prof" 72 76 83\n"architect" "prof" 75 92 90\n"author" "prof" 55 90 76\n"chemist" "prof" 64 86 90\n"minister" "prof" 21 84 87\n"professor" "prof" 64 93 93\n"dentist" "prof" 80 100 90\n"reporter" "wc" 67 87 52\n"engineer" "prof" 72 86 88\n"undertaker" "prof" 42 74 57\n"lawyer" "prof" 76 98 89\n"physician" "prof" 76 97 97\n"welfare.worker" "prof" 41 84 59\n"teacher" "prof" 48 91 73\n"conductor" "wc" 76 34 38\n"contractor" "prof" 53 45 76\n"factory.owner" "prof" 60 56 81\n"store.manager" "prof" 42 44 45\n"banker" "prof" 78 82 92\n"bookkeeper" "wc" 29 72 39\n"mail.carrier" "wc" 48 55 34\n"insurance.agent" "wc" 55 71 41\n"store.clerk" "wc" 29 50 16\n"carpenter" "bc" 21 23 33\n"electrician" "bc" 47 39 53\n"RR.engineer" "bc" 81 28 67\n"machinist" "bc" 36 32 57\n"auto.repairman" "bc" 22 22 26\n"plumber" "bc" 44 25 29\n"gas.stn.attendant" "bc" 15 29 10\n"coal.miner" "bc" 7 7 15\n"streetcar.motorman" "bc" 42 26 19\n"taxi.driver" "bc" 9 19 10\n"truck.driver" "bc" 21 15 13\n"machine.operator" "bc" 21 20 24\n"barber" "bc" 16 26 20\n"bartender" "bc" 16 28 7\n"shoe.shiner" "bc" 9 17 3\n"cook" "bc" 14 22 16\n"soda.clerk" "bc" 12 30 6\n"watchman" "bc" 17 25 11\n"janitor" "bc" 7 20 8\n"policeman" "bc" 34 47 41\n"waiter" "bc" 8 32 10""")
from pandas import read_table
dta = read_table(dta, sep=" ")
model = ols("prestige ~ income + education", dta).fit()
assert_equal(model.fittedvalues.index, dta.index)
def test_formula_predict():
from numpy import log
formula = """TOTEMP ~ log(GNPDEFL) + log(GNP) + UNEMP + ARMED +
POP + YEAR"""
data = load_pandas()
dta = load_pandas().data
results = ols(formula, dta).fit()
npt.assert_almost_equal(results.fittedvalues.values,
results.predict(data.exog), 8)
| bsd-3-clause |
elkingtonmcb/scikit-learn | sklearn/decomposition/tests/test_kernel_pca.py | 154 | 8058 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import (assert_array_almost_equal, assert_less,
assert_equal, assert_not_equal,
assert_raises)
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.metrics.pairwise import rbf_kernel
def test_kernel_pca():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
for eigen_solver in ("auto", "dense", "arpack"):
for kernel in ("linear", "rbf", "poly", histogram):
# histogram kernel produces singular matrix inside linalg.solve
# XXX use a least-squares approximation?
inv = not callable(kernel)
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=inv)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# non-regression test: previously, gamma would be 0 by default,
# forcing all eigenvalues to 0 under the poly kernel
assert_not_equal(X_fit_transformed, [])
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
if inv:
X_pred2 = kpca.inverse_transform(X_pred_transformed)
assert_equal(X_pred2.shape, X_pred.shape)
def test_invalid_parameters():
assert_raises(ValueError, KernelPCA, 10, fit_inverse_transform=True,
kernel='precomputed')
def test_kernel_pca_sparse():
rng = np.random.RandomState(0)
X_fit = sp.csr_matrix(rng.random_sample((5, 4)))
X_pred = sp.csr_matrix(rng.random_sample((2, 4)))
for eigen_solver in ("auto", "arpack"):
for kernel in ("linear", "rbf", "poly"):
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=False)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
# X_pred2 = kpca.inverse_transform(X_pred_transformed)
# assert_equal(X_pred2.shape, X_pred.shape)
def test_kernel_pca_linear_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
# for a linear kernel, kernel PCA should find the same projection as PCA
# modulo the sign (direction)
# fit only the first four components: fifth is near zero eigenvalue, so
# can be trimmed due to roundoff error
assert_array_almost_equal(
np.abs(KernelPCA(4).fit(X_fit).transform(X_pred)),
np.abs(PCA(4).fit(X_fit).transform(X_pred)))
def test_kernel_pca_n_components():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
for c in [1, 2, 4]:
kpca = KernelPCA(n_components=c, eigen_solver=eigen_solver)
shape = kpca.fit(X_fit).transform(X_pred).shape
assert_equal(shape, (2, c))
def test_remove_zero_eig():
X = np.array([[1 - 1e-30, 1], [1, 1], [1, 1 - 1e-20]])
# n_components=None (default) => remove_zero_eig is True
kpca = KernelPCA()
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
kpca = KernelPCA(n_components=2)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 2))
kpca = KernelPCA(n_components=2, remove_zero_eig=True)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
def test_kernel_pca_precomputed():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
X_kpca = KernelPCA(4, eigen_solver=eigen_solver).\
fit(X_fit).transform(X_pred)
X_kpca2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_pred, X_fit.T))
X_kpca_train = KernelPCA(
4, eigen_solver=eigen_solver,
kernel='precomputed').fit_transform(np.dot(X_fit, X_fit.T))
X_kpca_train2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_fit, X_fit.T))
assert_array_almost_equal(np.abs(X_kpca),
np.abs(X_kpca2))
assert_array_almost_equal(np.abs(X_kpca_train),
np.abs(X_kpca_train2))
def test_kernel_pca_invalid_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((2, 4))
kpca = KernelPCA(kernel="tototiti")
assert_raises(ValueError, kpca.fit, X_fit)
def test_gridsearch_pipeline():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="rbf", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(kernel_pca__gamma=2. ** np.arange(-2, 2))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
grid_search.fit(X, y)
assert_equal(grid_search.best_score_, 1)
def test_gridsearch_pipeline_precomputed():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model using a precomputed kernel.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="precomputed", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(Perceptron__n_iter=np.arange(1, 5))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
X_kernel = rbf_kernel(X, gamma=2.)
grid_search.fit(X_kernel, y)
assert_equal(grid_search.best_score_, 1)
def test_nested_circles():
# Test the linear separability of the first 2D KPCA transform
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
# 2D nested circles are not linearly separable
train_score = Perceptron().fit(X, y).score(X, y)
assert_less(train_score, 0.8)
# Project the circles data into the first 2 components of a RBF Kernel
# PCA model.
# Note that the gamma value is data dependent. If this test breaks
# and the gamma value has to be updated, the Kernel PCA example will
# have to be updated too.
kpca = KernelPCA(kernel="rbf", n_components=2,
fit_inverse_transform=True, gamma=2.)
X_kpca = kpca.fit_transform(X)
# The data is perfectly linearly separable in that space
train_score = Perceptron().fit(X_kpca, y).score(X_kpca, y)
assert_equal(train_score, 1.0)
| bsd-3-clause |
scalyr/scalyr-agent-2 | scalyr_agent/third_party/pymysql/tests/test_SSCursor.py | 2 | 3766 | import sys
try:
from pymysql.tests import base
import pymysql.cursors
from pymysql.constants import CLIENT
except Exception:
# For local testing from top-level directory, without installing
sys.path.append('../pymysql')
from pymysql.tests import base
import pymysql.cursors
from pymysql.constants import CLIENT
class TestSSCursor(base.PyMySQLTestCase):
def test_SSCursor(self):
affected_rows = 18446744073709551615
conn = self.connect(client_flag=CLIENT.MULTI_STATEMENTS)
data = [
('America', '', 'America/Jamaica'),
('America', '', 'America/Los_Angeles'),
('America', '', 'America/Lima'),
('America', '', 'America/New_York'),
('America', '', 'America/Menominee'),
('America', '', 'America/Havana'),
('America', '', 'America/El_Salvador'),
('America', '', 'America/Costa_Rica'),
('America', '', 'America/Denver'),
('America', '', 'America/Detroit'),]
cursor = conn.cursor(pymysql.cursors.SSCursor)
# Create table
cursor.execute('CREATE TABLE tz_data ('
'region VARCHAR(64),'
'zone VARCHAR(64),'
'name VARCHAR(64))')
conn.begin()
# Test INSERT
for i in data:
cursor.execute('INSERT INTO tz_data VALUES (%s, %s, %s)', i)
self.assertEqual(conn.affected_rows(), 1, 'affected_rows does not match')
conn.commit()
# Test fetchone()
iter = 0
cursor.execute('SELECT * FROM tz_data')
while True:
row = cursor.fetchone()
if row is None:
break
iter += 1
# Test cursor.rowcount
self.assertEqual(cursor.rowcount, affected_rows,
'cursor.rowcount != %s' % (str(affected_rows)))
# Test cursor.rownumber
self.assertEqual(cursor.rownumber, iter,
                             'cursor.rownumber != %s' % (str(iter)))
# Test row came out the same as it went in
self.assertEqual((row in data), True,
'Row not found in source data')
# Test fetchall
cursor.execute('SELECT * FROM tz_data')
self.assertEqual(len(cursor.fetchall()), len(data),
'fetchall failed. Number of rows does not match')
# Test fetchmany
cursor.execute('SELECT * FROM tz_data')
self.assertEqual(len(cursor.fetchmany(2)), 2,
'fetchmany failed. Number of rows does not match')
# So MySQLdb won't throw "Commands out of sync"
while True:
res = cursor.fetchone()
if res is None:
break
# Test update, affected_rows()
cursor.execute('UPDATE tz_data SET zone = %s', ['Foo'])
conn.commit()
self.assertEqual(cursor.rowcount, len(data),
'Update failed. affected_rows != %s' % (str(len(data))))
# Test executemany
cursor.executemany('INSERT INTO tz_data VALUES (%s, %s, %s)', data)
self.assertEqual(cursor.rowcount, len(data),
'executemany failed. cursor.rowcount != %s' % (str(len(data))))
# Test multiple datasets
cursor.execute('SELECT 1; SELECT 2; SELECT 3')
self.assertListEqual(list(cursor), [(1, )])
self.assertTrue(cursor.nextset())
self.assertListEqual(list(cursor), [(2, )])
self.assertTrue(cursor.nextset())
self.assertListEqual(list(cursor), [(3, )])
self.assertFalse(cursor.nextset())
cursor.execute('DROP TABLE IF EXISTS tz_data')
cursor.close()
__all__ = ["TestSSCursor"]
if __name__ == "__main__":
import unittest
unittest.main()
| apache-2.0 |
jaor/python | bigml/api_handlers/modelhandler.py | 2 | 6695 | # -*- coding: utf-8 -*-
#pylint: disable=abstract-method
#
# Copyright 2014-2022 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base class for models' REST calls
https://bigml.com/api/models
"""
try:
import simplejson as json
except ImportError:
import json
from bigml.api_handlers.resourcehandler import ResourceHandlerMixin
from bigml.api_handlers.resourcehandler import check_resource_type, \
resource_is_ready, get_resource_type, check_resource, \
get_cluster_id
from bigml.constants import (MODEL_PATH, CLUSTER_PATH, DATASET_PATH,
TINY_RESOURCE)
class ModelHandlerMixin(ResourceHandlerMixin):
"""This class is used by the BigML class as
    a mixin that provides the REST calls for models. It should not
be instantiated independently.
"""
def __init__(self):
"""Initializes the ModelHandler. This class is intended to be
        used as a mixin on ResourceHandler, which inherits its
        attributes and basic methods from BigMLConnection, and must not be
instantiated independently.
"""
self.model_url = self.url + MODEL_PATH
def create_model(self, origin_resource, args=None, wait_time=3, retries=10):
"""Creates a model from an origin_resource.
Uses a remote resource to create a new model using the
arguments in `args`.
The allowed remote resources can be:
- dataset
- list of datasets
- cluster
In the case of using cluster id as origin_resource, a centroid must
also be provided in the args argument. The first centroid is used
otherwise.
"""
create_args = {}
if args is not None:
create_args.update(args)
if isinstance(origin_resource, list):
            # multidatasets
create_args = self._set_create_from_datasets_args(
origin_resource, args=create_args, wait_time=wait_time,
retries=retries)
else:
resource_type = get_resource_type(origin_resource)
# model from cluster and centroid
if resource_type == CLUSTER_PATH:
cluster_id = get_cluster_id(origin_resource)
cluster = check_resource(cluster_id,
query_string=TINY_RESOURCE,
wait_time=wait_time,
retries=retries,
raise_on_error=True, api=self)
if 'centroid' not in create_args:
try:
centroid = list(cluster['object'][
'cluster_models'].keys())[0]
create_args.update({'centroid': centroid})
except KeyError:
raise KeyError("Failed to generate the model. A "
"centroid id is needed in the args "
"argument to generate a model from "
"a cluster.")
create_args.update({'cluster': cluster_id})
elif resource_type == DATASET_PATH:
create_args = self._set_create_from_datasets_args(
origin_resource, args=create_args, wait_time=wait_time,
retries=retries)
else:
                raise Exception("A dataset, list of dataset ids"
                                " or cluster id plus centroid id are needed"
                                " to create a"
                                " model. %s found." % resource_type)
body = json.dumps(create_args)
return self._create(self.model_url, body)
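    # Illustrative sketch of the call (resource ids and names below are
    # placeholders, not real resources):
    #
    #     api = BigML()   # assumes credentials are already configured
    #     model = api.create_model('dataset/<dataset-id>', {"name": "my model"})
    #     model = api.create_model('cluster/<cluster-id>', {"centroid": "000001"})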
def get_model(self, model, query_string='',
shared_username=None, shared_api_key=None):
"""Retrieves a model.
The model parameter should be a string containing the
model id or the dict returned by create_model.
As model is an evolving object that is processed
until it reaches the FINISHED or FAULTY state, the function will
return a dict that encloses the model values and state info
available at the time it is called.
If this is a shared model, the username and sharing api key must
also be provided.
If it's a model inside an ensemble or fusion, the shared_ref is
needed.
"""
check_resource_type(model, MODEL_PATH,
message="A model id is needed.")
return self.get_resource(model,
query_string=query_string,
shared_username=shared_username,
shared_api_key=shared_api_key)
def model_is_ready(self, model, **kwargs):
"""Checks whether a model's status is FINISHED.
"""
check_resource_type(model, MODEL_PATH,
message="A model id is needed.")
resource = self.get_model(model, **kwargs)
return resource_is_ready(resource)
def list_models(self, query_string=''):
"""Lists all your models.
"""
return self._list(self.model_url, query_string)
def update_model(self, model, changes):
"""Updates a model.
"""
check_resource_type(model, MODEL_PATH,
message="A model id is needed.")
return self.update_resource(model, changes)
def delete_model(self, model):
"""Deletes a model.
"""
check_resource_type(model, MODEL_PATH,
message="A model id is needed.")
return self.delete_resource(model)
def clone_model(self, model,
args=None, wait_time=3, retries=10):
"""Creates a cloned model from an existing `model`
"""
create_args = self._set_clone_from_args(
model, "model", args=args, wait_time=wait_time,
retries=retries)
body = json.dumps(create_args)
return self._create(self.model_url, body)
| apache-2.0 |
raghavrv/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 79 | 1234 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
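    # Piecewise form, with margin z = y_pred * y_true: -4*z for z < -1,
    # (1 - z)**2 for -1 <= z < 1, and 0 for z >= 1.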
z = y_pred * y_true
loss = -4 * z
loss[z >= -1] = (1 - z[z >= -1]) ** 2
loss[z >= 1.] = 0
return loss
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
lw = 2
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], color='gold', lw=lw,
label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), color='teal', lw=lw,
label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), color='yellowgreen', lw=lw,
label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), color='cornflowerblue', lw=lw,
label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, color='orange', lw=lw,
label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), color='darkorchid', lw=lw,
linestyle='--', label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y=1, f(x))$")
plt.show()
| bsd-3-clause |
elkingtonmcb/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_02_sentiment.py | 255 | 2406 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
    # TASK: print the cross-validated scores for each parameter set
# explored by the grid search
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
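    # One possible (illustrative, non-canonical) sketch for the TASKs above;
    # the pipeline step names and grid values are assumptions:
    #
    #     pipeline = Pipeline([
    #         ('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
    #         ('clf', LinearSVC(C=1000)),
    #     ])
    #     parameters = {'vect__ngram_range': [(1, 1), (1, 2)]}
    #     grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
    #     grid_search.fit(docs_train, y_train)
    #     for params, mean_score, scores in grid_search.grid_scores_:
    #         print("%0.3f (+/-%0.03f) for %r"
    #               % (mean_score, scores.std() * 2, params))
    #     y_predicted = grid_search.predict(docs_test)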
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
frank-tancf/scikit-learn | sklearn/datasets/tests/test_kddcup99.py | 57 | 1336 | """Test kddcup99 loader. Only 'percent10' mode is tested, as the full data
is too big to use in unit-testing.
The test is skipped if the data wasn't previously fetched and saved to
the scikit-learn data folder.
"""
import errno
from sklearn.datasets import fetch_kddcup99
from sklearn.utils.testing import assert_equal, SkipTest
def test_percent10():
try:
data = fetch_kddcup99(download_if_missing=False)
except IOError as e:
if e.errno == errno.ENOENT:
raise SkipTest("kddcup99 dataset can not be loaded.")
assert_equal(data.data.shape, (494021, 41))
assert_equal(data.target.shape, (494021,))
data_shuffled = fetch_kddcup99(shuffle=True, random_state=0)
assert_equal(data.data.shape, data_shuffled.data.shape)
assert_equal(data.target.shape, data_shuffled.target.shape)
data = fetch_kddcup99('SA')
assert_equal(data.data.shape, (100655, 41))
assert_equal(data.target.shape, (100655,))
data = fetch_kddcup99('SF')
assert_equal(data.data.shape, (73237, 4))
assert_equal(data.target.shape, (73237,))
data = fetch_kddcup99('http')
assert_equal(data.data.shape, (58725, 3))
assert_equal(data.target.shape, (58725,))
data = fetch_kddcup99('smtp')
assert_equal(data.data.shape, (9571, 3))
assert_equal(data.target.shape, (9571,))
| bsd-3-clause |
ecobost/pipeline | python/pipeline/temperature.py | 5 | 5751 | import datajoint as dj
from pipeline import experiment
from commons import lab
from datajoint.jobs import key_hash
import os
import numpy as np
from .utils import h5, signal
from .exceptions import PipelineException
from . import notify
schema = dj.schema('pipeline_temperature')
@schema
class Temperature(dj.Imported):
definition = """ # temperature across the scan
-> experiment.Scan
---
temp_time : longblob # (secs) times of each temperature sample in behavior clock
temperatures : longblob # (Celsius) temperature trace
median_temperature : float # (Celsius) median temperature over the recording
temp_ts=CURRENT_TIMESTAMP : timestamp
"""
@property
def key_source(self):
return experiment.Scan() & experiment.Scan.BehaviorFile().proj()
def _make_tuples(self, key):
# Get behavior filename
behavior_path = (experiment.Session() & key).fetch1('behavior_path')
local_path = lab.Paths().get_local_path(behavior_path)
filename = (experiment.Scan.BehaviorFile() & key).fetch1('filename')
full_filename = os.path.join(local_path, filename)
# Read file
data = h5.read_behavior_file(full_filename)
# Get counter timestamps and convert to seconds
ts = h5.ts2sec(data['ts'], is_packeted=True)
# Read temperature (if available) and invalidate points with unreliable timestamps
temp_raw = data.get('temperature', None)
if temp_raw is None:
raise PipelineException('Scan {animal_id}-{session}-{scan_idx} does not have '
'temperature data'.format(**key))
temp_raw[np.isnan(ts)] = float('nan')
# Read temperature and smooth it
temp_celsius = (temp_raw * 100 - 32) / 1.8 # F to C
sampling_rate = int(round(1 / np.nanmedian(np.diff(ts)))) # samples per second
smooth_temp = signal.low_pass_filter(temp_celsius, sampling_rate, cutoff_freq=1,
filter_size=2 * sampling_rate)
# Resample at 1 Hz
downsampled_ts = ts[::sampling_rate]
downsampled_temp = smooth_temp[::sampling_rate]
# Insert
self.insert1({**key, 'temp_time': downsampled_ts,
'temperatures': downsampled_temp,
'median_temperature': np.nanmedian(downsampled_temp)})
self.notify(key)
@notify.ignore_exceptions
def notify(self, key):
ts, temperatures = (self & key).fetch1('temp_time', 'temperatures')
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(10, 5))
plt.plot(ts, temperatures)
plt.ylabel('Temperature (C)')
plt.xlabel('Seconds')
img_filename = '/tmp/' + key_hash(key) + '.png'
fig.savefig(img_filename)
plt.close(fig)
msg = 'temperature for {animal_id}-{session}-{scan_idx}'.format(**key)
slack_user = notify.SlackUser() & (experiment.Session() & key)
slack_user.notify(file=img_filename, file_title=msg)
def session_plot(self):
""" Do a plot of how temperature progress through a session"""
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
# Check that plot is restricted to a single session
session_key = self.fetch('KEY', limit=1)[0]
session_key.pop('scan_idx')
if len(self & session_key) != len(self):
raise PipelineException('Plot can only be generated for one session at a '
'time')
# Get times and timestamps, scan_ts
scan_indices, ts, temperatures = self.fetch('scan_idx', 'temp_time',
'temperatures', order_by='scan_idx')
session_ts = (experiment.Session() & self).fetch1('session_ts')
scan_ts = (experiment.Scan() & self).fetch('scan_ts', order_by='scan_idx')
abs_ts = [(sts - session_ts).seconds + (t - t[0]) for sts, t in zip(scan_ts, ts)]
# Plot
fig = plt.figure(figsize=(10, 5))
for abs_ts_, temp_, scan_idx in zip(abs_ts, temperatures, scan_indices):
plt.plot(abs_ts_ / 3600, temp_, label='Scan {}'.format(scan_idx)) # in hours
plt.title('Temperature for {animal_id}-{session} starting at {session_ts}'.format(
session_ts=session_ts, **session_key))
plt.ylabel('Temperature (Celsius)')
plt.xlabel('Hour')
plt.legend()
# Plot formatting
plt.gca().yaxis.set_major_locator(ticker.MultipleLocator(0.5))
plt.grid(linestyle='--', alpha=0.8)
return fig
@schema
class TempDrift(dj.Computed):
definition = """ # assuming temperature increases/decreases consistently, compute rate of change
-> Temperature
---
temp_slope : float # (C/hour) change in temperature
rmse : float # (C) root mean squared error of the fit
"""
def _make_tuples(self, key):
# Get all times and temperatures
ts, temperatures = (Temperature() & key).fetch1('temp_time', 'temperatures')
ts = ts[~np.isnan(temperatures)]
temperatures = temperatures[~np.isnan(temperatures)]
# Fit a line (robust regression)
from sklearn import linear_model
X = ts.reshape(-1, 1)
y = temperatures
model = linear_model.TheilSenRegressor()
model.fit(X, y)
# Get results
z_slope = model.coef_[0] * 3600 # C/hour
rmse = np.sqrt(np.mean((temperatures - model.predict(X)) ** 2))
self.insert1({**key, 'temp_slope': z_slope, 'rmse': rmse}) | lgpl-3.0 |
FCH808/FCH808.github.io | Intro to Machine Learning/ud120-projects/pca/eigenfaces.py | 5 | 4980 | """
===================================================
Faces recognition example using eigenfaces and SVMs
===================================================
The dataset used in this example is a preprocessed excerpt of the
"Labeled Faces in the Wild", aka LFW_:
http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB)
.. _LFW: http://vis-www.cs.umass.edu/lfw/
original source: http://scikit-learn.org/stable/auto_examples/applications/face_recognition.html
"""
print __doc__
from time import time
import logging
import pylab as pl
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.datasets import fetch_lfw_people
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import RandomizedPCA
from sklearn.svm import SVC
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
###############################################################################
# Download the data, if not already on disk and load it as numpy arrays
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
np.random.seed(42)
# for machine learning we use the 2D data directly (as relative pixel
# position info is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print "Total dataset size:"
print "n_samples: %d" % n_samples
print "n_features: %d" % n_features
print "n_classes: %d" % n_classes
###############################################################################
# Split into a training set and a test set using a stratified k fold
# split into a training and testing set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)
###############################################################################
# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_components = 150
print "Extracting the top %d eigenfaces from %d faces" % (n_components, X_train.shape[0])
t0 = time()
pca = RandomizedPCA(n_components=n_components, whiten=True).fit(X_train)
print "done in %0.3fs" % (time() - t0)
eigenfaces = pca.components_.reshape((n_components, h, w))
print "Projecting the input data on the eigenfaces orthonormal basis"
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print "done in %0.3fs" % (time() - t0)
###############################################################################
# Train a SVM classification model
print "Fitting the classifier to the training set"
t0 = time()
param_grid = {
'C': [1e3, 5e3, 1e4, 5e4, 1e5],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1],
}
clf = GridSearchCV(SVC(kernel='rbf', class_weight='auto'), param_grid)
clf = clf.fit(X_train_pca, y_train)
print "done in %0.3fs" % (time() - t0)
print "Best estimator found by grid search:"
print clf.best_estimator_
###############################################################################
# Quantitative evaluation of the model quality on the test set
print "Predicting the people names on the testing set"
t0 = time()
y_pred = clf.predict(X_test_pca)
print "done in %0.3fs" % (time() - t0)
print classification_report(y_test, y_pred, target_names=target_names)
print confusion_matrix(y_test, y_pred, labels=range(n_classes))
###############################################################################
# Qualitative evaluation of the predictions using matplotlib
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
"""Helper function to plot a gallery of portraits"""
pl.figure(figsize=(1.8 * n_col, 2.4 * n_row))
pl.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
for i in range(n_row * n_col):
pl.subplot(n_row, n_col, i + 1)
pl.imshow(images[i].reshape((h, w)), cmap=pl.cm.gray)
pl.title(titles[i], size=12)
pl.xticks(())
pl.yticks(())
# plot the result of the prediction on a portion of the test set
def title(y_pred, y_test, target_names, i):
pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1]
true_name = target_names[y_test[i]].rsplit(' ', 1)[-1]
return 'predicted: %s\ntrue: %s' % (pred_name, true_name)
prediction_titles = [title(y_pred, y_test, target_names, i)
for i in range(y_pred.shape[0])]
plot_gallery(X_test, prediction_titles, h, w)
# plot the gallery of the most significant eigenfaces
eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
pl.show()
| mit |
andrewcmyers/tensorflow | tensorflow/contrib/keras/python/keras/datasets/cifar.py | 84 | 1542 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities used by the CIFAR10 and CIFAR100 datasets.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from six.moves import cPickle
def load_batch(fpath, label_key='labels'):
"""Internal utility for parsing CIFAR data.
Arguments:
      fpath: path of the file to parse.
      label_key: key for label data in the retrieved
          dictionary.
Returns:
A tuple `(data, labels)`.
"""
f = open(fpath, 'rb')
if sys.version_info < (3,):
d = cPickle.load(f)
else:
d = cPickle.load(f, encoding='bytes')
# decode utf8
d_decoded = {}
for k, v in d.items():
d_decoded[k.decode('utf8')] = v
d = d_decoded
f.close()
data = d['data']
labels = d[label_key]
data = data.reshape(data.shape[0], 3, 32, 32)
return data, labels
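# Illustrative usage sketch (the path below is a placeholder):
#
#     data, labels = load_batch('/path/to/cifar-10-batches-py/data_batch_1')
#     # for a CIFAR-10 batch, `data` has shape (10000, 3, 32, 32) and `labels`
#     # is a list of 10000 integer class ids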
| apache-2.0 |
yonglehou/scikit-learn | examples/mixture/plot_gmm_selection.py | 247 | 3223 | """
=================================
Gaussian Mixture Model Selection
=================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
In that case, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
print(__doc__)
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
for n_components in n_components_range:
# Fit a mixture of Gaussians with EM
gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
bic = np.array(bic)
color_iter = itertools.cycle(['k', 'r', 'g', 'b', 'c', 'm', 'y'])
clf = best_gmm
bars = []
# Plot the BIC scores
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
xpos = np.array(n_components_range) + .2 * (i - 2)
bars.append(plt.bar(xpos, bic[i * len(n_components_range):
(i + 1) * len(n_components_range)],
width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
.2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
color_iter)):
v, w = linalg.eigh(covar)
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180 * angle / np.pi # convert to degrees
v *= 4
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
| bsd-3-clause |
yonglehou/scikit-learn | sklearn/mixture/tests/test_gmm.py | 199 | 17427 | import unittest
import copy
import sys
from nose.tools import assert_true
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises)
from scipy import stats
from sklearn import mixture
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.externals.six.moves import cStringIO as StringIO
rng = np.random.RandomState(0)
def test_sample_gaussian():
# Test sample generation from mixture.sample_gaussian where covariance
# is diagonal, spherical and full
n_features, n_samples = 2, 300
axis = 1
mu = rng.randint(10) * rng.rand(n_features)
cv = (rng.rand(n_features) + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='diag', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(samples.var(axis), cv, atol=1.5))
# the same for spherical covariances
cv = (rng.rand() + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='spherical', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.5))
assert_true(np.allclose(
samples.var(axis), np.repeat(cv, n_features), atol=1.5))
# and for full covariances
A = rng.randn(n_features, n_features)
cv = np.dot(A.T, A) + np.eye(n_features)
samples = mixture.sample_gaussian(
mu, cv, covariance_type='full', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(np.cov(samples), cv, atol=2.5))
# Numerical stability check: in SciPy 0.12.0 at least, eigh may return
# tiny negative values in its second return value.
from sklearn.mixture import sample_gaussian
x = sample_gaussian([0, 0], [[4, 3], [1, .1]],
covariance_type='full', random_state=42)
print(x)
assert_true(np.isfinite(x).all())
def _naive_lmvnpdf_diag(X, mu, cv):
# slow and naive implementation of lmvnpdf
ref = np.empty((len(X), len(mu)))
stds = np.sqrt(cv)
for i, (m, std) in enumerate(zip(mu, stds)):
ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1)
return ref
def test_lmvnpdf_diag():
# test a slow and naive implementation of lmvnpdf and
# compare it to the vectorized version (mixture.lmvnpdf) to test
# for correctness
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
ref = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, cv, 'diag')
assert_array_almost_equal(lpr, ref)
def test_lmvnpdf_spherical():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
spherecv = rng.rand(n_components, 1) ** 2 + 1
X = rng.randint(10) * rng.rand(n_samples, n_features)
cv = np.tile(spherecv, (n_features, 1))
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, spherecv,
'spherical')
assert_array_almost_equal(lpr, reference)
def test_lmvnpdf_full():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
fullcv = np.array([np.diag(x) for x in cv])
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, fullcv, 'full')
assert_array_almost_equal(lpr, reference)
def test_lvmpdf_full_cv_non_positive_definite():
n_features, n_samples = 2, 10
rng = np.random.RandomState(0)
X = rng.randint(10) * rng.rand(n_samples, n_features)
mu = np.mean(X, 0)
cv = np.array([[[-1, 0], [0, 1]]])
expected_message = "'covars' must be symmetric, positive-definite"
assert_raise_message(ValueError, expected_message,
mixture.log_multivariate_normal_density,
X, mu, cv, 'full')
def test_GMM_attributes():
n_components, n_features = 10, 4
covariance_type = 'diag'
g = mixture.GMM(n_components, covariance_type, random_state=rng)
weights = rng.rand(n_components)
weights = weights / weights.sum()
means = rng.randint(-20, 20, (n_components, n_features))
assert_true(g.n_components == n_components)
assert_true(g.covariance_type == covariance_type)
g.weights_ = weights
assert_array_almost_equal(g.weights_, weights)
g.means_ = means
assert_array_almost_equal(g.means_, means)
covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2
g.covars_ = covars
assert_array_almost_equal(g.covars_, covars)
assert_raises(ValueError, g._set_covars, [])
assert_raises(ValueError, g._set_covars,
np.zeros((n_components - 2, n_features)))
assert_raises(ValueError, mixture.GMM, n_components=20,
covariance_type='badcovariance_type')
class GMMTester():
do_test_eval = True
def _setUp(self):
self.n_components = 10
self.n_features = 4
self.weights = rng.rand(self.n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.randint(-20, 20, (self.n_components, self.n_features))
self.threshold = -0.5
self.I = np.eye(self.n_features)
self.covars = {
'spherical': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'tied': (make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I),
'diag': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'full': np.array([make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I for x in range(self.n_components)])}
def test_eval(self):
if not self.do_test_eval:
return # DPGMM does not support setting the means and
# covariances before fitting There is no way of fixing this
# due to the variational parameters being more expressive than
# covariance matrices
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = self.covars[self.covariance_type]
g.weights_ = self.weights
gaussidx = np.repeat(np.arange(self.n_components), 5)
n_samples = len(gaussidx)
X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx]
ll, responsibilities = g.score_samples(X)
self.assertEqual(len(ll), n_samples)
self.assertEqual(responsibilities.shape,
(n_samples, self.n_components))
assert_array_almost_equal(responsibilities.sum(axis=1),
np.ones(n_samples))
assert_array_equal(responsibilities.argmax(axis=1), gaussidx)
def test_sample(self, n=100):
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
g.weights_ = self.weights
samples = g.sample(n)
self.assertEqual(samples.shape, (n, self.n_features))
def test_train(self, params='wmc'):
g = mixture.GMM(n_components=self.n_components,
covariance_type=self.covariance_type)
g.weights_ = self.weights
g.means_ = self.means
g.covars_ = 20 * self.covars[self.covariance_type]
# Create a training set by sampling from the predefined distribution.
X = g.sample(n_samples=100)
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-1,
n_iter=1, init_params=params)
g.fit(X)
# Do one training iteration at a time so we can keep track of
# the log likelihood to make sure that it increases after each
# iteration.
trainll = []
for _ in range(5):
g.params = params
g.init_params = ''
g.fit(X)
trainll.append(self.score(g, X))
g.n_iter = 10
g.init_params = ''
g.params = params
g.fit(X) # finish fitting
# Note that the log likelihood will sometimes decrease by a
# very small amount after it has more or less converged due to
# the addition of min_covar to the covariance (to prevent
# underflow). This is why the threshold is set to -0.5
# instead of 0.
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > self.threshold,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, self.threshold, self.covariance_type, trainll))
def test_train_degenerate(self, params='wmc'):
# Train on degenerate data with 0 in some dimensions
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, self.n_features)
X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-3, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)
def test_train_1d(self, params='wmc'):
# Train on 1-D data
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, 1)
# X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-7, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
if isinstance(g, mixture.DPGMM):
self.assertTrue(np.sum(np.abs(trainll / 100)) < 5)
else:
self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)
def score(self, g, X):
return g.score(X).sum()
class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester):
covariance_type = 'spherical'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester):
covariance_type = 'diag'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithTiedCovars(unittest.TestCase, GMMTester):
covariance_type = 'tied'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithFullCovars(unittest.TestCase, GMMTester):
covariance_type = 'full'
model = mixture.GMM
setUp = GMMTester._setUp
def test_multiple_init():
# Test that multiple inits does not much worse than a single one
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, covariance_type='spherical',
random_state=rng, min_covar=1e-7, n_iter=5)
train1 = g.fit(X).score(X).sum()
g.n_init = 5
train2 = g.fit(X).score(X).sum()
assert_true(train2 >= train1 - 1.e-2)
def test_n_parameters():
# Test that the right number of parameters is estimated
n_samples, n_dim, n_components = 7, 5, 2
X = rng.randn(n_samples, n_dim)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_true(g._n_parameters() == n_params[cv_type])
def test_1d_1component():
# Test all of the covariance_types return the same BIC score for
# 1-dimensional, 1 component fits.
n_samples, n_dim, n_components = 100, 1, 1
X = rng.randn(n_samples, n_dim)
g_full = mixture.GMM(n_components=n_components, covariance_type='full',
random_state=rng, min_covar=1e-7, n_iter=1)
g_full.fit(X)
g_full_bic = g_full.bic(X)
for cv_type in ['tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_array_almost_equal(g.bic(X), g_full_bic)
def assert_fit_predict_correct(model, X):
model2 = copy.deepcopy(model)
predictions_1 = model.fit(X).predict(X)
predictions_2 = model2.fit_predict(X)
assert adjusted_rand_score(predictions_1, predictions_2) == 1.0
def test_fit_predict():
"""
test that gmm.fit_predict is equivalent to gmm.fit + gmm.predict
"""
lrng = np.random.RandomState(101)
n_samples, n_dim, n_comps = 100, 2, 2
mu = np.array([[8, 8]])
component_0 = lrng.randn(n_samples, n_dim)
component_1 = lrng.randn(n_samples, n_dim) + mu
X = np.vstack((component_0, component_1))
for m_constructor in (mixture.GMM, mixture.VBGMM, mixture.DPGMM):
model = m_constructor(n_components=n_comps, covariance_type='full',
min_covar=1e-7, n_iter=5,
random_state=np.random.RandomState(0))
assert_fit_predict_correct(model, X)
model = mixture.GMM(n_components=n_comps, n_iter=0)
z = model.fit_predict(X)
assert np.all(z == 0), "Quick Initialization Failed!"
def test_aic():
# Test the aic and bic criteria
n_samples, n_dim, n_components = 50, 3, 2
X = rng.randn(n_samples, n_dim)
SGH = 0.5 * (X.var() + np.log(2 * np.pi)) # standard gaussian entropy
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7)
g.fit(X)
aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters()
bic = (2 * n_samples * SGH * n_dim +
np.log(n_samples) * g._n_parameters())
bound = n_dim * 3. / np.sqrt(n_samples)
assert_true(np.abs(g.aic(X) - aic) / n_samples < bound)
assert_true(np.abs(g.bic(X) - bic) / n_samples < bound)
def check_positive_definite_covars(covariance_type):
r"""Test that covariance matrices do not become non positive definite
Due to the accumulation of round-off errors, the computation of the
covariance matrices during the learning phase could lead to non-positive
definite covariance matrices. Namely the use of the formula:
.. math:: C = (\sum_i w_i x_i x_i^T) - \mu \mu^T
instead of:
.. math:: C = \sum_i w_i (x_i - \mu)(x_i - \mu)^T
    while mathematically equivalent, was observed to raise a ``LinAlgError``
    exception when computing a ``GMM`` with full covariance matrices and fixed
    mean (see the illustrative helper sketch below).
This function ensures that some later optimization will not introduce the
problem again.
"""
rng = np.random.RandomState(1)
# we build a dataset with 2 2d component. The components are unbalanced
# (respective weights 0.9 and 0.1)
X = rng.randn(100, 2)
X[-10:] += (3, 3) # Shift the 10 last points
gmm = mixture.GMM(2, params="wc", covariance_type=covariance_type,
min_covar=1e-3)
# This is a non-regression test for issue #2640. The following call used
# to trigger:
# numpy.linalg.linalg.LinAlgError: 2-th leading minor not positive definite
gmm.fit(X)
if covariance_type == "diag" or covariance_type == "spherical":
assert_greater(gmm.covars_.min(), 0)
else:
if covariance_type == "tied":
covs = [gmm.covars_]
else:
covs = gmm.covars_
for c in covs:
assert_greater(np.linalg.det(c), 0)
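# Illustrative sketch (not part of the original test suite): the docstring of
# ``check_positive_definite_covars`` above contrasts two covariance formulas.
# The hypothetical helper below implements the numerically stable, centered
# form C = sum_i w_i (x_i - mu)(x_i - mu)^T for a single component; it is not
# used by the tests and only serves as a reference sketch.
def _centered_weighted_covariance_sketch(X, weights):
    """Weighted covariance computed from centered data (the stable formula)."""
    w = np.asarray(weights, dtype=float)
    w = w / w.sum()                        # normalize the weights
    mu = np.dot(w, X)                      # weighted mean of the component
    Xc = X - mu                            # center the data before accumulating
    return np.dot(Xc.T, w[:, None] * Xc)   # sum_i w_i (x_i - mu)(x_i - mu)^T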
def test_positive_definite_covars():
# Check positive definiteness for all covariance types
for covariance_type in ["full", "tied", "diag", "spherical"]:
yield check_positive_definite_covars, covariance_type
def test_verbose_first_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
| bsd-3-clause |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/scipy/stats/stats.py | 7 | 186886 | # Copyright 2002 Gary Strangman. All rights reserved
# Copyright 2002-2016 The SciPy Developers
#
# The original code from Gary Strangman was heavily adapted for
# use in SciPy by Travis Oliphant. The original code came with the
# following disclaimer:
#
# This software is provided "as-is". There are no expressed or implied
# warranties of any kind, including, but not limited to, the warranties
# of merchantability and fitness for a given application. In no event
# shall Gary Strangman be liable for any direct, indirect, incidental,
# special, exemplary or consequential damages (including, but not limited
# to, loss of use, data or profits, or business interruption) however
# caused and on any theory of liability, whether in contract, strict
# liability or tort (including negligence or otherwise) arising in any way
# out of the use of this software, even if advised of the possibility of
# such damage.
"""
A collection of basic statistical functions for python. The function
names appear below.
Some scalar functions defined here are also available in the scipy.special
package where they work on arbitrary sized arrays.
Disclaimers: The function list is obviously incomplete and, worse, the
functions are not optimized. All functions have been tested (some more
so than others), but they are far from bulletproof. Thus, as with any
free software, no warranty or guarantee is expressed or implied. :-) A
few extra functions that don't appear in the list below can be found by
interested treasure-hunters. These functions don't necessarily have
both list and array versions but were deemed useful.
Central Tendency
----------------
.. autosummary::
:toctree: generated/
gmean
hmean
mode
Moments
-------
.. autosummary::
:toctree: generated/
moment
variation
skew
kurtosis
normaltest
Altered Versions
----------------
.. autosummary::
:toctree: generated/
tmean
tvar
tstd
tsem
describe
Frequency Stats
---------------
.. autosummary::
:toctree: generated/
itemfreq
scoreatpercentile
percentileofscore
histogram
cumfreq
relfreq
Variability
-----------
.. autosummary::
:toctree: generated/
obrientransform
signaltonoise
sem
zmap
zscore
iqr
Trimming Functions
------------------
.. autosummary::
:toctree: generated/
threshold
trimboth
trim1
Correlation Functions
---------------------
.. autosummary::
:toctree: generated/
pearsonr
fisher_exact
spearmanr
pointbiserialr
kendalltau
weightedtau
linregress
theilslopes
Inferential Stats
-----------------
.. autosummary::
:toctree: generated/
ttest_1samp
ttest_ind
ttest_ind_from_stats
ttest_rel
chisquare
power_divergence
ks_2samp
mannwhitneyu
ranksums
wilcoxon
kruskal
friedmanchisquare
combine_pvalues
Probability Calculations
------------------------
.. autosummary::
:toctree: generated/
chisqprob
betai
ANOVA Functions
---------------
.. autosummary::
:toctree: generated/
f_oneway
f_value
Support Functions
-----------------
.. autosummary::
:toctree: generated/
ss
square_of_sums
rankdata
References
----------
.. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
"""
from __future__ import division, print_function, absolute_import
import warnings
import math
from collections import namedtuple
import numpy as np
from numpy import array, asarray, ma, zeros
from scipy._lib.six import callable, string_types
from scipy._lib._version import NumpyVersion
import scipy.special as special
import scipy.linalg as linalg
from . import distributions
from . import mstats_basic
from ._distn_infrastructure import _lazywhere
from ._stats_mstats_common import _find_repeats, linregress, theilslopes
from ._stats import _kendall_dis, _toint64, _weightedrankedtau
__all__ = ['find_repeats', 'gmean', 'hmean', 'mode', 'tmean', 'tvar',
'tmin', 'tmax', 'tstd', 'tsem', 'moment', 'variation',
'skew', 'kurtosis', 'describe', 'skewtest', 'kurtosistest',
'normaltest', 'jarque_bera', 'itemfreq',
'scoreatpercentile', 'percentileofscore', 'histogram',
'histogram2', 'cumfreq', 'relfreq', 'obrientransform',
'signaltonoise', 'sem', 'zmap', 'zscore', 'iqr', 'threshold',
'sigmaclip', 'trimboth', 'trim1', 'trim_mean', 'f_oneway',
'pearsonr', 'fisher_exact', 'spearmanr', 'pointbiserialr',
'kendalltau', 'weightedtau',
'linregress', 'theilslopes', 'ttest_1samp',
'ttest_ind', 'ttest_ind_from_stats', 'ttest_rel', 'kstest',
'chisquare', 'power_divergence', 'ks_2samp', 'mannwhitneyu',
'tiecorrect', 'ranksums', 'kruskal', 'friedmanchisquare',
'chisqprob', 'betai',
'f_value_wilks_lambda', 'f_value', 'f_value_multivariate',
'ss', 'square_of_sums', 'fastsort', 'rankdata',
'combine_pvalues', ]
def _chk_asarray(a, axis):
if axis is None:
a = np.ravel(a)
outaxis = 0
else:
a = np.asarray(a)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
return a, outaxis
def _chk2_asarray(a, b, axis):
if axis is None:
a = np.ravel(a)
b = np.ravel(b)
outaxis = 0
else:
a = np.asarray(a)
b = np.asarray(b)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
if b.ndim == 0:
b = np.atleast_1d(b)
return a, b, outaxis
def _contains_nan(a, nan_policy='propagate'):
policies = ['propagate', 'raise', 'omit']
if nan_policy not in policies:
raise ValueError("nan_policy must be one of {%s}" %
', '.join("'%s'" % s for s in policies))
try:
# Calling np.sum to avoid creating a huge array into memory
# e.g. np.isnan(a).any()
with np.errstate(invalid='ignore'):
contains_nan = np.isnan(np.sum(a))
except TypeError:
        # If the check cannot be properly performed we fall back to omitting
# nan values and raising a warning. This can happen when attempting to
# sum things that are not numbers (e.g. as in the function `mode`).
contains_nan = False
nan_policy = 'omit'
warnings.warn("The input array could not be properly checked for nan "
"values. nan values will be ignored.", RuntimeWarning)
if contains_nan and nan_policy == 'raise':
raise ValueError("The input contains nan values")
return (contains_nan, nan_policy)
def gmean(a, axis=0, dtype=None):
"""
Compute the geometric mean along the specified axis.
Returns the geometric average of the array elements.
That is: n-th root of (x1 * x2 * ... * xn)
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int or None, optional
Axis along which the geometric mean is computed. Default is 0.
If None, compute over the whole array `a`.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If dtype is not specified, it defaults to the
dtype of a, unless a has an integer dtype with a precision less than
that of the default platform integer. In that case, the default
platform integer is used.
Returns
-------
gmean : ndarray
see dtype parameter above
See Also
--------
numpy.mean : Arithmetic average
numpy.average : Weighted average
hmean : Harmonic mean
Notes
-----
The geometric average is computed over a single dimension of the input
array, axis=0 by default, or all values in the array if axis=None.
float64 intermediate and return values are used for integer inputs.
Use masked arrays to ignore any non-finite values in the input or that
arise in the calculations such as Not a Number and infinity because masked
arrays automatically mask any non-finite values.
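    Examples
    --------
    A small illustrative check; ``np.allclose`` is used so that the exact
    floating-point representation of the result does not matter:
    >>> import numpy as np
    >>> from scipy import stats
    >>> np.allclose(stats.gmean([1, 2, 4, 8]), (1 * 2 * 4 * 8) ** 0.25)
    True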
"""
if not isinstance(a, np.ndarray):
# if not an ndarray object attempt to convert it
log_a = np.log(np.array(a, dtype=dtype))
elif dtype:
# Must change the default dtype allowing array type
if isinstance(a, np.ma.MaskedArray):
log_a = np.log(np.ma.asarray(a, dtype=dtype))
else:
log_a = np.log(np.asarray(a, dtype=dtype))
else:
log_a = np.log(a)
return np.exp(log_a.mean(axis=axis))
def hmean(a, axis=0, dtype=None):
"""
Calculates the harmonic mean along the specified axis.
That is: n / (1/x1 + 1/x2 + ... + 1/xn)
Parameters
----------
a : array_like
Input array, masked array or object that can be converted to an array.
axis : int or None, optional
Axis along which the harmonic mean is computed. Default is 0.
If None, compute over the whole array `a`.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults to the
dtype of `a`, unless `a` has an integer `dtype` with a precision less
than that of the default platform integer. In that case, the default
platform integer is used.
Returns
-------
hmean : ndarray
see `dtype` parameter above
See Also
--------
numpy.mean : Arithmetic average
numpy.average : Weighted average
gmean : Geometric mean
Notes
-----
The harmonic mean is computed over a single dimension of the input
array, axis=0 by default, or all values in the array if axis=None.
float64 intermediate and return values are used for integer inputs.
Use masked arrays to ignore any non-finite values in the input or that
arise in the calculations such as Not a Number and infinity.
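    Examples
    --------
    A brief illustrative check against the formula above:
    >>> import numpy as np
    >>> from scipy import stats
    >>> np.allclose(stats.hmean([1, 2, 4]), 3 / (1 / 1 + 1 / 2 + 1 / 4))
    True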
"""
if not isinstance(a, np.ndarray):
a = np.array(a, dtype=dtype)
if np.all(a > 0):
# Harmonic mean only defined if greater than zero
if isinstance(a, np.ma.MaskedArray):
size = a.count(axis)
else:
if axis is None:
a = a.ravel()
size = a.shape[0]
else:
size = a.shape[axis]
return size / np.sum(1.0/a, axis=axis, dtype=dtype)
else:
raise ValueError("Harmonic mean only defined if all elements greater than zero")
ModeResult = namedtuple('ModeResult', ('mode', 'count'))
def mode(a, axis=0, nan_policy='propagate'):
"""
Returns an array of the modal (most common) value in the passed array.
If there is more than one such value, only the smallest is returned.
The bin-count for the modal bins is also returned.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
mode : ndarray
Array of modal values.
count : ndarray
Array of counts for each mode.
Examples
--------
>>> a = np.array([[6, 8, 3, 0],
... [3, 2, 1, 7],
... [8, 1, 8, 4],
... [5, 3, 0, 5],
... [4, 7, 5, 9]])
>>> from scipy import stats
>>> stats.mode(a)
(array([[3, 1, 0, 0]]), array([[1, 1, 1, 1]]))
To get mode of whole array, specify ``axis=None``:
>>> stats.mode(a, axis=None)
(array([3]), array([3]))
"""
a, axis = _chk_asarray(a, axis)
if a.size == 0:
return ModeResult(np.array([]), np.array([]))
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.mode(a, axis)
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape, dtype=a.dtype)
oldcounts = np.zeros(testshape, dtype=int)
for score in scores:
template = (a == score)
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return ModeResult(mostfrequent, oldcounts)
def _mask_to_limits(a, limits, inclusive):
"""Mask an array for values outside of given limits.
This is primarily a utility function.
Parameters
----------
a : array
limits : (float or None, float or None)
A tuple consisting of the (lower limit, upper limit). Values in the
input array less than the lower limit or greater than the upper limit
will be masked out. None implies no limit.
inclusive : (bool, bool)
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to lower or upper are allowed.
Returns
-------
A MaskedArray.
Raises
------
A ValueError if there are no values within the given limits.
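    Examples
    --------
    An illustrative call on this private helper; values outside the closed
    interval [1, 3] are masked out:
    >>> import numpy as np
    >>> from scipy.stats.stats import _mask_to_limits
    >>> _mask_to_limits(np.arange(5), (1, 3), (True, True)).compressed()
    array([1, 2, 3])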
"""
lower_limit, upper_limit = limits
lower_include, upper_include = inclusive
am = ma.MaskedArray(a)
if lower_limit is not None:
if lower_include:
am = ma.masked_less(am, lower_limit)
else:
am = ma.masked_less_equal(am, lower_limit)
if upper_limit is not None:
if upper_include:
am = ma.masked_greater(am, upper_limit)
else:
am = ma.masked_greater_equal(am, upper_limit)
if am.count() == 0:
raise ValueError("No array values within given limits")
return am
def tmean(a, limits=None, inclusive=(True, True), axis=None):
"""
Compute the trimmed mean.
This function finds the arithmetic mean of given values, ignoring values
outside the given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None (default), then all
values are used. Either of the limit values in the tuple can also be
None representing a half-open interval.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to compute test. Default is None.
Returns
-------
tmean : float
See also
--------
trim_mean : returns mean after trimming a proportion from both tails.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmean(x)
9.5
>>> stats.tmean(x, (3,17))
10.0
"""
a = asarray(a)
if limits is None:
return np.mean(a, None)
am = _mask_to_limits(a.ravel(), limits, inclusive)
return am.mean(axis=axis)
def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed variance
This function computes the sample variance of an array of values,
while ignoring values which are outside of given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tvar : float
Trimmed variance.
Notes
-----
`tvar` computes the unbiased sample variance, i.e. it uses a correction
factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tvar(x)
35.0
>>> stats.tvar(x, (3,17))
20.0
"""
a = asarray(a)
a = a.astype(float).ravel()
if limits is None:
n = len(a)
return a.var() * n/(n-1.)
am = _mask_to_limits(a, limits, inclusive)
return np.ma.var(am, ddof=ddof, axis=axis)
def tmin(a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
"""
Compute the trimmed minimum
    This function finds the minimum value of an array `a` along the
specified axis, but only considering values greater than a specified
lower limit.
Parameters
----------
a : array_like
array of values
lowerlimit : None or float, optional
Values in the input array less than the given limit will be ignored.
When lowerlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the lower limit
are included. The default value is True.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
tmin : float, int or ndarray
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmin(x)
0
>>> stats.tmin(x, 13)
13
>>> stats.tmin(x, 13, inclusive=False)
14
"""
a, axis = _chk_asarray(a, axis)
am = _mask_to_limits(a, (lowerlimit, None), (inclusive, False))
contains_nan, nan_policy = _contains_nan(am, nan_policy)
if contains_nan and nan_policy == 'omit':
am = ma.masked_invalid(am)
res = ma.minimum.reduce(am, axis).data
if res.ndim == 0:
return res[()]
return res
def tmax(a, upperlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
"""
Compute the trimmed maximum
This function computes the maximum value of an array along a given axis,
while ignoring values larger than a specified upper limit.
Parameters
----------
a : array_like
array of values
upperlimit : None or float, optional
Values in the input array greater than the given limit will be ignored.
When upperlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the upper limit
are included. The default value is True.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
tmax : float, int or ndarray
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmax(x)
19
>>> stats.tmax(x, 13)
13
>>> stats.tmax(x, 13, inclusive=False)
12
"""
a, axis = _chk_asarray(a, axis)
am = _mask_to_limits(a, (None, upperlimit), (False, inclusive))
contains_nan, nan_policy = _contains_nan(am, nan_policy)
if contains_nan and nan_policy == 'omit':
am = ma.masked_invalid(am)
res = ma.maximum.reduce(am, axis).data
if res.ndim == 0:
return res[()]
return res
def tstd(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed sample standard deviation
This function finds the sample standard deviation of given values,
ignoring values outside the given `limits`.
Parameters
----------
a : array_like
array of values
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tstd : float
Notes
-----
`tstd` computes the unbiased sample standard deviation, i.e. it uses a
correction factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tstd(x)
5.9160797830996161
>>> stats.tstd(x, (3,17))
4.4721359549995796
"""
return np.sqrt(tvar(a, limits, inclusive, axis, ddof))
def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed standard error of the mean.
This function finds the standard error of the mean for given
values, ignoring values outside the given `limits`.
Parameters
----------
a : array_like
array of values
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tsem : float
Notes
-----
`tsem` uses unbiased sample standard deviation, i.e. it uses a
correction factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tsem(x)
1.3228756555322954
>>> stats.tsem(x, (3,17))
1.1547005383792515
"""
a = np.asarray(a).ravel()
if limits is None:
return a.std(ddof=ddof) / np.sqrt(a.size)
am = _mask_to_limits(a, limits, inclusive)
sd = np.sqrt(np.ma.var(am, ddof=ddof, axis=axis))
return sd / np.sqrt(am.count())
#####################################
# MOMENTS #
#####################################
def moment(a, moment=1, axis=0, nan_policy='propagate'):
r"""
Calculates the nth moment about the mean for a sample.
A moment is a specific quantitative measure of the shape of a set of points.
It is often used to calculate coefficients of skewness and kurtosis due
to its close relationship with them.
Parameters
----------
a : array_like
data
moment : int or array_like of ints, optional
order of central moment that is returned. Default is 1.
axis : int or None, optional
Axis along which the central moment is computed. Default is 0.
If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
n-th central moment : ndarray or float
The appropriate moment along the given axis or over all values if axis
is None. The denominator for the moment calculation is the number of
observations, no degrees of freedom correction is done.
See also
--------
kurtosis, skew, describe
Notes
-----
The k-th central moment of a data sample is:
.. math::
m_k = \frac{1}{n} \sum_{i = 1}^n (x_i - \bar{x})^k
Where n is the number of samples and x-bar is the mean. This function uses
exponentiation by squares [1]_ for efficiency.
References
----------
.. [1] http://eli.thegreenplace.net/2009/03/21/efficient-integer-exponentiation-algorithms
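    Examples
    --------
    A short illustrative check of the formula above; the second central
    moment of ``[1, 2, 3, 4]`` is the biased variance, 1.25:
    >>> from scipy import stats
    >>> stats.moment([1, 2, 3, 4], moment=2)
    1.25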
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.moment(a, moment, axis)
if a.size == 0:
# empty array, return nan(s) with shape matching `moment`
if np.isscalar(moment):
return np.nan
else:
return np.ones(np.asarray(moment).shape, dtype=np.float64) * np.nan
# for array_like moment input, return a value for each.
if not np.isscalar(moment):
mmnt = [_moment(a, i, axis) for i in moment]
return np.array(mmnt)
else:
return _moment(a, moment, axis)
def _moment(a, moment, axis):
if np.abs(moment - np.round(moment)) > 0:
raise ValueError("All moment parameters must be integers")
if moment == 0:
# When moment equals 0, the result is 1, by definition.
shape = list(a.shape)
del shape[axis]
if shape:
# return an actual array of the appropriate shape
return np.ones(shape, dtype=float)
else:
# the input was 1D, so return a scalar instead of a rank-0 array
return 1.0
elif moment == 1:
# By definition the first moment about the mean is 0.
shape = list(a.shape)
del shape[axis]
if shape:
# return an actual array of the appropriate shape
return np.zeros(shape, dtype=float)
else:
# the input was 1D, so return a scalar instead of a rank-0 array
return np.float64(0.0)
else:
# Exponentiation by squares: form exponent sequence
n_list = [moment]
current_n = moment
while current_n > 2:
if current_n % 2:
current_n = (current_n-1)/2
else:
current_n /= 2
n_list.append(current_n)
# Starting point for exponentiation by squares
a_zero_mean = a - np.expand_dims(np.mean(a, axis), axis)
if n_list[-1] == 1:
s = a_zero_mean.copy()
else:
s = a_zero_mean**2
# Perform multiplications
for n in n_list[-2::-1]:
s = s**2
if n % 2:
s *= a_zero_mean
return np.mean(s, axis)
def variation(a, axis=0, nan_policy='propagate'):
"""
Computes the coefficient of variation, the ratio of the biased standard
deviation to the mean.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate the coefficient of variation. Default
is 0. If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
variation : ndarray
The calculated variation along the requested axis.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
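    Examples
    --------
    A quick illustrative check (biased standard deviation divided by the mean):
    >>> import numpy as np
    >>> from scipy import stats
    >>> x = [1, 2, 3, 4]
    >>> np.allclose(stats.variation(x), np.std(x) / np.mean(x))
    True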
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.variation(a, axis)
return a.std(axis) / a.mean(axis)
def skew(a, axis=0, bias=True, nan_policy='propagate'):
"""
Computes the skewness of a data set.
For normally distributed data, the skewness should be about 0. A skewness
    value > 0 means that there is more weight in the right tail of the
distribution. The function `skewtest` can be used to determine if the
skewness value is close enough to 0, statistically speaking.
Parameters
----------
a : ndarray
data
axis : int or None, optional
Axis along which skewness is calculated. Default is 0.
If None, compute over the whole array `a`.
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
skewness : ndarray
The skewness of values along an axis, returning 0 where all values are
equal.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 2.2.24.1
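    Examples
    --------
    For a perfectly symmetric sample the skewness is zero:
    >>> from scipy import stats
    >>> stats.skew([1, 2, 3])
    0.0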
"""
a, axis = _chk_asarray(a, axis)
n = a.shape[axis]
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.skew(a, axis, bias)
m2 = moment(a, 2, axis)
m3 = moment(a, 3, axis)
zero = (m2 == 0)
vals = _lazywhere(~zero, (m2, m3),
lambda m2, m3: m3 / m2**1.5,
0.)
if not bias:
can_correct = (n > 2) & (m2 > 0)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m3 = np.extract(can_correct, m3)
nval = np.sqrt((n-1.0)*n) / (n-2.0) * m3/m2**1.5
np.place(vals, can_correct, nval)
if vals.ndim == 0:
return vals.item()
return vals
def kurtosis(a, axis=0, fisher=True, bias=True, nan_policy='propagate'):
"""
Computes the kurtosis (Fisher or Pearson) of a dataset.
Kurtosis is the fourth central moment divided by the square of the
variance. If Fisher's definition is used, then 3.0 is subtracted from
the result to give 0.0 for a normal distribution.
If bias is False then the kurtosis is calculated using k statistics to
    eliminate bias coming from biased moment estimators.
Use `kurtosistest` to see if result is close enough to normal.
Parameters
----------
a : array
data for which the kurtosis is calculated
axis : int or None, optional
Axis along which the kurtosis is calculated. Default is 0.
If None, compute over the whole array `a`.
fisher : bool, optional
If True, Fisher's definition is used (normal ==> 0.0). If False,
Pearson's definition is used (normal ==> 3.0).
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
kurtosis : array
The kurtosis of values along an axis. If all values are equal,
return -3 for Fisher's definition and 0 for Pearson's definition.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
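    Examples
    --------
    As noted above, when all values are equal the Fisher kurtosis is -3 and
    the Pearson kurtosis is 0:
    >>> from scipy import stats
    >>> stats.kurtosis([2, 2, 2, 2])
    -3.0
    >>> stats.kurtosis([2, 2, 2, 2], fisher=False)
    0.0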
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.kurtosis(a, axis, fisher, bias)
n = a.shape[axis]
m2 = moment(a, 2, axis)
m4 = moment(a, 4, axis)
zero = (m2 == 0)
olderr = np.seterr(all='ignore')
try:
vals = np.where(zero, 0, m4 / m2**2.0)
finally:
np.seterr(**olderr)
if not bias:
can_correct = (n > 3) & (m2 > 0)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m4 = np.extract(can_correct, m4)
nval = 1.0/(n-2)/(n-3) * ((n**2-1.0)*m4/m2**2.0 - 3*(n-1)**2.0)
np.place(vals, can_correct, nval + 3.0)
if vals.ndim == 0:
vals = vals.item() # array scalar
if fisher:
return vals - 3
else:
return vals
DescribeResult = namedtuple('DescribeResult',
('nobs', 'minmax', 'mean', 'variance', 'skewness',
'kurtosis'))
def describe(a, axis=0, ddof=1, bias=True, nan_policy='propagate'):
"""
Computes several descriptive statistics of the passed array.
Parameters
----------
a : array_like
Input data.
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
ddof : int, optional
Delta degrees of freedom (only for variance). Default is 1.
bias : bool, optional
If False, then the skewness and kurtosis calculations are corrected for
statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
nobs : int
Number of observations (length of data along `axis`).
minmax: tuple of ndarrays or floats
Minimum and maximum value of data array.
mean : ndarray or float
Arithmetic mean of data along axis.
variance : ndarray or float
Unbiased variance of the data along axis, denominator is number of
observations minus one.
skewness : ndarray or float
Skewness, based on moment calculations with denominator equal to
the number of observations, i.e. no degrees of freedom correction.
kurtosis : ndarray or float
Kurtosis (Fisher). The kurtosis is normalized so that it is
zero for the normal distribution. No degrees of freedom are used.
See Also
--------
skew, kurtosis
Examples
--------
>>> from scipy import stats
>>> a = np.arange(10)
>>> stats.describe(a)
DescribeResult(nobs=10, minmax=(0, 9), mean=4.5, variance=9.1666666666666661,
skewness=0.0, kurtosis=-1.2242424242424244)
>>> b = [[1, 2], [3, 4]]
>>> stats.describe(b)
DescribeResult(nobs=2, minmax=(array([1, 2]), array([3, 4])),
mean=array([ 2., 3.]), variance=array([ 2., 2.]),
skewness=array([ 0., 0.]), kurtosis=array([-2., -2.]))
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.describe(a, axis, ddof, bias)
if a.size == 0:
raise ValueError("The input must not be empty.")
n = a.shape[axis]
mm = (np.min(a, axis=axis), np.max(a, axis=axis))
m = np.mean(a, axis=axis)
v = np.var(a, axis=axis, ddof=ddof)
sk = skew(a, axis, bias=bias)
kurt = kurtosis(a, axis, bias=bias)
return DescribeResult(n, mm, m, v, sk, kurt)
#####################################
# NORMALITY TESTS #
#####################################
SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue'))
def skewtest(a, axis=0, nan_policy='propagate'):
"""
Tests whether the skew is different from the normal distribution.
This function tests the null hypothesis that the skewness of
the population that the sample was drawn from is the same
as that of a corresponding normal distribution.
Parameters
----------
a : array
The data to be tested
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
a 2-sided p-value for the hypothesis test
Notes
-----
The sample size must be at least 8.
References
----------
.. [1] R. B. D'Agostino, A. J. Belanger and R. B. D'Agostino Jr.,
"A suggestion for using powerful and informative tests of
normality", American Statistician 44, pp. 316-321, 1990.
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.skewtest(a, axis)
if axis is None:
a = np.ravel(a)
axis = 0
b2 = skew(a, axis)
n = float(a.shape[axis])
if n < 8:
raise ValueError(
"skewtest is not valid with less than 8 samples; %i samples"
" were given." % int(n))
y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2)))
beta2 = (3.0 * (n**2 + 27*n - 70) * (n+1) * (n+3) /
((n-2.0) * (n+5) * (n+7) * (n+9)))
W2 = -1 + math.sqrt(2 * (beta2 - 1))
delta = 1 / math.sqrt(0.5 * math.log(W2))
alpha = math.sqrt(2.0 / (W2 - 1))
y = np.where(y == 0, 1, y)
Z = delta * np.log(y / alpha + np.sqrt((y / alpha)**2 + 1))
return SkewtestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
KurtosistestResult = namedtuple('KurtosistestResult', ('statistic', 'pvalue'))
def kurtosistest(a, axis=0, nan_policy='propagate'):
"""
Tests whether a dataset has normal kurtosis
This function tests the null hypothesis that the kurtosis
of the population from which the sample was drawn is that
of the normal distribution: ``kurtosis = 3(n-1)/(n+1)``.
Parameters
----------
a : array
array of the sample data
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
The 2-sided p-value for the hypothesis test
Notes
-----
Valid only for n>20. The Z-score is set to 0 for bad entries.
This function uses the method described in [1]_.
References
----------
.. [1] see e.g. F. J. Anscombe, W. J. Glynn, "Distribution of the kurtosis
statistic b2 for normal samples", Biometrika, vol. 70, pp. 227-234, 1983.
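    Examples
    --------
    A minimal illustrative call (20 or more observations avoid the
    small-sample warning); the returned p-value always lies in [0, 1]:
    >>> import numpy as np
    >>> from scipy import stats
    >>> result = stats.kurtosistest(np.arange(20.))
    >>> 0 <= result.pvalue <= 1
    True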
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.kurtosistest(a, axis)
n = float(a.shape[axis])
if n < 5:
raise ValueError(
"kurtosistest requires at least 5 observations; %i observations"
" were given." % int(n))
if n < 20:
warnings.warn("kurtosistest only valid for n>=20 ... continuing "
"anyway, n=%i" % int(n))
b2 = kurtosis(a, axis, fisher=False)
E = 3.0*(n-1) / (n+1)
varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5)) # [1]_ Eq. 1
x = (b2-E) / np.sqrt(varb2) # [1]_ Eq. 4
# [1]_ Eq. 2:
sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) /
(n*(n-2)*(n-3)))
# [1]_ Eq. 3:
A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2)))
term1 = 1 - 2/(9.0*A)
denom = 1 + x*np.sqrt(2/(A-4.0))
denom = np.where(denom < 0, 99, denom)
term2 = np.where(denom < 0, term1, np.power((1-2.0/A)/denom, 1/3.0))
Z = (term1 - term2) / np.sqrt(2/(9.0*A)) # [1]_ Eq. 5
Z = np.where(denom == 99, 0, Z)
if Z.ndim == 0:
Z = Z[()]
# zprob uses upper tail, so Z needs to be positive
return KurtosistestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue'))
def normaltest(a, axis=0, nan_policy='propagate'):
"""
Tests whether a sample differs from a normal distribution.
This function tests the null hypothesis that a sample comes
from a normal distribution. It is based on D'Agostino and
Pearson's [1]_, [2]_ test that combines skew and kurtosis to
produce an omnibus test of normality.
Parameters
----------
a : array_like
The array containing the data to be tested.
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
``s^2 + k^2``, where ``s`` is the z-score returned by `skewtest` and
``k`` is the z-score returned by `kurtosistest`.
pvalue : float or array
A 2-sided chi squared probability for the hypothesis test.
References
----------
.. [1] D'Agostino, R. B. (1971), "An omnibus test of normality for
moderate and large sample size", Biometrika, 58, 341-348
.. [2] D'Agostino, R. and Pearson, E. S. (1973), "Tests for departure from
normality", Biometrika, 60, 613-622
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.normaltest(a, axis)
s, _ = skewtest(a, axis)
k, _ = kurtosistest(a, axis)
k2 = s*s + k*k
return NormaltestResult(k2, distributions.chi2.sf(k2, 2))
def jarque_bera(x):
"""
Perform the Jarque-Bera goodness of fit test on sample data.
The Jarque-Bera test tests whether the sample data has the skewness and
kurtosis matching a normal distribution.
Note that this test only works for a large enough number of data samples
(>2000) as the test statistic asymptotically has a Chi-squared distribution
with 2 degrees of freedom.
Parameters
----------
x : array_like
Observations of a random variable.
Returns
-------
jb_value : float
The test statistic.
p : float
The p-value for the hypothesis test.
References
----------
.. [1] Jarque, C. and Bera, A. (1980) "Efficient tests for normality,
homoscedasticity and serial independence of regression residuals",
6 Econometric Letters 255-259.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(987654321)
>>> x = np.random.normal(0, 1, 100000)
>>> y = np.random.rayleigh(1, 100000)
>>> stats.jarque_bera(x)
(4.7165707989581342, 0.09458225503041906)
>>> stats.jarque_bera(y)
(6713.7098548143422, 0.0)
"""
x = np.asarray(x)
n = float(x.size)
if n == 0:
raise ValueError('At least one observation is required.')
mu = x.mean()
diffx = x - mu
skewness = (1 / n * np.sum(diffx**3)) / (1 / n * np.sum(diffx**2))**(3 / 2.)
kurtosis = (1 / n * np.sum(diffx**4)) / (1 / n * np.sum(diffx**2))**2
jb_value = n / 6 * (skewness**2 + (kurtosis - 3)**2 / 4)
p = 1 - distributions.chi2.cdf(jb_value, 2)
return jb_value, p
#####################################
# FREQUENCY FUNCTIONS #
#####################################
def itemfreq(a):
"""
Returns a 2-D array of item frequencies.
Parameters
----------
a : (N,) array_like
Input array.
Returns
-------
itemfreq : (K, 2) ndarray
A 2-D frequency table. Column 1 contains sorted, unique values from
`a`, column 2 contains their respective counts.
Examples
--------
>>> from scipy import stats
>>> a = np.array([1, 1, 5, 0, 1, 2, 2, 0, 1, 4])
>>> stats.itemfreq(a)
array([[ 0., 2.],
[ 1., 4.],
[ 2., 2.],
[ 4., 1.],
[ 5., 1.]])
>>> np.bincount(a)
array([2, 4, 2, 0, 1, 1])
>>> stats.itemfreq(a/10.)
array([[ 0. , 2. ],
[ 0.1, 4. ],
[ 0.2, 2. ],
[ 0.4, 1. ],
[ 0.5, 1. ]])
"""
items, inv = np.unique(a, return_inverse=True)
freq = np.bincount(inv)
return np.array([items, freq]).T
def scoreatpercentile(a, per, limit=(), interpolation_method='fraction',
axis=None):
"""
Calculate the score at a given percentile of the input sequence.
For example, the score at `per=50` is the median. If the desired quantile
lies between two data points, we interpolate between them, according to
    the value of `interpolation_method`. If the parameter `limit` is
    provided, it should be a tuple (lower, upper) of two values.
Parameters
----------
a : array_like
A 1-D array of values from which to extract score.
per : array_like
Percentile(s) at which to extract score. Values should be in range
[0,100].
limit : tuple, optional
Tuple of two scalars, the lower and upper limits within which to
compute the percentile. Values of `a` outside
this (closed) interval will be ignored.
interpolation_method : {'fraction', 'lower', 'higher'}, optional
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`
- fraction: ``i + (j - i) * fraction`` where ``fraction`` is the
fractional part of the index surrounded by ``i`` and ``j``.
- lower: ``i``.
- higher: ``j``.
axis : int, optional
Axis along which the percentiles are computed. Default is None. If
None, compute over the whole array `a`.
Returns
-------
score : float or ndarray
Score at percentile(s).
See Also
--------
percentileofscore, numpy.percentile
Notes
-----
This function will become obsolete in the future.
For Numpy 1.9 and higher, `numpy.percentile` provides all the functionality
    that `scoreatpercentile` provides, and it is significantly faster.
    Therefore, users with numpy >= 1.9 are recommended to use
    `numpy.percentile` instead.
Examples
--------
>>> from scipy import stats
>>> a = np.arange(100)
>>> stats.scoreatpercentile(a, 50)
49.5
"""
# adapted from NumPy's percentile function. When we require numpy >= 1.8,
# the implementation of this function can be replaced by np.percentile.
a = np.asarray(a)
if a.size == 0:
# empty array, return nan(s) with shape matching `per`
if np.isscalar(per):
return np.nan
else:
return np.ones(np.asarray(per).shape, dtype=np.float64) * np.nan
if limit:
a = a[(limit[0] <= a) & (a <= limit[1])]
sorted = np.sort(a, axis=axis)
if axis is None:
axis = 0
return _compute_qth_percentile(sorted, per, interpolation_method, axis)
# handle sequence of per's without calling sort multiple times
def _compute_qth_percentile(sorted, per, interpolation_method, axis):
if not np.isscalar(per):
score = [_compute_qth_percentile(sorted, i, interpolation_method, axis)
for i in per]
return np.array(score)
if (per < 0) or (per > 100):
raise ValueError("percentile must be in the range [0, 100]")
indexer = [slice(None)] * sorted.ndim
idx = per / 100. * (sorted.shape[axis] - 1)
if int(idx) != idx:
# round fractional indices according to interpolation method
if interpolation_method == 'lower':
idx = int(np.floor(idx))
elif interpolation_method == 'higher':
idx = int(np.ceil(idx))
elif interpolation_method == 'fraction':
pass # keep idx as fraction and interpolate
else:
raise ValueError("interpolation_method can only be 'fraction', "
"'lower' or 'higher'")
i = int(idx)
if i == idx:
indexer[axis] = slice(i, i + 1)
weights = array(1)
sumval = 1.0
else:
indexer[axis] = slice(i, i + 2)
j = i + 1
weights = array([(j - idx), (idx - i)], float)
wshape = [1] * sorted.ndim
wshape[axis] = 2
weights.shape = wshape
sumval = weights.sum()
# Use np.add.reduce (== np.sum but a little faster) to coerce data type
return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval
def percentileofscore(a, score, kind='rank'):
"""
The percentile rank of a score relative to a list of scores.
A `percentileofscore` of, for example, 80% means that 80% of the
scores in `a` are below the given score. In the case of gaps or
ties, the exact definition depends on the optional keyword, `kind`.
Parameters
----------
a : array_like
Array of scores to which `score` is compared.
score : int or float
Score that is compared to the elements in `a`.
kind : {'rank', 'weak', 'strict', 'mean'}, optional
This optional parameter specifies the interpretation of the
resulting score:
- "rank": Average percentage ranking of score. In case of
multiple matches, average the percentage rankings of
all matching scores.
- "weak": This kind corresponds to the definition of a cumulative
distribution function. A percentileofscore of 80%
means that 80% of values are less than or equal
to the provided score.
- "strict": Similar to "weak", except that only values that are
strictly less than the given score are counted.
- "mean": The average of the "weak" and "strict" scores, often used in
testing. See
http://en.wikipedia.org/wiki/Percentile_rank
Returns
-------
pcos : float
Percentile-position of score (0-100) relative to `a`.
See Also
--------
numpy.percentile
Examples
--------
Three-quarters of the given values lie below a given score:
>>> from scipy import stats
>>> stats.percentileofscore([1, 2, 3, 4], 3)
75.0
With multiple matches, note how the scores of the two matches, 0.6
and 0.8 respectively, are averaged:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3)
70.0
Only 2/5 values are strictly less than 3:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='strict')
40.0
But 4/5 values are less than or equal to 3:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='weak')
80.0
The average between the weak and the strict scores is
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='mean')
60.0
"""
a = np.array(a)
n = len(a)
if kind == 'rank':
if not np.any(a == score):
a = np.append(a, score)
a_len = np.array(list(range(len(a))))
else:
a_len = np.array(list(range(len(a)))) + 1.0
a = np.sort(a)
idx = [a == score]
pct = (np.mean(a_len[idx]) / n) * 100.0
return pct
elif kind == 'strict':
return np.sum(a < score) / float(n) * 100
elif kind == 'weak':
return np.sum(a <= score) / float(n) * 100
elif kind == 'mean':
return (np.sum(a < score) + np.sum(a <= score)) * 50 / float(n)
else:
raise ValueError("kind can only be 'rank', 'strict', 'weak' or 'mean'")
@np.deprecate(message=("scipy.stats.histogram2 is deprecated in scipy 0.16.0; "
"use np.histogram2d instead"))
def histogram2(a, bins):
"""
Compute histogram using divisions in bins.
Count the number of times values from array `a` fall into
numerical ranges defined by `bins`. Range x is given by
    bins[x] <= range_x < bins[x+1] where x = 0, N and N is the
length of the `bins` array. The last range is given by
bins[N] <= range_N < infinity. Values less than bins[0] are
not included in the histogram.
Parameters
----------
a : array_like of rank 1
The array of values to be assigned into bins
bins : array_like of rank 1
Defines the ranges of values to use during histogramming.
Returns
-------
histogram2 : ndarray of rank 1
Each value represents the occurrences for a given bin (range) of
values.
"""
# comment: probably obsoleted by numpy.histogram()
n = np.searchsorted(np.sort(a), bins)
n = np.concatenate([n, [len(a)]])
return n[1:] - n[:-1]
HistogramResult = namedtuple('HistogramResult',
('count', 'lowerlimit', 'binsize', 'extrapoints'))
@np.deprecate(message=("scipy.stats.histogram is deprecated in scipy 0.17.0; "
"use np.histogram instead"))
def histogram(a, numbins=10, defaultlimits=None, weights=None, printextras=False):
# _histogram is used in relfreq/cumfreq, so need to keep it
res = _histogram(a, numbins=numbins, defaultlimits=defaultlimits,
weights=weights, printextras=printextras)
return res
def _histogram(a, numbins=10, defaultlimits=None, weights=None, printextras=False):
"""
Separates the range into several bins and returns the number of instances
in each bin.
Parameters
----------
a : array_like
Array of scores which will be put into bins.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultlimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in a is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
printextras : bool, optional
If True, if there are extra points (i.e. the points that fall outside
the bin limits) a warning is raised saying how many of those points
there are. Default is False.
Returns
-------
count : ndarray
Number of points (or sum of weights) in each bin.
lowerlimit : float
Lowest value of histogram, the lower limit of the first bin.
binsize : float
The size of the bins (all bins have the same size).
extrapoints : int
The number of points outside the range of the histogram.
See Also
--------
numpy.histogram
Notes
-----
This histogram is based on numpy's histogram but has a larger range by
default if default limits is not set.
"""
a = np.ravel(a)
if defaultlimits is None:
if a.size == 0:
# handle empty arrays. Undetermined range, so use 0-1.
defaultlimits = (0, 1)
else:
# no range given, so use values in `a`
data_min = a.min()
data_max = a.max()
# Have bins extend past min and max values slightly
s = (data_max - data_min) / (2. * (numbins - 1.))
defaultlimits = (data_min - s, data_max + s)
# use numpy's histogram method to compute bins
hist, bin_edges = np.histogram(a, bins=numbins, range=defaultlimits,
weights=weights)
# hist are not always floats, convert to keep with old output
hist = np.array(hist, dtype=float)
# fixed width for bins is assumed, as numpy's histogram gives
# fixed width bins for int values for 'bins'
binsize = bin_edges[1] - bin_edges[0]
# calculate number of extra points
extrapoints = len([v for v in a
if defaultlimits[0] > v or v > defaultlimits[1]])
if extrapoints > 0 and printextras:
warnings.warn("Points outside given histogram range = %s"
% extrapoints)
return HistogramResult(hist, defaultlimits[0], binsize, extrapoints)
CumfreqResult = namedtuple('CumfreqResult',
('cumcount', 'lowerlimit', 'binsize',
'extrapoints'))
def cumfreq(a, numbins=10, defaultreallimits=None, weights=None):
"""
Returns a cumulative frequency histogram, using the histogram function.
A cumulative histogram is a mapping that counts the cumulative number of
observations in all of the bins up to the specified bin.
Parameters
----------
a : array_like
Input array.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultreallimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in `a` is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
Returns
-------
cumcount : ndarray
Binned values of cumulative frequency.
lowerlimit : float
Lower real limit
binsize : float
Width of each bin.
extrapoints : int
Extra points.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> x = [1, 4, 2, 1, 3, 1]
>>> res = stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5))
>>> res.cumcount
array([ 1., 2., 3., 3.])
>>> res.extrapoints
3
Create a normal distribution with 1000 random values
>>> rng = np.random.RandomState(seed=12345)
>>> samples = stats.norm.rvs(size=1000, random_state=rng)
Calculate cumulative frequencies
>>> res = stats.cumfreq(samples, numbins=25)
Calculate space of values for x
>>> x = res.lowerlimit + np.linspace(0, res.binsize*res.cumcount.size,
... res.cumcount.size)
Plot histogram and cumulative histogram
>>> fig = plt.figure(figsize=(10, 4))
>>> ax1 = fig.add_subplot(1, 2, 1)
>>> ax2 = fig.add_subplot(1, 2, 2)
>>> ax1.hist(samples, bins=25)
>>> ax1.set_title('Histogram')
>>> ax2.bar(x, res.cumcount, width=res.binsize)
>>> ax2.set_title('Cumulative histogram')
>>> ax2.set_xlim([x.min(), x.max()])
>>> plt.show()
"""
h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights)
cumhist = np.cumsum(h * 1, axis=0)
return CumfreqResult(cumhist, l, b, e)
RelfreqResult = namedtuple('RelfreqResult',
('frequency', 'lowerlimit', 'binsize',
'extrapoints'))
def relfreq(a, numbins=10, defaultreallimits=None, weights=None):
"""
Returns a relative frequency histogram, using the histogram function.
A relative frequency histogram is a mapping of the number of
observations in each of the bins relative to the total of observations.
Parameters
----------
a : array_like
Input array.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultreallimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in a is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
Returns
-------
frequency : ndarray
Binned values of relative frequency.
lowerlimit : float
Lower real limit
binsize : float
Width of each bin.
extrapoints : int
Extra points.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> a = np.array([2, 4, 1, 2, 3, 2])
>>> res = stats.relfreq(a, numbins=4)
>>> res.frequency
array([ 0.16666667, 0.5 , 0.16666667, 0.16666667])
>>> np.sum(res.frequency) # relative frequencies should add up to 1
1.0
Create a normal distribution with 1000 random values
>>> rng = np.random.RandomState(seed=12345)
>>> samples = stats.norm.rvs(size=1000, random_state=rng)
Calculate relative frequencies
>>> res = stats.relfreq(samples, numbins=25)
Calculate space of values for x
>>> x = res.lowerlimit + np.linspace(0, res.binsize*res.frequency.size,
... res.frequency.size)
Plot relative frequency histogram
>>> fig = plt.figure(figsize=(5, 4))
>>> ax = fig.add_subplot(1, 1, 1)
>>> ax.bar(x, res.frequency, width=res.binsize)
>>> ax.set_title('Relative frequency histogram')
>>> ax.set_xlim([x.min(), x.max()])
>>> plt.show()
"""
a = np.asanyarray(a)
h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights)
h = h / float(a.shape[0])
return RelfreqResult(h, l, b, e)
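# Illustrative sketch (not part of the module API): `relfreq` divides the
# histogram counts by the total number of observations (including any points
# that fall outside the limits). The data mirror the docstring example.
def _example_relfreq_equivalence():
    import numpy as np
    a = np.array([2, 4, 1, 2, 3, 2], dtype=float)
    s = (a.max() - a.min()) / (2. * (4 - 1.))        # default half-bin padding
    hist, _ = np.histogram(a, bins=4, range=(a.min() - s, a.max() + s))
    return hist / float(a.size)                      # ~[0.1667, 0.5, 0.1667, 0.1667]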
#####################################
# VARIABILITY FUNCTIONS #
#####################################
def obrientransform(*args):
"""
Computes the O'Brien transform on input data (any number of arrays).
Used to test for homogeneity of variance prior to running one-way stats.
Each array in ``*args`` is one level of a factor.
If `f_oneway` is run on the transformed data and found significant,
the variances are unequal. From Maxwell and Delaney [1]_, p.112.
Parameters
----------
args : tuple of array_like
Any number of arrays.
Returns
-------
obrientransform : ndarray
Transformed data for use in an ANOVA. The first dimension
of the result corresponds to the sequence of transformed
arrays. If the arrays given are all 1-D of the same length,
the return value is a 2-D array; otherwise it is a 1-D array
of type object, with each element being an ndarray.
References
----------
.. [1] S. E. Maxwell and H. D. Delaney, "Designing Experiments and
Analyzing Data: A Model Comparison Perspective", Wadsworth, 1990.
Examples
--------
We'll test the following data sets for differences in their variance.
>>> x = [10, 11, 13, 9, 7, 12, 12, 9, 10]
>>> y = [13, 21, 5, 10, 8, 14, 10, 12, 7, 15]
Apply the O'Brien transform to the data.
>>> from scipy.stats import obrientransform
>>> tx, ty = obrientransform(x, y)
Use `scipy.stats.f_oneway` to apply a one-way ANOVA test to the
transformed data.
>>> from scipy.stats import f_oneway
>>> F, p = f_oneway(tx, ty)
>>> p
0.1314139477040335
If we require that ``p < 0.05`` for significance, we cannot conclude
that the variances are different.
"""
TINY = np.sqrt(np.finfo(float).eps)
# `arrays` will hold the transformed arguments.
arrays = []
for arg in args:
a = np.asarray(arg)
n = len(a)
mu = np.mean(a)
sq = (a - mu)**2
sumsq = sq.sum()
# The O'Brien transform.
t = ((n - 1.5) * n * sq - 0.5 * sumsq) / ((n - 1) * (n - 2))
# Check that the mean of the transformed data is equal to the
# original variance.
var = sumsq / (n - 1)
if abs(var - np.mean(t)) > TINY:
raise ValueError('Lack of convergence in obrientransform.')
arrays.append(t)
return np.array(arrays)
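# Illustrative sketch (not part of the module API): the convergence check in
# `obrientransform` relies on the identity mean(t) == var(a, ddof=1), which
# the transform satisfies exactly up to rounding. The data are made up.
def _example_obrientransform_identity():
    import numpy as np
    x = np.array([10, 11, 13, 9, 7, 12, 12, 9, 10], dtype=float)
    n, sq = len(x), (x - x.mean()) ** 2
    t = ((n - 1.5) * n * sq - 0.5 * sq.sum()) / ((n - 1) * (n - 2))
    return np.allclose(t.mean(), x.var(ddof=1))      # True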
@np.deprecate(message="scipy.stats.signaltonoise is deprecated in scipy 0.16.0")
def signaltonoise(a, axis=0, ddof=0):
"""
The signal-to-noise ratio of the input data.
Returns the signal-to-noise ratio of `a`, here defined as the mean
divided by the standard deviation.
Parameters
----------
a : array_like
An array_like object containing the sample data.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degrees of freedom correction for standard deviation. Default is 0.
Returns
-------
s2n : ndarray
The mean to standard deviation ratio(s) along `axis`, or 0 where the
standard deviation is 0.
"""
a = np.asanyarray(a)
m = a.mean(axis)
sd = a.std(axis=axis, ddof=ddof)
return np.where(sd == 0, 0, m/sd)
def sem(a, axis=0, ddof=1, nan_policy='propagate'):
"""
Calculates the standard error of the mean (or standard error of
measurement) of the values in the input array.
Parameters
----------
a : array_like
An array containing the values for which the standard error is
returned.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Delta degrees-of-freedom. How many degrees of freedom to adjust
for bias in limited samples relative to the population estimate
of variance. Defaults to 1.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
s : ndarray or float
The standard error of the mean in the sample(s), along the input axis.
Notes
-----
The default value for `ddof` is different from the default (0) used by other
ddof-containing routines, such as np.std and np.nanstd.
Examples
--------
Find standard error along the first axis:
>>> from scipy import stats
>>> a = np.arange(20).reshape(5,4)
>>> stats.sem(a)
array([ 2.8284, 2.8284, 2.8284, 2.8284])
Find standard error across the whole array, using n degrees of freedom:
>>> stats.sem(a, axis=None, ddof=0)
1.2893796958227628
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.sem(a, axis, ddof)
n = a.shape[axis]
s = np.std(a, axis=axis, ddof=ddof) / np.sqrt(n)
return s
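# Illustrative sketch (not part of the module API): `sem` is just the sample
# standard deviation (ddof=1 by default) divided by sqrt(n) along the axis.
# The array mirrors the docstring example.
def _example_sem_by_hand():
    import numpy as np
    a = np.arange(20).reshape(5, 4)
    return np.std(a, axis=0, ddof=1) / np.sqrt(a.shape[0])   # ~[2.8284, 2.8284, 2.8284, 2.8284]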
def zscore(a, axis=0, ddof=0):
"""
Calculates the z score of each value in the sample, relative to the
sample mean and standard deviation.
Parameters
----------
a : array_like
An array like object containing the sample data.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
Returns
-------
zscore : array_like
The z-scores, standardized by mean and standard deviation of
input array `a`.
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses `asanyarray` instead of
`asarray` for parameters).
Examples
--------
>>> a = np.array([ 0.7972, 0.0767, 0.4383, 0.7866, 0.8091,
... 0.1954, 0.6307, 0.6599, 0.1065, 0.0508])
>>> from scipy import stats
>>> stats.zscore(a)
array([ 1.1273, -1.247 , -0.0552, 1.0923, 1.1664, -0.8559, 0.5786,
0.6748, -1.1488, -1.3324])
Computing along a specified axis, using n-1 degrees of freedom
(``ddof=1``) to calculate the standard deviation:
>>> b = np.array([[ 0.3148, 0.0478, 0.6243, 0.4608],
... [ 0.7149, 0.0775, 0.6072, 0.9656],
... [ 0.6341, 0.1403, 0.9759, 0.4064],
... [ 0.5918, 0.6948, 0.904 , 0.3721],
... [ 0.0921, 0.2481, 0.1188, 0.1366]])
>>> stats.zscore(b, axis=1, ddof=1)
array([[-0.19264823, -1.28415119, 1.07259584, 0.40420358],
[ 0.33048416, -1.37380874, 0.04251374, 1.00081084],
[ 0.26796377, -1.12598418, 1.23283094, -0.37481053],
[-0.22095197, 0.24468594, 1.19042819, -1.21416216],
[-0.82780366, 1.4457416 , -0.43867764, -0.1792603 ]])
"""
a = np.asanyarray(a)
mns = a.mean(axis=axis)
sstd = a.std(axis=axis, ddof=ddof)
if axis and mns.ndim < a.ndim:
return ((a - np.expand_dims(mns, axis=axis)) /
np.expand_dims(sstd, axis=axis))
else:
return (a - mns) / sstd
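# Illustrative sketch (not part of the module API): for a 1-D input, `zscore`
# reduces to subtracting the mean and dividing by the (ddof=0) standard
# deviation. The data are made up.
def _example_zscore_by_hand():
    import numpy as np
    a = np.array([0.7972, 0.0767, 0.4383, 0.7866, 0.8091])
    return (a - a.mean()) / a.std(ddof=0)            # same values as zscore(a)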
def zmap(scores, compare, axis=0, ddof=0):
"""
Calculates the relative z-scores.
Returns an array of z-scores, i.e., scores that are standardized to
zero mean and unit variance, where mean and variance are calculated
from the comparison array.
Parameters
----------
scores : array_like
The input for which z-scores are calculated.
compare : array_like
The input from which the mean and standard deviation of the
normalization are taken; assumed to have the same dimension as
`scores`.
axis : int or None, optional
Axis over which mean and variance of `compare` are calculated.
Default is 0. If None, compute over the whole array `scores`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
Returns
-------
zscore : array_like
Z-scores, in the same shape as `scores`.
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses `asanyarray` instead of
`asarray` for parameters).
Examples
--------
>>> from scipy.stats import zmap
>>> a = [0.5, 2.0, 2.5, 3]
>>> b = [0, 1, 2, 3, 4]
>>> zmap(a, b)
array([-1.06066017, 0. , 0.35355339, 0.70710678])
"""
scores, compare = map(np.asanyarray, [scores, compare])
mns = compare.mean(axis=axis)
sstd = compare.std(axis=axis, ddof=ddof)
if axis and mns.ndim < compare.ndim:
return ((scores - np.expand_dims(mns, axis=axis)) /
np.expand_dims(sstd, axis=axis))
else:
return (scores - mns) / sstd
# Private dictionary initialized only once at module level
# See https://en.wikipedia.org/wiki/Robust_measures_of_scale
_scale_conversions = {'raw': 1.0,
'normal': special.erfinv(0.5) * 2.0 * math.sqrt(2.0)}
def iqr(x, axis=None, rng=(25, 75), scale='raw', nan_policy='propagate',
interpolation='linear', keepdims=False):
"""
Compute the interquartile range of the data along the specified
axis.
The interquartile range (IQR) is the difference between the 75th and
25th percentile of the data. It is a measure of the dispersion
similar to standard deviation or variance, but is much more robust
against outliers [2]_.
The ``rng`` parameter allows this function to compute other
percentile ranges than the actual IQR. For example, setting
``rng=(0, 100)`` is equivalent to `numpy.ptp`.
The IQR of an empty array is `np.nan`.
.. versionadded:: 0.18.0
Parameters
----------
x : array_like
Input array or object that can be converted to an array.
axis : int or sequence of int, optional
Axis along which the range is computed. The default is to
compute the IQR for the entire array.
rng : Two-element sequence containing floats in range of [0,100], optional
Percentiles over which to compute the range. Each must be
between 0 and 100, inclusive. The default is the true IQR:
`(25, 75)`. The order of the elements is not important.
scale : scalar or str, optional
The numerical value of scale will be divided out of the final
result. The following string values are recognized:
'raw' : No scaling, just return the raw IQR.
'normal' : Scale by :math:`2 \\sqrt{2} erf^{-1}(\\frac{1}{2}) \\approx 1.349`.
The default is 'raw'. Array-like scale is also allowed, as long
as it broadcasts correctly to the output such that
``out / scale`` is a valid operation. The output dimensions
depend on the input array, `x`, the `axis` argument, and the
`keepdims` flag.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate'
returns nan, 'raise' throws an error, 'omit' performs the
calculations ignoring nan values. Default is 'propagate'.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}, optional
Specifies the interpolation method to use when the percentile
boundaries lie between two data points `i` and `j`:
* 'linear' : `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* 'lower' : `i`.
* 'higher' : `j`.
* 'nearest' : `i` or `j` whichever is nearest.
* 'midpoint' : `(i + j) / 2`.
Default is 'linear'.
keepdims : bool, optional
If this is set to `True`, the reduced axes are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the original array `x`.
Returns
-------
iqr : scalar or ndarray
If ``axis=None``, a scalar is returned. If the input contains
integers or floats of smaller precision than ``np.float64``, then the
output data-type is ``np.float64``. Otherwise, the output data-type is
the same as that of the input.
See Also
--------
numpy.std, numpy.var
Examples
--------
>>> from scipy.stats import iqr
>>> x = np.array([[10, 7, 4], [3, 2, 1]])
>>> x
array([[10, 7, 4],
[ 3, 2, 1]])
>>> iqr(x)
4.0
>>> iqr(x, axis=0)
array([ 3.5, 2.5, 1.5])
>>> iqr(x, axis=1)
array([ 3., 1.])
>>> iqr(x, axis=1, keepdims=True)
array([[ 3.],
[ 1.]])
Notes
-----
This function is heavily dependent on the version of `numpy` that is
installed. Versions greater than 1.11.0b3 are highly recommended, as they
include a number of enhancements and fixes to `numpy.percentile` and
`numpy.nanpercentile` that affect the operation of this function. The
following modifications apply:
Below 1.10.0 : `nan_policy` is poorly defined.
The default behavior of `numpy.percentile` is used for 'propagate'. This
is a hybrid of 'omit' and 'propagate' that mostly yields a skewed
version of 'omit' since NaNs are sorted to the end of the data. A
warning is raised if there are NaNs in the data.
Below 1.9.0: `numpy.nanpercentile` does not exist.
This means that `numpy.percentile` is used regardless of `nan_policy`
and a warning is issued. See previous item for a description of the
behavior.
Below 1.9.0: `keepdims` and `interpolation` are not supported.
The keywords get ignored with a warning if supplied with non-default
values. However, multiple axes are still supported.
References
----------
.. [1] "Interquartile range" https://en.wikipedia.org/wiki/Interquartile_range
.. [2] "Robust measures of scale" https://en.wikipedia.org/wiki/Robust_measures_of_scale
.. [3] "Quantile" https://en.wikipedia.org/wiki/Quantile
"""
x = asarray(x)
# This check prevents percentile from raising an error later. Also, it is
# consistent with `np.var` and `np.std`.
if not x.size:
return np.nan
# An error may be raised here, so fail-fast, before doing lengthy
# computations, even though `scale` is not used until later
if isinstance(scale, string_types):
scale_key = scale.lower()
if scale_key not in _scale_conversions:
raise ValueError("{0} not a valid scale for `iqr`".format(scale))
scale = _scale_conversions[scale_key]
# Select the percentile function to use based on nans and policy
contains_nan, nan_policy = _contains_nan(x, nan_policy)
if contains_nan and nan_policy == 'omit':
percentile_func = _iqr_nanpercentile
else:
percentile_func = _iqr_percentile
if len(rng) != 2:
raise TypeError("quantile range must be two element sequence")
rng = sorted(rng)
pct = percentile_func(x, rng, axis=axis, interpolation=interpolation,
keepdims=keepdims, contains_nan=contains_nan)
out = np.subtract(pct[1], pct[0])
if scale != 1.0:
out /= scale
return out
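# Illustrative sketch (not part of the module API): with default arguments,
# `iqr` is the difference of the 75th and 25th percentiles computed over the
# whole array. The data mirror the docstring example.
def _example_iqr_by_hand():
    import numpy as np
    x = np.array([[10, 7, 4], [3, 2, 1]], dtype=float)
    q75, q25 = np.percentile(x, [75, 25])
    return q75 - q25                                 # 4.0, matching iqr(x)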
def _iqr_percentile(x, q, axis=None, interpolation='linear', keepdims=False, contains_nan=False):
"""
Private wrapper that works around older versions of `numpy`.
While this function is pretty much necessary for the moment, it
should be removed as soon as the minimum supported numpy version
allows.
"""
if contains_nan and NumpyVersion(np.__version__) < '1.10.0a':
# I see no way to avoid the version check to ensure that the corrected
# NaN behavior has been implemented except to call `percentile` on a
# small array.
msg = "Keyword nan_policy='propagate' not correctly supported for " \
"numpy versions < 1.10.x. The default behavior of " \
"`numpy.percentile` will be used."
warnings.warn(msg, RuntimeWarning)
try:
# For older versions of numpy, there are two things that can cause a
# problem here: missing keywords and non-scalar axis. The former can be
# partially handled with a warning, the latter can be handled fully by
# hacking in an implementation similar to numpy's function for
# providing multi-axis functionality
# (`numpy.lib.function_base._ureduce` for the curious).
result = np.percentile(x, q, axis=axis, keepdims=keepdims,
interpolation=interpolation)
except TypeError:
if interpolation != 'linear' or keepdims:
# At time of writing, this means np.__version__ < 1.9.0
warnings.warn("Keywords interpolation and keepdims not supported "
"for your version of numpy", RuntimeWarning)
try:
# Special processing if axis is an iterable
original_size = len(axis)
except TypeError:
# Axis is a scalar at this point
pass
else:
axis = np.unique(np.asarray(axis) % x.ndim)
if original_size > axis.size:
# mimic numpy if axes are duplicated
raise ValueError("duplicate value in axis")
if axis.size == x.ndim:
# axis includes all axes: revert to None
axis = None
elif axis.size == 1:
# no rolling necessary
axis = axis[0]
else:
# roll multiple axes to the end and flatten that part out
for ax in axis[::-1]:
x = np.rollaxis(x, ax, x.ndim)
x = x.reshape(x.shape[:-axis.size] +
(np.prod(x.shape[-axis.size:]),))
axis = -1
result = np.percentile(x, q, axis=axis)
return result
def _iqr_nanpercentile(x, q, axis=None, interpolation='linear', keepdims=False, contains_nan=False):
"""
Private wrapper that works around the following:
1. A bug in `np.nanpercentile` that was around until numpy version
1.11.0.
2. A bug in `np.percentile` NaN handling that was fixed in numpy
version 1.10.0.
3. The non-existence of `np.nanpercentile` before numpy version
1.9.0.
While this function is pretty much necessary for the moment, it
should be removed as soon as the minimum supported numpy version
allows.
"""
if hasattr(np, 'nanpercentile'):
# np.nanpercentile exists, i.e. np.__version__ >= 1.9.0 at time of writing
result = np.nanpercentile(x, q, axis=axis,
interpolation=interpolation, keepdims=keepdims)
# Handle non-scalar results when nanpercentile does not roll the reduced axis properly.
# I see no way of avoiding the version test since dimensions may just
# happen to match in the data.
if result.ndim > 1 and NumpyVersion(np.__version__) < '1.11.0a':
axis = np.asarray(axis)
if axis.size == 1:
# If only one axis specified, reduction happens along that dimension
if axis.ndim == 0:
axis = axis[None]
result = np.rollaxis(result, axis[0])
else:
# If multiple axes, the reduced dimension is last
result = np.rollaxis(result, -1)
else:
msg = "Keyword nan_policy='omit' not correctly supported for numpy " \
"versions < 1.9.x. The default behavior of numpy.percentile " \
"will be used."
warnings.warn(msg, RuntimeWarning)
result = _iqr_percentile(x, q, axis=axis)
return result
#####################################
# TRIMMING FUNCTIONS #
#####################################
@np.deprecate(message="stats.threshold is deprecated in scipy 0.17.0")
def threshold(a, threshmin=None, threshmax=None, newval=0):
"""
Clip array to a given value.
Similar to numpy.clip(), except that values less than `threshmin` or
greater than `threshmax` are replaced by `newval`, instead of by
`threshmin` and `threshmax` respectively.
Parameters
----------
a : array_like
Data to threshold.
threshmin : float, int or None, optional
Minimum threshold, defaults to None.
threshmax : float, int or None, optional
Maximum threshold, defaults to None.
newval : float or int, optional
Value to put in place of values in `a` outside of bounds.
Defaults to 0.
Returns
-------
out : ndarray
The clipped input array, with values less than `threshmin` or
greater than `threshmax` replaced with `newval`.
Examples
--------
>>> a = np.array([9, 9, 6, 3, 1, 6, 1, 0, 0, 8])
>>> from scipy import stats
>>> stats.threshold(a, threshmin=2, threshmax=8, newval=-1)
array([-1, -1, 6, 3, -1, 6, -1, -1, -1, 8])
"""
a = asarray(a).copy()
mask = zeros(a.shape, dtype=bool)
if threshmin is not None:
mask |= (a < threshmin)
if threshmax is not None:
mask |= (a > threshmax)
a[mask] = newval
return a
SigmaclipResult = namedtuple('SigmaclipResult', ('clipped', 'lower', 'upper'))
def sigmaclip(a, low=4., high=4.):
"""
Iterative sigma-clipping of array elements.
The output array ``c`` contains only those elements of the input array `a`
that satisfy the conditions ::
mean(c) - std(c)*low < c < mean(c) + std(c)*high
Starting from the full sample, all elements outside the critical range are
removed. The iteration continues with a new critical range until no
elements are outside the range.
Parameters
----------
a : array_like
Data array, will be raveled if not 1-D.
low : float, optional
Lower bound factor of sigma clipping. Default is 4.
high : float, optional
Upper bound factor of sigma clipping. Default is 4.
Returns
-------
clipped : ndarray
Input array with clipped elements removed.
lower : float
Lower threshold value used for clipping.
upper : float
Upper threshold value used for clipping.
Examples
--------
>>> from scipy.stats import sigmaclip
>>> a = np.concatenate((np.linspace(9.5, 10.5, 31),
... np.linspace(0, 20, 5)))
>>> fact = 1.5
>>> c, low, upp = sigmaclip(a, fact, fact)
>>> c
array([ 9.96666667, 10. , 10.03333333, 10. ])
>>> c.var(), c.std()
(0.00055555555555555165, 0.023570226039551501)
>>> low, c.mean() - fact*c.std(), c.min()
(9.9646446609406727, 9.9646446609406727, 9.9666666666666668)
>>> upp, c.mean() + fact*c.std(), c.max()
(10.035355339059327, 10.035355339059327, 10.033333333333333)
>>> a = np.concatenate((np.linspace(9.5, 10.5, 11),
... np.linspace(-100, -50, 3)))
>>> c, low, upp = sigmaclip(a, 1.8, 1.8)
>>> (c == np.linspace(9.5, 10.5, 11)).all()
True
"""
c = np.asarray(a).ravel()
delta = 1
while delta:
c_std = c.std()
c_mean = c.mean()
size = c.size
critlower = c_mean - c_std*low
critupper = c_mean + c_std*high
c = c[(c > critlower) & (c < critupper)]
delta = size - c.size
return SigmaclipResult(c, critlower, critupper)
def trimboth(a, proportiontocut, axis=0):
"""
Slices off a proportion of items from both ends of an array.
Slices off the passed proportion of items from both ends of the passed
array (i.e., with `proportiontocut` = 0.1, slices leftmost 10% **and**
rightmost 10% of scores). The trimmed values are the lowest and
highest ones.
Slices off less if proportion results in a non-integer slice index (i.e.,
conservatively slices off `proportiontocut`).
Parameters
----------
a : array_like
Data to trim.
proportiontocut : float
Proportion (in range 0-1) of total data set to trim of each end.
axis : int or None, optional
Axis along which to trim data. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
out : ndarray
Trimmed version of array `a`. The order of the trimmed content
is undefined.
See Also
--------
trim_mean
Examples
--------
>>> from scipy import stats
>>> a = np.arange(20)
>>> b = stats.trimboth(a, 0.1)
>>> b.shape
(16,)
"""
a = np.asarray(a)
if a.size == 0:
return a
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut
if (lowercut >= uppercut):
raise ValueError("Proportion too big.")
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
sl = [slice(None)] * atmp.ndim
sl[axis] = slice(lowercut, uppercut)
return atmp[sl]
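# Illustrative sketch (not part of the module API): up to ordering, `trimboth`
# is equivalent to sorting and slicing off `int(proportiontocut * nobs)`
# values from each end. The data mirror the docstring example.
def _example_trimboth_by_hand():
    import numpy as np
    a = np.arange(20)
    cut = int(0.1 * a.shape[0])                      # 2 values from each end
    return np.sort(a)[cut:a.shape[0] - cut]          # 16 values, like trimboth(a, 0.1)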
def trim1(a, proportiontocut, tail='right', axis=0):
"""
Slices off a proportion from ONE end of the passed array distribution.
If `proportiontocut` = 0.1, slices off 'leftmost' or 'rightmost'
10% of scores. The lowest or highest values are trimmed (depending on
the tail).
Slices off less if proportion results in a non-integer slice index
(i.e., conservatively slices off `proportiontocut` ).
Parameters
----------
a : array_like
Input array
proportiontocut : float
Fraction to cut off of 'left' or 'right' of distribution
tail : {'left', 'right'}, optional
Defaults to 'right'.
axis : int or None, optional
Axis along which to trim data. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
trim1 : ndarray
Trimmed version of array `a`. The order of the trimmed content is
undefined.
"""
a = np.asarray(a)
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
# avoid possible corner case
if proportiontocut >= 1:
return []
if tail.lower() == 'right':
lowercut = 0
uppercut = nobs - int(proportiontocut * nobs)
elif tail.lower() == 'left':
lowercut = int(proportiontocut * nobs)
uppercut = nobs
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
return atmp[lowercut:uppercut]
def trim_mean(a, proportiontocut, axis=0):
"""
Return mean of array after trimming distribution from both tails.
If `proportiontocut` = 0.1, slices off 'leftmost' and 'rightmost' 10% of
scores. The input is sorted before slicing. Slices off less if proportion
results in a non-integer slice index (i.e., conservatively slices off
`proportiontocut` ).
Parameters
----------
a : array_like
Input array
proportiontocut : float
Fraction to cut off of both tails of the distribution
axis : int or None, optional
Axis along which the trimmed means are computed. Default is 0.
If None, compute over the whole array `a`.
Returns
-------
trim_mean : ndarray
Mean of trimmed array.
See Also
--------
trimboth
tmean : compute the trimmed mean ignoring values outside given `limits`.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.trim_mean(x, 0.1)
9.5
>>> x2 = x.reshape(5, 4)
>>> x2
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15],
[16, 17, 18, 19]])
>>> stats.trim_mean(x2, 0.25)
array([ 8., 9., 10., 11.])
>>> stats.trim_mean(x2, 0.25, axis=1)
array([ 1.5, 5.5, 9.5, 13.5, 17.5])
"""
a = np.asarray(a)
if a.size == 0:
return np.nan
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut
if (lowercut > uppercut):
raise ValueError("Proportion too big.")
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
sl = [slice(None)] * atmp.ndim
sl[axis] = slice(lowercut, uppercut)
return np.mean(atmp[sl], axis=axis)
F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue'))
def f_oneway(*args):
"""
Performs a 1-way ANOVA.
The one-way ANOVA tests the null hypothesis that two or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Parameters
----------
sample1, sample2, ... : array_like
The sample measurements for each group.
Returns
-------
statistic : float
The computed F-value of the test.
pvalue : float
The associated p-value from the F-distribution.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent.
2. Each sample is from a normally distributed population.
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still be
possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`) although
with some loss of power.
The algorithm is from Heiman [2], pp. 394-397.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 14.
http://faculty.vassar.edu/lowry/ch14pt1.html
.. [2] Heiman, G.W. Research Methods in Statistics. 2002.
.. [3] McDonald, G. H. "Handbook of Biological Statistics", One-way ANOVA.
http://www.biostathandbook.com/onewayanova.html
Examples
--------
>>> import scipy.stats as stats
[3]_ Here are some data on a shell measurement (the length of the anterior
adductor muscle scar, standardized by dividing by length) in the mussel
Mytilus trossulus from five locations: Tillamook, Oregon; Newport, Oregon;
Petersburg, Alaska; Magadan, Russia; and Tvarminne, Finland, taken from a
much larger data set used in McDonald et al. (1991).
>>> tillamook = [0.0571, 0.0813, 0.0831, 0.0976, 0.0817, 0.0859, 0.0735,
... 0.0659, 0.0923, 0.0836]
>>> newport = [0.0873, 0.0662, 0.0672, 0.0819, 0.0749, 0.0649, 0.0835,
... 0.0725]
>>> petersburg = [0.0974, 0.1352, 0.0817, 0.1016, 0.0968, 0.1064, 0.105]
>>> magadan = [0.1033, 0.0915, 0.0781, 0.0685, 0.0677, 0.0697, 0.0764,
... 0.0689]
>>> tvarminne = [0.0703, 0.1026, 0.0956, 0.0973, 0.1039, 0.1045]
>>> stats.f_oneway(tillamook, newport, petersburg, magadan, tvarminne)
(7.1210194716424473, 0.00028122423145345439)
"""
args = [np.asarray(arg, dtype=float) for arg in args]
# ANOVA on N groups, each in its own array
num_groups = len(args)
alldata = np.concatenate(args)
bign = len(alldata)
# Determine the mean of the data, and subtract that from all inputs to a
# variance (via sum_of_sq / sq_of_sum) calculation. Variance is invariant
# to a shift in location, and centering all data around zero vastly
# improves numerical stability.
offset = alldata.mean()
alldata -= offset
sstot = _sum_of_squares(alldata) - (_square_of_sums(alldata) / float(bign))
ssbn = 0
for a in args:
ssbn += _square_of_sums(a - offset) / float(len(a))
# Naming: variables ending in bn/b are for "between treatments", wn/w are
# for "within treatments"
ssbn -= (_square_of_sums(alldata) / float(bign))
sswn = sstot - ssbn
dfbn = num_groups - 1
dfwn = bign - num_groups
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
f = msb / msw
prob = special.fdtrc(dfbn, dfwn, f) # equivalent to stats.f.sf
return F_onewayResult(f, prob)
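# Illustrative sketch (not part of the module API): the F statistic above is
# the between-group mean square over the within-group mean square, so the
# same value can be reached directly from the group means. The group values
# below are made up for demonstration.
def _example_f_oneway_by_hand():
    import numpy as np
    from scipy.stats import f as f_dist
    groups = [np.array([6., 8., 4., 5., 3., 4.]),
              np.array([8., 12., 9., 11., 6., 8.]),
              np.array([13., 9., 11., 8., 7., 12.])]
    alldata = np.concatenate(groups)
    ssbn = sum(g.size * (g.mean() - alldata.mean()) ** 2 for g in groups)
    sswn = sum(((g - g.mean()) ** 2).sum() for g in groups)
    dfbn, dfwn = len(groups) - 1, alldata.size - len(groups)
    f_stat = (ssbn / dfbn) / (sswn / dfwn)
    return f_stat, f_dist.sf(f_stat, dfbn, dfwn)     # matches f_oneway(*groups)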
def pearsonr(x, y):
"""
Calculates a Pearson correlation coefficient and the p-value for testing
non-correlation.
The Pearson correlation coefficient measures the linear relationship
between two datasets. Strictly speaking, Pearson's correlation requires
that each dataset be normally distributed, and not necessarily zero-mean.
Like other correlation coefficients, this one varies between -1 and +1
with 0 implying no correlation. Correlations of -1 or +1 imply an exact
linear relationship. Positive correlations imply that as x increases, so
does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Pearson correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
x : (N,) array_like
Input
y : (N,) array_like
Input
Returns
-------
r : float
Pearson's correlation coefficient
p-value : float
2-tailed p-value
References
----------
http://www.statsoft.com/textbook/glosp.html#Pearson%20Correlation
"""
# x and y should have same length.
x = np.asarray(x)
y = np.asarray(y)
n = len(x)
mx = x.mean()
my = y.mean()
xm, ym = x - mx, y - my
r_num = np.add.reduce(xm * ym)
r_den = np.sqrt(_sum_of_squares(xm) * _sum_of_squares(ym))
r = r_num / r_den
# Presumably, if abs(r) > 1, then it is only some small artifact of floating
# point arithmetic.
r = max(min(r, 1.0), -1.0)
df = n - 2
if abs(r) == 1.0:
prob = 0.0
else:
t_squared = r**2 * (df / ((1.0 - r) * (1.0 + r)))
prob = _betai(0.5*df, 0.5, df/(df+t_squared))
return r, prob
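# Illustrative sketch (not part of the module API): the p-value above is the
# two-sided tail probability of a t distribution with n - 2 degrees of
# freedom, so the same result can be reached via `np.corrcoef` plus the t
# survival function. The data are made up.
def _example_pearsonr_by_hand():
    import numpy as np
    from scipy.stats import t as t_dist
    x = np.array([1., 2., 3., 4., 5.])
    y = np.array([5., 6., 7., 8., 7.])
    r = np.corrcoef(x, y)[0, 1]
    df = x.size - 2
    t_stat = r * np.sqrt(df / (1.0 - r * r))
    return r, 2 * t_dist.sf(abs(t_stat), df)         # same as pearsonr(x, y)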
def fisher_exact(table, alternative='two-sided'):
"""Performs a Fisher exact test on a 2x2 contingency table.
Parameters
----------
table : array_like of ints
A 2x2 contingency table. Elements should be non-negative integers.
alternative : {'two-sided', 'less', 'greater'}, optional
Which alternative hypothesis to the null hypothesis the test uses.
Default is 'two-sided'.
Returns
-------
oddsratio : float
This is the prior odds ratio and not a posterior estimate.
p_value : float
P-value, the probability of obtaining a distribution at least as
extreme as the one that was actually observed, assuming that the
null hypothesis is true.
See Also
--------
chi2_contingency : Chi-square test of independence of variables in a
contingency table.
Notes
-----
The calculated odds ratio is different from the one R uses. This scipy
implementation returns the (more common) "unconditional Maximum
Likelihood Estimate", while R uses the "conditional Maximum Likelihood
Estimate".
For tables with large numbers, the (inexact) chi-square test implemented
in the function `chi2_contingency` can also be used.
Examples
--------
Say we spend a few days counting whales and sharks in the Atlantic and
Indian oceans. In the Atlantic ocean we find 8 whales and 1 shark, in the
Indian ocean 2 whales and 5 sharks. Then our contingency table is::
Atlantic Indian
whales 8 2
sharks 1 5
We use this table to find the p-value:
>>> import scipy.stats as stats
>>> oddsratio, pvalue = stats.fisher_exact([[8, 2], [1, 5]])
>>> pvalue
0.0349...
The probability that we would observe this or an even more imbalanced ratio
by chance is about 3.5%. A commonly used significance level is 5%--if we
adopt that, we can therefore conclude that our observed imbalance is
statistically significant; whales prefer the Atlantic while sharks prefer
the Indian ocean.
"""
hypergeom = distributions.hypergeom
c = np.asarray(table, dtype=np.int64) # int32 is not enough for the algorithm
if not c.shape == (2, 2):
raise ValueError("The input `table` must be of shape (2, 2).")
if np.any(c < 0):
raise ValueError("All values in `table` must be nonnegative.")
if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):
# If both values in a row or column are zero, the p-value is 1 and
# the odds ratio is NaN.
return np.nan, 1.0
if c[1,0] > 0 and c[0,1] > 0:
oddsratio = c[0,0] * c[1,1] / float(c[1,0] * c[0,1])
else:
oddsratio = np.inf
n1 = c[0,0] + c[0,1]
n2 = c[1,0] + c[1,1]
n = c[0,0] + c[1,0]
def binary_search(n, n1, n2, side):
"""Binary search for where to begin lower/upper halves in two-sided
test.
"""
if side == "upper":
minval = mode
maxval = n
else:
minval = 0
maxval = mode
guess = -1
while maxval - minval > 1:
if maxval == minval + 1 and guess == minval:
guess = maxval
else:
guess = (maxval + minval) // 2
pguess = hypergeom.pmf(guess, n1 + n2, n1, n)
if side == "upper":
ng = guess - 1
else:
ng = guess + 1
if pguess <= pexact < hypergeom.pmf(ng, n1 + n2, n1, n):
break
elif pguess < pexact:
maxval = guess
else:
minval = guess
if guess == -1:
guess = minval
if side == "upper":
while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess -= 1
while hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess += 1
else:
while hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess += 1
while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess -= 1
return guess
if alternative == 'less':
pvalue = hypergeom.cdf(c[0,0], n1 + n2, n1, n)
elif alternative == 'greater':
# Same formula as the 'less' case, but with the second column.
pvalue = hypergeom.cdf(c[0,1], n1 + n2, n1, c[0,1] + c[1,1])
elif alternative == 'two-sided':
mode = int(float((n + 1) * (n1 + 1)) / (n1 + n2 + 2))
pexact = hypergeom.pmf(c[0,0], n1 + n2, n1, n)
pmode = hypergeom.pmf(mode, n1 + n2, n1, n)
epsilon = 1 - 1e-4
if np.abs(pexact - pmode) / np.maximum(pexact, pmode) <= 1 - epsilon:
return oddsratio, 1.
elif c[0,0] < mode:
plower = hypergeom.cdf(c[0,0], n1 + n2, n1, n)
if hypergeom.pmf(n, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, plower
guess = binary_search(n, n1, n2, "upper")
pvalue = plower + hypergeom.sf(guess - 1, n1 + n2, n1, n)
else:
pupper = hypergeom.sf(c[0,0] - 1, n1 + n2, n1, n)
if hypergeom.pmf(0, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, pupper
guess = binary_search(n, n1, n2, "lower")
pvalue = pupper + hypergeom.cdf(guess, n1 + n2, n1, n)
else:
msg = "`alternative` should be one of {'two-sided', 'less', 'greater'}"
raise ValueError(msg)
if pvalue > 1.0:
pvalue = 1.0
return oddsratio, pvalue
SpearmanrResult = namedtuple('SpearmanrResult', ('correlation', 'pvalue'))
def spearmanr(a, b=None, axis=0, nan_policy='propagate'):
"""
Calculates a Spearman rank-order correlation coefficient and the p-value
to test for non-correlation.
The Spearman correlation is a nonparametric measure of the monotonicity
of the relationship between two datasets. Unlike the Pearson correlation,
the Spearman correlation does not assume that both datasets are normally
distributed. Like other correlation coefficients, this one varies
between -1 and +1 with 0 implying no correlation. Correlations of -1 or
+1 imply an exact monotonic relationship. Positive correlations imply that
as x increases, so does y. Negative correlations imply that as x
increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
a, b : 1D or 2D array_like, b is optional
One or two 1-D or 2-D arrays containing multiple variables and
observations. When these are 1-D, each represents a vector of
observations of a single variable. For the behavior in the 2-D case,
see under ``axis``, below.
Both arrays need to have the same length in the ``axis`` dimension.
axis : int or None, optional
If axis=0 (default), then each column represents a variable, with
observations in the rows. If axis=1, the relationship is transposed:
each row represents a variable, while the columns contain observations.
If axis=None, then both arrays will be raveled.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
correlation : float or ndarray (2-D square)
Spearman correlation matrix or correlation coefficient (if only 2
variables are given as parameters). The correlation matrix is square,
with length equal to the total number of variables (columns or rows)
in `a` and `b` combined.
pvalue : float
The two-sided p-value for a hypothesis test whose null hypothesis is
that two sets of data are uncorrelated, has same dimension as rho.
Notes
-----
Changes in scipy 0.8.0: rewrite to add tie-handling, and axis.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 14.7
Examples
--------
>>> from scipy import stats
>>> stats.spearmanr([1,2,3,4,5], [5,6,7,8,7])
(0.82078268166812329, 0.088587005313543798)
>>> np.random.seed(1234321)
>>> x2n = np.random.randn(100, 2)
>>> y2n = np.random.randn(100, 2)
>>> stats.spearmanr(x2n)
(0.059969996999699973, 0.55338590803773591)
>>> stats.spearmanr(x2n[:,0], x2n[:,1])
(0.059969996999699973, 0.55338590803773591)
>>> rho, pval = stats.spearmanr(x2n, y2n)
>>> rho
array([[ 1. , 0.05997 , 0.18569457, 0.06258626],
[ 0.05997 , 1. , 0.110003 , 0.02534653],
[ 0.18569457, 0.110003 , 1. , 0.03488749],
[ 0.06258626, 0.02534653, 0.03488749, 1. ]])
>>> pval
array([[ 0. , 0.55338591, 0.06435364, 0.53617935],
[ 0.55338591, 0. , 0.27592895, 0.80234077],
[ 0.06435364, 0.27592895, 0. , 0.73039992],
[ 0.53617935, 0.80234077, 0.73039992, 0. ]])
>>> rho, pval = stats.spearmanr(x2n.T, y2n.T, axis=1)
>>> rho
array([[ 1. , 0.05997 , 0.18569457, 0.06258626],
[ 0.05997 , 1. , 0.110003 , 0.02534653],
[ 0.18569457, 0.110003 , 1. , 0.03488749],
[ 0.06258626, 0.02534653, 0.03488749, 1. ]])
>>> stats.spearmanr(x2n, y2n, axis=None)
(0.10816770419260482, 0.1273562188027364)
>>> stats.spearmanr(x2n.ravel(), y2n.ravel())
(0.10816770419260482, 0.1273562188027364)
>>> xint = np.random.randint(10, size=(100, 2))
>>> stats.spearmanr(xint)
(0.052760927029710199, 0.60213045837062351)
"""
a, axisout = _chk_asarray(a, axis)
a_contains_nan, nan_policy = _contains_nan(a, nan_policy)
if a_contains_nan:
a = ma.masked_invalid(a)
if a.size <= 1:
return SpearmanrResult(np.nan, np.nan)
ar = np.apply_along_axis(rankdata, axisout, a)
br = None
if b is not None:
b, axisout = _chk_asarray(b, axis)
b_contains_nan, nan_policy = _contains_nan(b, nan_policy)
if a_contains_nan or b_contains_nan:
b = ma.masked_invalid(b)
if nan_policy == 'propagate':
rho, pval = mstats_basic.spearmanr(a, b, axis)
return SpearmanrResult(rho * np.nan, pval * np.nan)
if nan_policy == 'omit':
return mstats_basic.spearmanr(a, b, axis)
br = np.apply_along_axis(rankdata, axisout, b)
n = a.shape[axisout]
rs = np.corrcoef(ar, br, rowvar=axisout)
olderr = np.seterr(divide='ignore') # rs can have elements equal to 1
try:
# clip the small negative values possibly caused by rounding
# errors before taking the square root
t = rs * np.sqrt(((n-2)/((rs+1.0)*(1.0-rs))).clip(0))
finally:
np.seterr(**olderr)
prob = 2 * distributions.t.sf(np.abs(t), n-2)
if rs.shape == (2, 2):
return SpearmanrResult(rs[1, 0], prob[1, 0])
else:
return SpearmanrResult(rs, prob)
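# Illustrative sketch (not part of the module API): for two 1-D inputs the
# Spearman coefficient is the Pearson correlation of the ranks, with ties
# handled by midranks. The data mirror the first docstring example.
def _example_spearmanr_via_ranks():
    from scipy.stats import rankdata, pearsonr
    rho, _ = pearsonr(rankdata([1, 2, 3, 4, 5]), rankdata([5, 6, 7, 8, 7]))
    return rho                                       # ~0.8208, as in spearmanr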
PointbiserialrResult = namedtuple('PointbiserialrResult',
('correlation', 'pvalue'))
def pointbiserialr(x, y):
r"""
Calculates a point biserial correlation coefficient and its p-value.
The point biserial correlation is used to measure the relationship
between a binary variable, x, and a continuous variable, y. Like other
correlation coefficients, this one varies between -1 and +1 with 0
implying no correlation. Correlations of -1 or +1 imply a determinative
relationship.
This function uses a shortcut formula but produces the same result as
`pearsonr`.
Parameters
----------
x : array_like of bools
Input array.
y : array_like
Input array.
Returns
-------
correlation : float
R value
pvalue : float
2-tailed p-value
Notes
-----
`pointbiserialr` uses a t-test with ``n-2`` degrees of freedom.
It is equivalent to `pearsonr`.
The value of the point-biserial correlation can be calculated from:
.. math::
r_{pb} = \frac{\overline{Y_{1}} -
\overline{Y_{0}}}{s_{y}}\sqrt{\frac{N_{0} N_{1}}{N (N - 1)}}
Where :math:`Y_{0}` and :math:`Y_{1}` are means of the metric
observations coded 0 and 1 respectively; :math:`N_{0}` and :math:`N_{1}`
are number of observations coded 0 and 1 respectively; :math:`N` is the
total number of observations and :math:`s_{y}` is the standard
deviation of all the metric observations.
A value of :math:`r_{pb}` that is significantly different from zero is
completely equivalent to a significant difference in means between the two
groups. Thus, an independent groups t Test with :math:`N-2` degrees of
freedom may be used to test whether :math:`r_{pb}` is nonzero. The
relation between the t-statistic for comparing two independent groups and
:math:`r_{pb}` is given by:
.. math::
t = \sqrt{N - 2}\frac{r_{pb}}{\sqrt{1 - r^{2}_{pb}}}
References
----------
.. [1] J. Lev, "The Point Biserial Coefficient of Correlation", Ann. Math.
Statist., Vol. 20, no.1, pp. 125-126, 1949.
.. [2] R.F. Tate, "Correlation Between a Discrete and a Continuous
Variable. Point-Biserial Correlation.", Ann. Math. Statist., Vol. 25,
no. 3, pp. 603-607, 1954.
.. [3] http://onlinelibrary.wiley.com/doi/10.1002/9781118445112.stat06227/full
Examples
--------
>>> from scipy import stats
>>> a = np.array([0, 0, 0, 1, 1, 1, 1])
>>> b = np.arange(7)
>>> stats.pointbiserialr(a, b)
(0.8660254037844386, 0.011724811003954652)
>>> stats.pearsonr(a, b)
(0.86602540378443871, 0.011724811003954626)
>>> np.corrcoef(a, b)
array([[ 1. , 0.8660254],
[ 0.8660254, 1. ]])
"""
rpb, prob = pearsonr(x, y)
return PointbiserialrResult(rpb, prob)
KendalltauResult = namedtuple('KendalltauResult', ('correlation', 'pvalue'))
def kendalltau(x, y, initial_lexsort=None, nan_policy='propagate'):
"""
Calculates Kendall's tau, a correlation measure for ordinal data.
Kendall's tau is a measure of the correspondence between two rankings.
Values close to 1 indicate strong agreement, values close to -1 indicate
strong disagreement. This is the 1945 "tau-b" version of Kendall's
tau [2]_, which can account for ties and which reduces to the 1938 "tau-a"
version [1]_ in absence of ties.
Parameters
----------
x, y : array_like
Arrays of rankings, of the same shape. If arrays are not 1-D, they will
be flattened to 1-D.
initial_lexsort : bool, optional
Unused (deprecated).
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'. Note that if the input contains nan
'omit' delegates to mstats_basic.kendalltau(), which has a different
implementation.
Returns
-------
correlation : float
The tau statistic.
pvalue : float
The two-sided p-value for a hypothesis test whose null hypothesis is
an absence of association, tau = 0.
See also
--------
spearmanr : Calculates a Spearman rank-order correlation coefficient.
theilslopes : Computes the Theil-Sen estimator for a set of points (x, y).
weightedtau : Computes a weighted version of Kendall's tau.
Notes
-----
The definition of Kendall's tau that is used is [2]_::
tau = (P - Q) / sqrt((P + Q + T) * (P + Q + U))
where P is the number of concordant pairs, Q the number of discordant
pairs, T the number of ties only in `x`, and U the number of ties only in
`y`. If a tie occurs for the same pair in both `x` and `y`, it is not
added to either T or U.
References
----------
.. [1] Maurice G. Kendall, "A New Measure of Rank Correlation", Biometrika
Vol. 30, No. 1/2, pp. 81-93, 1938.
.. [2] Maurice G. Kendall, "The treatment of ties in ranking problems",
Biometrika Vol. 33, No. 3, pp. 239-251. 1945.
.. [3] Gottfried E. Noether, "Elements of Nonparametric Statistics", John
Wiley & Sons, 1967.
.. [4] Peter M. Fenwick, "A new data structure for cumulative frequency
tables", Software: Practice and Experience, Vol. 24, No. 3,
pp. 327-336, 1994.
Examples
--------
>>> from scipy import stats
>>> x1 = [12, 2, 1, 12, 2]
>>> x2 = [1, 4, 7, 1, 0]
>>> tau, p_value = stats.kendalltau(x1, x2)
>>> tau
-0.47140452079103173
>>> p_value
0.2827454599327748
"""
x = np.asarray(x).ravel()
y = np.asarray(y).ravel()
if x.size != y.size:
raise ValueError("All inputs to `kendalltau` must be of the same size, "
"found x-size %s and y-size %s" % (x.size, y.size))
elif not x.size or not y.size:
return KendalltauResult(np.nan, np.nan) # Return NaN if arrays are empty
# check both x and y
cnx, npx = _contains_nan(x, nan_policy)
cny, npy = _contains_nan(y, nan_policy)
contains_nan = cnx or cny
if npx == 'omit' or npy == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'propagate':
return KendalltauResult(np.nan, np.nan)
elif contains_nan and nan_policy == 'omit':
x = ma.masked_invalid(x)
y = ma.masked_invalid(y)
return mstats_basic.kendalltau(x, y)
if initial_lexsort is not None: # deprecate to drop!
warnings.warn('"initial_lexsort" is gone!')
def count_rank_tie(ranks):
cnt = np.bincount(ranks).astype('int64', copy=False)
cnt = cnt[cnt > 1]
return ((cnt * (cnt - 1) // 2).sum(),
(cnt * (cnt - 1.) * (cnt - 2)).sum(),
(cnt * (cnt - 1.) * (2*cnt + 5)).sum())
size = x.size
perm = np.argsort(y) # sort on y and convert y to dense ranks
x, y = x[perm], y[perm]
y = np.r_[True, y[1:] != y[:-1]].cumsum(dtype=np.intp)
# stable sort on x and convert x to dense ranks
perm = np.argsort(x, kind='mergesort')
x, y = x[perm], y[perm]
x = np.r_[True, x[1:] != x[:-1]].cumsum(dtype=np.intp)
dis = _kendall_dis(x, y) # discordant pairs
obs = np.r_[True, (x[1:] != x[:-1]) | (y[1:] != y[:-1]), True]
cnt = np.diff(np.where(obs)[0]).astype('int64', copy=False)
ntie = (cnt * (cnt - 1) // 2).sum() # joint ties
xtie, x0, x1 = count_rank_tie(x) # ties in x, stats
ytie, y0, y1 = count_rank_tie(y) # ties in y, stats
tot = (size * (size - 1)) // 2
if xtie == tot or ytie == tot:
return KendalltauResult(np.nan, np.nan)
# Note that tot = con + dis + (xtie - ntie) + (ytie - ntie) + ntie
# = con + dis + xtie + ytie - ntie
con_minus_dis = tot - xtie - ytie + ntie - 2 * dis
tau = con_minus_dis / np.sqrt(tot - xtie) / np.sqrt(tot - ytie)
# Limit range to fix computational errors
tau = min(1., max(-1., tau))
# con_minus_dis is approx normally distributed with this variance [3]_
var = (size * (size - 1) * (2.*size + 5) - x1 - y1) / 18. + (
2. * xtie * ytie) / (size * (size - 1)) + x0 * y0 / (9. *
size * (size - 1) * (size - 2))
pvalue = special.erfc(np.abs(con_minus_dis) / np.sqrt(var) / np.sqrt(2))
# Limit range to fix computational errors
return KendalltauResult(min(1., max(-1., tau)), pvalue)
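# Illustrative sketch (not part of the module API): on small inputs the tau-b
# definition quoted in the docstring can be evaluated by brute force over all
# pairs, reproducing the fast implementation above. The data mirror the
# docstring example.
def _example_kendalltau_brute_force():
    import numpy as np
    x = np.array([12, 2, 1, 12, 2])
    y = np.array([1, 4, 7, 1, 0])
    con = dis = xtie = ytie = 0
    for i in range(x.size):
        for j in range(i + 1, x.size):
            dx, dy = x[i] - x[j], y[i] - y[j]
            if dx == 0 and dy == 0:
                continue                             # joint ties count toward neither T nor U
            elif dx == 0:
                xtie += 1
            elif dy == 0:
                ytie += 1
            elif dx * dy > 0:
                con += 1
            else:
                dis += 1
    return (con - dis) / np.sqrt((con + dis + xtie) * (con + dis + ytie))   # ~-0.4714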
WeightedTauResult = namedtuple('WeightedTauResult', ('correlation', 'pvalue'))
def weightedtau(x, y, rank=True, weigher=None, additive=True):
r"""
Computes a weighted version of Kendall's :math:`\tau`.
The weighted :math:`\tau` is a weighted version of Kendall's
:math:`\tau` in which exchanges of high weight are more influential than
exchanges of low weight. The default parameters compute the additive
hyperbolic version of the index, :math:`\tau_\mathrm h`, which has
been shown to provide the best balance between important and
unimportant elements [1]_.
The weighting is defined by means of a rank array, which assigns a
nonnegative rank to each element, and a weigher function, which
assigns a weight based on the rank to each element. The weight of an
exchange is then the sum or the product of the weights of the ranks of
the exchanged elements. The default parameters compute
:math:`\tau_\mathrm h`: an exchange between elements with rank
:math:`r` and :math:`s` (starting from zero) has weight
:math:`1/(r+1) + 1/(s+1)`.
Specifying a rank array is meaningful only if you have in mind an
external criterion of importance. If, as usually happens, you do
not have a specific rank in mind, the weighted :math:`\tau` is
defined by averaging the values obtained using the decreasing
lexicographical rank by (`x`, `y`) and by (`y`, `x`). This is the
behavior with default parameters.
Note that if you are computing the weighted :math:`\tau` on arrays of
ranks, rather than of scores (i.e., a larger value implies a lower
rank) you must negate the ranks, so that elements of higher rank are
associated with a larger value.
Parameters
----------
x, y : array_like
Arrays of scores, of the same shape. If arrays are not 1-D, they will
be flattened to 1-D.
rank: array_like of ints or bool, optional
A nonnegative rank assigned to each element. If it is None, the
decreasing lexicographical rank by (`x`, `y`) will be used: elements of
higher rank will be those with larger `x`-values, using `y`-values to
break ties (in particular, swapping `x` and `y` will give a different
result). If it is False, the element indices will be used
directly as ranks. The default is True, in which case this
function returns the average of the values obtained using the
decreasing lexicographical rank by (`x`, `y`) and by (`y`, `x`).
weigher : callable, optional
The weigher function. Must map nonnegative integers (zero
representing the most important element) to a nonnegative weight.
The default, None, provides hyperbolic weighing, that is,
rank :math:`r` is mapped to weight :math:`1/(r+1)`.
additive : bool, optional
If True, the weight of an exchange is computed by adding the
weights of the ranks of the exchanged elements; otherwise, the weights
are multiplied. The default is True.
Returns
-------
correlation : float
The weighted :math:`\tau` correlation index.
pvalue : float
Presently ``np.nan``, as the null statistics is unknown (even in the
additive hyperbolic case).
See also
--------
kendalltau : Calculates Kendall's tau.
spearmanr : Calculates a Spearman rank-order correlation coefficient.
theilslopes : Computes the Theil-Sen estimator for a set of points (x, y).
Notes
-----
This function uses an :math:`O(n \log n)`, mergesort-based algorithm
[1]_ that is a weighted extension of Knight's algorithm for Kendall's
:math:`\tau` [2]_. It can compute Shieh's weighted :math:`\tau` [3]_
between rankings without ties (i.e., permutations) by setting
`additive` and `rank` to False, as the definition given in [1]_ is a
generalization of Shieh's.
NaNs are considered the smallest possible score.
.. versionadded:: 0.19.0
References
----------
.. [1] Sebastiano Vigna, "A weighted correlation index for rankings with
ties", Proceedings of the 24th international conference on World
Wide Web, pp. 1166-1176, ACM, 2015.
.. [2] W.R. Knight, "A Computer Method for Calculating Kendall's Tau with
Ungrouped Data", Journal of the American Statistical Association,
Vol. 61, No. 314, Part 1, pp. 436-439, 1966.
.. [3] Grace S. Shieh. "A weighted Kendall's tau statistic", Statistics &
Probability Letters, Vol. 39, No. 1, pp. 17-24, 1998.
Examples
--------
>>> from scipy import stats
>>> x = [12, 2, 1, 12, 2]
>>> y = [1, 4, 7, 1, 0]
>>> tau, p_value = stats.weightedtau(x, y)
>>> tau
-0.56694968153682723
>>> p_value
nan
>>> tau, p_value = stats.weightedtau(x, y, additive=False)
>>> tau
-0.62205716951801038
NaNs are considered the smallest possible score:
>>> x = [12, 2, 1, 12, 2]
>>> y = [1, 4, 7, 1, np.nan]
>>> tau, _ = stats.weightedtau(x, y)
>>> tau
-0.56694968153682723
This is exactly Kendall's tau:
>>> x = [12, 2, 1, 12, 2]
>>> y = [1, 4, 7, 1, 0]
>>> tau, _ = stats.weightedtau(x, y, weigher=lambda x: 1)
>>> tau
-0.47140452079103173
>>> x = [12, 2, 1, 12, 2]
>>> y = [1, 4, 7, 1, 0]
>>> stats.weightedtau(x, y, rank=None)
WeightedTauResult(correlation=-0.4157652301037516, pvalue=nan)
>>> stats.weightedtau(y, x, rank=None)
WeightedTauResult(correlation=-0.71813413296990281, pvalue=nan)
"""
x = np.asarray(x).ravel()
y = np.asarray(y).ravel()
if x.size != y.size:
raise ValueError("All inputs to `weightedtau` must be of the same size, "
"found x-size %s and y-size %s" % (x.size, y.size))
if not x.size:
return WeightedTauResult(np.nan, np.nan) # Return NaN if arrays are empty
# If there are NaNs we apply _toint64()
if np.isnan(np.min(x)):
x = _toint64(x)
if np.isnan(np.min(y)):
y = _toint64(y)
# Reduce unsupported or mismatched dtypes to ranks via _toint64
if x.dtype != y.dtype:
if x.dtype != np.int64:
x = _toint64(x)
if y.dtype != np.int64:
y = _toint64(y)
else:
if x.dtype not in (np.int32, np.int64, np.float32, np.float64):
x = _toint64(x)
y = _toint64(y)
if rank is True:
return WeightedTauResult((
_weightedrankedtau(x, y, None, weigher, additive) +
_weightedrankedtau(y, x, None, weigher, additive)
) / 2, np.nan)
if rank is False:
rank = np.arange(x.size, dtype=np.intp)
elif rank is not None:
rank = np.asarray(rank).ravel()
if rank.size != x.size:
raise ValueError("All inputs to `weightedtau` must be of the same size, "
"found x-size %s and rank-size %s" % (x.size, rank.size))
return WeightedTauResult(_weightedrankedtau(x, y, rank, weigher, additive), np.nan)
#####################################
# INFERENTIAL STATISTICS #
#####################################
Ttest_1sampResult = namedtuple('Ttest_1sampResult', ('statistic', 'pvalue'))
def ttest_1samp(a, popmean, axis=0, nan_policy='propagate'):
"""
Calculates the T-test for the mean of ONE group of scores.
This is a two-sided test for the null hypothesis that the expected value
(mean) of a sample of independent observations `a` is equal to the given
population mean, `popmean`.
Parameters
----------
a : array_like
sample observation
popmean : float or array_like
expected value in null hypothesis; if array_like, then it must have the
same shape as `a` excluding the axis dimension
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
t-statistic
pvalue : float or array
two-tailed p-value
Examples
--------
>>> from scipy import stats
>>> np.random.seed(7654567) # fix seed to get the same result
>>> rvs = stats.norm.rvs(loc=5, scale=10, size=(50,2))
Test if mean of random sample is equal to true mean, and different mean.
We reject the null hypothesis in the second case and don't reject it in
the first case.
>>> stats.ttest_1samp(rvs,5.0)
(array([-0.68014479, -0.04323899]), array([ 0.49961383, 0.96568674]))
>>> stats.ttest_1samp(rvs,0.0)
(array([ 2.77025808, 4.11038784]), array([ 0.00789095, 0.00014999]))
Examples using axis and non-scalar dimension for population mean.
>>> stats.ttest_1samp(rvs,[5.0,0.0])
(array([-0.68014479, 4.11038784]), array([ 4.99613833e-01, 1.49986458e-04]))
>>> stats.ttest_1samp(rvs.T,[5.0,0.0],axis=1)
(array([-0.68014479, 4.11038784]), array([ 4.99613833e-01, 1.49986458e-04]))
>>> stats.ttest_1samp(rvs,[[5.0],[0.0]])
(array([[-0.68014479, -0.04323899],
[ 2.77025808, 4.11038784]]), array([[ 4.99613833e-01, 9.65686743e-01],
[ 7.89094663e-03, 1.49986458e-04]]))
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.ttest_1samp(a, popmean, axis)
n = a.shape[axis]
df = n - 1
d = np.mean(a, axis) - popmean
v = np.var(a, axis, ddof=1)
denom = np.sqrt(v / float(n))
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t)
return Ttest_1sampResult(t, prob)
def _ttest_finish(df, t):
"""Common code between all 3 t-test functions."""
prob = distributions.t.sf(np.abs(t), df) * 2 # use np.abs to get upper tail
if t.ndim == 0:
t = t[()]
return t, prob
def _ttest_ind_from_stats(mean1, mean2, denom, df):
d = mean1 - mean2
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t)
return (t, prob)
def _unequal_var_ttest_denom(v1, n1, v2, n2):
vn1 = v1 / n1
vn2 = v2 / n2
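    # Welch-Satterthwaite approximation to the degrees of freedom for the
    # unequal-variance (Welch) t-test.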
with np.errstate(divide='ignore', invalid='ignore'):
df = (vn1 + vn2)**2 / (vn1**2 / (n1 - 1) + vn2**2 / (n2 - 1))
# If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0).
# Hence it doesn't matter what df is as long as it's not NaN.
df = np.where(np.isnan(df), 1, df)
denom = np.sqrt(vn1 + vn2)
return df, denom
def _equal_var_ttest_denom(v1, n1, v2, n2):
df = n1 + n2 - 2.0
svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / df
denom = np.sqrt(svar * (1.0 / n1 + 1.0 / n2))
return df, denom
Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue'))
def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2,
equal_var=True):
"""
T-test for means of two independent samples from descriptive statistics.
This is a two-sided test for the null hypothesis that 2 independent samples
have identical average (expected) values.
Parameters
----------
mean1 : array_like
The mean(s) of sample 1.
std1 : array_like
The standard deviation(s) of sample 1.
nobs1 : array_like
The number(s) of observations of sample 1.
    mean2 : array_like
        The mean(s) of sample 2.
    std2 : array_like
        The standard deviation(s) of sample 2.
nobs2 : array_like
The number(s) of observations of sample 2.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
Returns
-------
statistic : float or array
The calculated t-statistics
pvalue : float or array
The two-tailed p-value.
See also
--------
scipy.stats.ttest_ind
Notes
-----
.. versionadded:: 0.16.0
References
----------
.. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test
"""
if equal_var:
df, denom = _equal_var_ttest_denom(std1**2, nobs1, std2**2, nobs2)
else:
df, denom = _unequal_var_ttest_denom(std1**2, nobs1,
std2**2, nobs2)
res = _ttest_ind_from_stats(mean1, mean2, denom, df)
return Ttest_indResult(*res)
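# A minimal usage sketch for `ttest_ind_from_stats`; the summary statistics
# below are illustrative values only, not results from any real data set.
def _demo_ttest_ind_from_stats():
    """Compare two groups from their means, standard deviations and sizes,
    with and without the equal-variance assumption."""
    pooled = ttest_ind_from_stats(mean1=15.0, std1=np.sqrt(87.5), nobs1=13,
                                  mean2=12.0, std2=np.sqrt(39.0), nobs2=11)
    welch = ttest_ind_from_stats(mean1=15.0, std1=np.sqrt(87.5), nobs1=13,
                                 mean2=12.0, std2=np.sqrt(39.0), nobs2=11,
                                 equal_var=False)
    return pooled, welch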
def ttest_ind(a, b, axis=0, equal_var=True, nan_policy='propagate'):
"""
Calculates the T-test for the means of *two independent* samples of scores.
This is a two-sided test for the null hypothesis that 2 independent samples
have identical average (expected) values. This test assumes that the
populations have identical variances by default.
Parameters
----------
a, b : array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
.. versionadded:: 0.11.0
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
The calculated t-statistic.
pvalue : float or array
The two-tailed p-value.
Notes
-----
We can use this test, if we observe two independent samples from
the same or different population, e.g. exam scores of boys and
girls or of two ethnic groups. The test measures whether the
average (expected) value differs significantly across samples. If
we observe a large p-value, for example larger than 0.05 or 0.1,
then we cannot reject the null hypothesis of identical average scores.
If the p-value is smaller than the threshold, e.g. 1%, 5% or 10%,
then we reject the null hypothesis of equal averages.
References
----------
.. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678)
Test with sample with identical means:
>>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> rvs2 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> stats.ttest_ind(rvs1,rvs2)
(0.26833823296239279, 0.78849443369564776)
>>> stats.ttest_ind(rvs1,rvs2, equal_var = False)
(0.26833823296239279, 0.78849452749500748)
`ttest_ind` underestimates p for unequal variances:
>>> rvs3 = stats.norm.rvs(loc=5, scale=20, size=500)
>>> stats.ttest_ind(rvs1, rvs3)
(-0.46580283298287162, 0.64145827413436174)
>>> stats.ttest_ind(rvs1, rvs3, equal_var = False)
(-0.46580283298287162, 0.64149646246569292)
When n1 != n2, the equal variance t-statistic is no longer equal to the
unequal variance t-statistic:
>>> rvs4 = stats.norm.rvs(loc=5, scale=20, size=100)
>>> stats.ttest_ind(rvs1, rvs4)
(-0.99882539442782481, 0.3182832709103896)
>>> stats.ttest_ind(rvs1, rvs4, equal_var = False)
(-0.69712570584654099, 0.48716927725402048)
T-test with different means, variance, and n:
>>> rvs5 = stats.norm.rvs(loc=8, scale=20, size=100)
>>> stats.ttest_ind(rvs1, rvs5)
(-1.4679669854490653, 0.14263895620529152)
>>> stats.ttest_ind(rvs1, rvs5, equal_var = False)
(-0.94365973617132992, 0.34744170334794122)
"""
a, b, axis = _chk2_asarray(a, b, axis)
# check both a and b
cna, npa = _contains_nan(a, nan_policy)
cnb, npb = _contains_nan(b, nan_policy)
contains_nan = cna or cnb
if npa == 'omit' or npb == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
b = ma.masked_invalid(b)
return mstats_basic.ttest_ind(a, b, axis, equal_var)
if a.size == 0 or b.size == 0:
return Ttest_indResult(np.nan, np.nan)
v1 = np.var(a, axis, ddof=1)
v2 = np.var(b, axis, ddof=1)
n1 = a.shape[axis]
n2 = b.shape[axis]
if equal_var:
df, denom = _equal_var_ttest_denom(v1, n1, v2, n2)
else:
df, denom = _unequal_var_ttest_denom(v1, n1, v2, n2)
res = _ttest_ind_from_stats(np.mean(a, axis), np.mean(b, axis), denom, df)
return Ttest_indResult(*res)
Ttest_relResult = namedtuple('Ttest_relResult', ('statistic', 'pvalue'))
def ttest_rel(a, b, axis=0, nan_policy='propagate'):
"""
Calculates the T-test on TWO RELATED samples of scores, a and b.
This is a two-sided test for the null hypothesis that 2 related or
repeated samples have identical average (expected) values.
Parameters
----------
a, b : array_like
The arrays must have the same shape.
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
t-statistic
pvalue : float or array
two-tailed p-value
Notes
-----
    Examples for the use are scores of the same set of students in
different exams, or repeated sampling from the same units. The
test measures whether the average score differs significantly
across samples (e.g. exams). If we observe a large p-value, for
    example greater than 0.05 or 0.1, then we cannot reject the null
hypothesis of identical average scores. If the p-value is smaller
than the threshold, e.g. 1%, 5% or 10%, then we reject the null
hypothesis of equal averages. Small p-values are associated with
large t-statistics.
References
----------
http://en.wikipedia.org/wiki/T-test#Dependent_t-test
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678) # fix random seed to get same numbers
>>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> rvs2 = (stats.norm.rvs(loc=5,scale=10,size=500) +
... stats.norm.rvs(scale=0.2,size=500))
>>> stats.ttest_rel(rvs1,rvs2)
(0.24101764965300962, 0.80964043445811562)
>>> rvs3 = (stats.norm.rvs(loc=8,scale=10,size=500) +
... stats.norm.rvs(scale=0.2,size=500))
>>> stats.ttest_rel(rvs1,rvs3)
(-3.9995108708727933, 7.3082402191726459e-005)
"""
a, b, axis = _chk2_asarray(a, b, axis)
cna, npa = _contains_nan(a, nan_policy)
cnb, npb = _contains_nan(b, nan_policy)
contains_nan = cna or cnb
if npa == 'omit' or npb == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
b = ma.masked_invalid(b)
m = ma.mask_or(ma.getmask(a), ma.getmask(b))
aa = ma.array(a, mask=m, copy=True)
bb = ma.array(b, mask=m, copy=True)
return mstats_basic.ttest_rel(aa, bb, axis)
if a.shape[axis] != b.shape[axis]:
raise ValueError('unequal length arrays')
if a.size == 0 or b.size == 0:
        return Ttest_relResult(np.nan, np.nan)
n = a.shape[axis]
df = float(n - 1)
d = (a - b).astype(np.float64)
v = np.var(d, axis, ddof=1)
dm = np.mean(d, axis)
denom = np.sqrt(v / float(n))
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(dm, denom)
t, prob = _ttest_finish(df, t)
return Ttest_relResult(t, prob)
KstestResult = namedtuple('KstestResult', ('statistic', 'pvalue'))
def kstest(rvs, cdf, args=(), N=20, alternative='two-sided', mode='approx'):
"""
Perform the Kolmogorov-Smirnov test for goodness of fit.
This performs a test of the distribution G(x) of an observed
random variable against a given distribution F(x). Under the null
hypothesis the two distributions are identical, G(x)=F(x). The
alternative hypothesis can be either 'two-sided' (default), 'less'
or 'greater'. The KS test is only valid for continuous distributions.
Parameters
----------
rvs : str, array or callable
If a string, it should be the name of a distribution in `scipy.stats`.
If an array, it should be a 1-D array of observations of random
variables.
If a callable, it should be a function to generate random variables;
it is required to have a keyword argument `size`.
cdf : str or callable
If a string, it should be the name of a distribution in `scipy.stats`.
If `rvs` is a string then `cdf` can be False or the same as `rvs`.
If a callable, that callable is used to calculate the cdf.
args : tuple, sequence, optional
Distribution parameters, used if `rvs` or `cdf` are strings.
N : int, optional
Sample size if `rvs` is string or callable. Default is 20.
alternative : {'two-sided', 'less','greater'}, optional
Defines the alternative hypothesis (see explanation above).
Default is 'two-sided'.
mode : 'approx' (default) or 'asymp', optional
Defines the distribution used for calculating the p-value.
- 'approx' : use approximation to exact distribution of test statistic
- 'asymp' : use asymptotic distribution of test statistic
Returns
-------
statistic : float
KS test statistic, either D, D+ or D-.
pvalue : float
One-tailed or two-tailed p-value.
Notes
-----
In the one-sided test, the alternative is that the empirical
cumulative distribution function of the random variable is "less"
or "greater" than the cumulative distribution function F(x) of the
hypothesis, ``G(x)<=F(x)``, resp. ``G(x)>=F(x)``.
Examples
--------
>>> from scipy import stats
>>> x = np.linspace(-15, 15, 9)
>>> stats.kstest(x, 'norm')
(0.44435602715924361, 0.038850142705171065)
>>> np.random.seed(987654321) # set random seed to get the same result
>>> stats.kstest('norm', False, N=100)
(0.058352892479417884, 0.88531190944151261)
The above lines are equivalent to:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.norm.rvs(size=100), 'norm')
(0.058352892479417884, 0.88531190944151261)
*Test against one-sided alternative hypothesis*
Shift distribution to larger values, so that ``cdf_dgp(x) < norm.cdf(x)``:
>>> np.random.seed(987654321)
>>> x = stats.norm.rvs(loc=0.2, size=100)
>>> stats.kstest(x,'norm', alternative = 'less')
(0.12464329735846891, 0.040989164077641749)
Reject equal distribution against alternative hypothesis: less
>>> stats.kstest(x,'norm', alternative = 'greater')
(0.0072115233216311081, 0.98531158590396395)
Don't reject equal distribution against alternative hypothesis: greater
>>> stats.kstest(x,'norm', mode='asymp')
(0.12464329735846891, 0.08944488871182088)
*Testing t distributed random variables against normal distribution*
With 100 degrees of freedom the t distribution looks close to the normal
distribution, and the K-S test does not reject the hypothesis that the
sample came from the normal distribution:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.t.rvs(100,size=100),'norm')
(0.072018929165471257, 0.67630062862479168)
With 3 degrees of freedom the t distribution looks sufficiently different
from the normal distribution, that we can reject the hypothesis that the
sample came from the normal distribution at the 10% level:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.t.rvs(3,size=100),'norm')
(0.131016895759829, 0.058826222555312224)
"""
if isinstance(rvs, string_types):
if (not cdf) or (cdf == rvs):
cdf = getattr(distributions, rvs).cdf
rvs = getattr(distributions, rvs).rvs
else:
raise AttributeError("if rvs is string, cdf has to be the "
"same distribution")
if isinstance(cdf, string_types):
cdf = getattr(distributions, cdf).cdf
if callable(rvs):
kwds = {'size': N}
vals = np.sort(rvs(*args, **kwds))
else:
vals = np.sort(rvs)
N = len(vals)
cdfvals = cdf(vals, *args)
# to not break compatibility with existing code
if alternative == 'two_sided':
alternative = 'two-sided'
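    # D+ is the largest amount by which the empirical CDF exceeds the
    # hypothesized CDF, and D- the largest amount by which it falls below it;
    # the two-sided statistic D is the maximum of the two.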
if alternative in ['two-sided', 'greater']:
Dplus = (np.arange(1.0, N + 1)/N - cdfvals).max()
if alternative == 'greater':
return KstestResult(Dplus, distributions.ksone.sf(Dplus, N))
if alternative in ['two-sided', 'less']:
Dmin = (cdfvals - np.arange(0.0, N)/N).max()
if alternative == 'less':
return KstestResult(Dmin, distributions.ksone.sf(Dmin, N))
if alternative == 'two-sided':
D = np.max([Dplus, Dmin])
if mode == 'asymp':
return KstestResult(D, distributions.kstwobign.sf(D * np.sqrt(N)))
if mode == 'approx':
pval_two = distributions.kstwobign.sf(D * np.sqrt(N))
if N > 2666 or pval_two > 0.80 - N*0.3/1000:
return KstestResult(D, pval_two)
else:
return KstestResult(D, 2 * distributions.ksone.sf(D, N))
# Map from names to lambda_ values used in power_divergence().
_power_div_lambda_names = {
"pearson": 1,
"log-likelihood": 0,
"freeman-tukey": -0.5,
"mod-log-likelihood": -1,
"neyman": -2,
"cressie-read": 2/3,
}
def _count(a, axis=None):
"""
Count the number of non-masked elements of an array.
This function behaves like np.ma.count(), but is much faster
for ndarrays.
"""
if hasattr(a, 'count'):
num = a.count(axis=axis)
if isinstance(num, np.ndarray) and num.ndim == 0:
# In some cases, the `count` method returns a scalar array (e.g.
# np.array(3)), but we want a plain integer.
num = int(num)
else:
if axis is None:
num = a.size
else:
num = a.shape[axis]
return num
Power_divergenceResult = namedtuple('Power_divergenceResult',
('statistic', 'pvalue'))
def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):
"""
Cressie-Read power divergence statistic and goodness of fit test.
This function tests the null hypothesis that the categorical data
has the given frequencies, using the Cressie-Read power divergence
statistic.
Parameters
----------
f_obs : array_like
Observed frequencies in each category.
f_exp : array_like, optional
Expected frequencies in each category. By default the categories are
assumed to be equally likely.
ddof : int, optional
"Delta degrees of freedom": adjustment to the degrees of freedom
for the p-value. The p-value is computed using a chi-squared
distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
is the number of observed frequencies. The default value of `ddof`
is 0.
axis : int or None, optional
The axis of the broadcast result of `f_obs` and `f_exp` along which to
apply the test. If axis is None, all values in `f_obs` are treated
as a single data set. Default is 0.
lambda_ : float or str, optional
`lambda_` gives the power in the Cressie-Read power divergence
statistic. The default is 1. For convenience, `lambda_` may be
assigned one of the following strings, in which case the
corresponding numerical value is used::
String Value Description
"pearson" 1 Pearson's chi-squared statistic.
In this case, the function is
equivalent to `stats.chisquare`.
"log-likelihood" 0 Log-likelihood ratio. Also known as
the G-test [3]_.
"freeman-tukey" -1/2 Freeman-Tukey statistic.
"mod-log-likelihood" -1 Modified log-likelihood ratio.
"neyman" -2 Neyman's statistic.
"cressie-read" 2/3 The power recommended in [5]_.
Returns
-------
statistic : float or ndarray
The Cressie-Read power divergence test statistic. The value is
        a float if `axis` is None or if `f_obs` and `f_exp` are 1-D.
pvalue : float or ndarray
The p-value of the test. The value is a float if `ddof` and the
return value `stat` are scalars.
See Also
--------
chisquare
Notes
-----
This test is invalid when the observed or expected frequencies in each
category are too small. A typical rule is that all of the observed
and expected frequencies should be at least 5.
When `lambda_` is less than zero, the formula for the statistic involves
dividing by `f_obs`, so a warning or error may be generated if any value
in `f_obs` is 0.
Similarly, a warning or error may be generated if any value in `f_exp` is
zero when `lambda_` >= 0.
The default degrees of freedom, k-1, are for the case when no parameters
of the distribution are estimated. If p parameters are estimated by
efficient maximum likelihood then the correct degrees of freedom are
k-1-p. If the parameters are estimated in a different way, then the
dof can be between k-1-p and k-1. However, it is also possible that
the asymptotic distribution is not a chisquare, in which case this
test is not appropriate.
This function handles masked arrays. If an element of `f_obs` or `f_exp`
is masked, then data at that position is ignored, and does not count
towards the size of the data set.
.. versionadded:: 0.13.0
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 8. http://faculty.vassar.edu/lowry/ch8pt1.html
.. [2] "Chi-squared test", http://en.wikipedia.org/wiki/Chi-squared_test
.. [3] "G-test", http://en.wikipedia.org/wiki/G-test
.. [4] Sokal, R. R. and Rohlf, F. J. "Biometry: the principles and
practice of statistics in biological research", New York: Freeman
(1981)
.. [5] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit
Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),
pp. 440-464.
Examples
--------
(See `chisquare` for more examples.)
When just `f_obs` is given, it is assumed that the expected frequencies
are uniform and given by the mean of the observed frequencies. Here we
perform a G-test (i.e. use the log-likelihood ratio statistic):
>>> from scipy.stats import power_divergence
>>> power_divergence([16, 18, 16, 14, 12, 12], lambda_='log-likelihood')
(2.006573162632538, 0.84823476779463769)
The expected frequencies can be given with the `f_exp` argument:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[16, 16, 16, 16, 16, 8],
... lambda_='log-likelihood')
(3.3281031458963746, 0.6495419288047497)
When `f_obs` is 2-D, by default the test is applied to each column.
>>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
>>> obs.shape
(6, 2)
>>> power_divergence(obs, lambda_="log-likelihood")
(array([ 2.00657316, 6.77634498]), array([ 0.84823477, 0.23781225]))
By setting ``axis=None``, the test is applied to all data in the array,
which is equivalent to applying the test to the flattened array.
>>> power_divergence(obs, axis=None)
(23.31034482758621, 0.015975692534127565)
>>> power_divergence(obs.ravel())
(23.31034482758621, 0.015975692534127565)
`ddof` is the change to make to the default degrees of freedom.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=1)
(2.0, 0.73575888234288467)
The calculation of the p-values is done by broadcasting the
test statistic with `ddof`.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
(2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))
`f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
`f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
statistics, we must use ``axis=1``:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[[16, 16, 16, 16, 16, 8],
... [8, 20, 20, 16, 12, 12]],
... axis=1)
(array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))
"""
# Convert the input argument `lambda_` to a numerical value.
if isinstance(lambda_, string_types):
if lambda_ not in _power_div_lambda_names:
names = repr(list(_power_div_lambda_names.keys()))[1:-1]
raise ValueError("invalid string for lambda_: {0!r}. Valid strings "
"are {1}".format(lambda_, names))
lambda_ = _power_div_lambda_names[lambda_]
elif lambda_ is None:
lambda_ = 1
f_obs = np.asanyarray(f_obs)
if f_exp is not None:
f_exp = np.atleast_1d(np.asanyarray(f_exp))
else:
# Compute the equivalent of
# f_exp = f_obs.mean(axis=axis, keepdims=True)
# Older versions of numpy do not have the 'keepdims' argument, so
# we have to do a little work to achieve the same result.
# Ignore 'invalid' errors so the edge case of a data set with length 0
# is handled without spurious warnings.
with np.errstate(invalid='ignore'):
f_exp = np.atleast_1d(f_obs.mean(axis=axis))
if axis is not None:
reduced_shape = list(f_obs.shape)
reduced_shape[axis] = 1
f_exp.shape = reduced_shape
# `terms` is the array of terms that are summed along `axis` to create
# the test statistic. We use some specialized code for a few special
# cases of lambda_.
if lambda_ == 1:
# Pearson's chi-squared statistic
terms = (f_obs - f_exp)**2 / f_exp
elif lambda_ == 0:
# Log-likelihood ratio (i.e. G-test)
terms = 2.0 * special.xlogy(f_obs, f_obs / f_exp)
elif lambda_ == -1:
# Modified log-likelihood ratio
terms = 2.0 * special.xlogy(f_exp, f_exp / f_obs)
else:
# General Cressie-Read power divergence.
terms = f_obs * ((f_obs / f_exp)**lambda_ - 1)
terms /= 0.5 * lambda_ * (lambda_ + 1)
stat = terms.sum(axis=axis)
num_obs = _count(terms, axis=axis)
ddof = asarray(ddof)
p = distributions.chi2.sf(stat, num_obs - 1 - ddof)
return Power_divergenceResult(stat, p)
def chisquare(f_obs, f_exp=None, ddof=0, axis=0):
"""
Calculates a one-way chi square test.
The chi square test tests the null hypothesis that the categorical data
has the given frequencies.
Parameters
----------
f_obs : array_like
Observed frequencies in each category.
f_exp : array_like, optional
Expected frequencies in each category. By default the categories are
assumed to be equally likely.
ddof : int, optional
"Delta degrees of freedom": adjustment to the degrees of freedom
for the p-value. The p-value is computed using a chi-squared
distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
is the number of observed frequencies. The default value of `ddof`
is 0.
axis : int or None, optional
The axis of the broadcast result of `f_obs` and `f_exp` along which to
apply the test. If axis is None, all values in `f_obs` are treated
as a single data set. Default is 0.
Returns
-------
chisq : float or ndarray
The chi-squared test statistic. The value is a float if `axis` is
None or `f_obs` and `f_exp` are 1-D.
p : float or ndarray
The p-value of the test. The value is a float if `ddof` and the
return value `chisq` are scalars.
See Also
--------
power_divergence
mstats.chisquare
Notes
-----
This test is invalid when the observed or expected frequencies in each
category are too small. A typical rule is that all of the observed
and expected frequencies should be at least 5.
The default degrees of freedom, k-1, are for the case when no parameters
of the distribution are estimated. If p parameters are estimated by
efficient maximum likelihood then the correct degrees of freedom are
k-1-p. If the parameters are estimated in a different way, then the
dof can be between k-1-p and k-1. However, it is also possible that
the asymptotic distribution is not a chisquare, in which case this
test is not appropriate.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 8. http://faculty.vassar.edu/lowry/ch8pt1.html
.. [2] "Chi-squared test", http://en.wikipedia.org/wiki/Chi-squared_test
Examples
--------
When just `f_obs` is given, it is assumed that the expected frequencies
are uniform and given by the mean of the observed frequencies.
>>> from scipy.stats import chisquare
>>> chisquare([16, 18, 16, 14, 12, 12])
(2.0, 0.84914503608460956)
With `f_exp` the expected frequencies can be given.
>>> chisquare([16, 18, 16, 14, 12, 12], f_exp=[16, 16, 16, 16, 16, 8])
(3.5, 0.62338762774958223)
When `f_obs` is 2-D, by default the test is applied to each column.
>>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
>>> obs.shape
(6, 2)
>>> chisquare(obs)
(array([ 2. , 6.66666667]), array([ 0.84914504, 0.24663415]))
By setting ``axis=None``, the test is applied to all data in the array,
which is equivalent to applying the test to the flattened array.
>>> chisquare(obs, axis=None)
(23.31034482758621, 0.015975692534127565)
>>> chisquare(obs.ravel())
(23.31034482758621, 0.015975692534127565)
`ddof` is the change to make to the default degrees of freedom.
>>> chisquare([16, 18, 16, 14, 12, 12], ddof=1)
(2.0, 0.73575888234288467)
The calculation of the p-values is done by broadcasting the
chi-squared statistic with `ddof`.
>>> chisquare([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
(2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))
`f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
`f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
statistics, we use ``axis=1``:
>>> chisquare([16, 18, 16, 14, 12, 12],
... f_exp=[[16, 16, 16, 16, 16, 8], [8, 20, 20, 16, 12, 12]],
... axis=1)
(array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))
"""
return power_divergence(f_obs, f_exp=f_exp, ddof=ddof, axis=axis,
lambda_="pearson")
Ks_2sampResult = namedtuple('Ks_2sampResult', ('statistic', 'pvalue'))
def ks_2samp(data1, data2):
"""
Computes the Kolmogorov-Smirnov statistic on 2 samples.
This is a two-sided test for the null hypothesis that 2 independent samples
are drawn from the same continuous distribution.
Parameters
----------
data1, data2 : sequence of 1-D ndarrays
two arrays of sample observations assumed to be drawn from a continuous
distribution, sample sizes can be different
Returns
-------
statistic : float
KS statistic
pvalue : float
two-tailed p-value
Notes
-----
This tests whether 2 samples are drawn from the same distribution. Note
that, like in the case of the one-sample K-S test, the distribution is
assumed to be continuous.
This is the two-sided test, one-sided tests are not implemented.
The test uses the two-sided asymptotic Kolmogorov-Smirnov distribution.
If the K-S statistic is small or the p-value is high, then we cannot
reject the hypothesis that the distributions of the two samples
are the same.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678) #fix random seed to get the same result
>>> n1 = 200 # size of first sample
>>> n2 = 300 # size of second sample
For a different distribution, we can reject the null hypothesis since the
pvalue is below 1%:
>>> rvs1 = stats.norm.rvs(size=n1, loc=0., scale=1)
>>> rvs2 = stats.norm.rvs(size=n2, loc=0.5, scale=1.5)
>>> stats.ks_2samp(rvs1, rvs2)
(0.20833333333333337, 4.6674975515806989e-005)
For a slightly different distribution, we cannot reject the null hypothesis
at a 10% or lower alpha since the p-value at 0.144 is higher than 10%
>>> rvs3 = stats.norm.rvs(size=n2, loc=0.01, scale=1.0)
>>> stats.ks_2samp(rvs1, rvs3)
(0.10333333333333333, 0.14498781825751686)
For an identical distribution, we cannot reject the null hypothesis since
the p-value is high, 41%:
>>> rvs4 = stats.norm.rvs(size=n2, loc=0.0, scale=1.0)
>>> stats.ks_2samp(rvs1, rvs4)
(0.07999999999999996, 0.41126949729859719)
"""
data1 = np.sort(data1)
data2 = np.sort(data2)
n1 = data1.shape[0]
n2 = data2.shape[0]
data_all = np.concatenate([data1, data2])
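    # Evaluate both empirical CDFs at every pooled observation; the KS
    # statistic is the largest absolute difference between them.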
cdf1 = np.searchsorted(data1, data_all, side='right') / (1.0*n1)
cdf2 = np.searchsorted(data2, data_all, side='right') / (1.0*n2)
d = np.max(np.absolute(cdf1 - cdf2))
# Note: d absolute not signed distance
en = np.sqrt(n1 * n2 / float(n1 + n2))
try:
prob = distributions.kstwobign.sf((en + 0.12 + 0.11 / en) * d)
    except Exception:
prob = 1.0
return Ks_2sampResult(d, prob)
def tiecorrect(rankvals):
"""
Tie correction factor for ties in the Mann-Whitney U and
Kruskal-Wallis H tests.
Parameters
----------
rankvals : array_like
A 1-D sequence of ranks. Typically this will be the array
returned by `stats.rankdata`.
Returns
-------
factor : float
Correction factor for U or H.
See Also
--------
rankdata : Assign ranks to the data
mannwhitneyu : Mann-Whitney rank test
kruskal : Kruskal-Wallis H test
References
----------
.. [1] Siegel, S. (1956) Nonparametric Statistics for the Behavioral
Sciences. New York: McGraw-Hill.
Examples
--------
>>> from scipy.stats import tiecorrect, rankdata
>>> tiecorrect([1, 2.5, 2.5, 4])
0.9
>>> ranks = rankdata([1, 3, 2, 4, 5, 7, 2, 8, 4])
>>> ranks
array([ 1. , 4. , 2.5, 5.5, 7. , 8. , 2.5, 9. , 5.5])
>>> tiecorrect(ranks)
0.9833333333333333
"""
arr = np.sort(rankvals)
idx = np.nonzero(np.r_[True, arr[1:] != arr[:-1], True])[0]
cnt = np.diff(idx).astype(np.float64)
size = np.float64(arr.size)
return 1.0 if size < 2 else 1.0 - (cnt**3 - cnt).sum() / (size**3 - size)
MannwhitneyuResult = namedtuple('MannwhitneyuResult', ('statistic', 'pvalue'))
def mannwhitneyu(x, y, use_continuity=True, alternative=None):
"""
Computes the Mann-Whitney rank test on samples x and y.
Parameters
----------
x, y : array_like
Array of samples, should be one-dimensional.
use_continuity : bool, optional
Whether a continuity correction (1/2.) should be taken into
account. Default is True.
alternative : None (deprecated), 'less', 'two-sided', or 'greater'
Whether to get the p-value for the one-sided hypothesis ('less'
or 'greater') or for the two-sided hypothesis ('two-sided').
Defaults to None, which results in a p-value half the size of
the 'two-sided' p-value and a different U statistic. The
default behavior is not the same as using 'less' or 'greater':
it only exists for backward compatibility and is deprecated.
Returns
-------
statistic : float
The Mann-Whitney U statistic, equal to min(U for x, U for y) if
`alternative` is equal to None (deprecated; exists for backward
compatibility), and U for y otherwise.
pvalue : float
p-value assuming an asymptotic normal distribution. One-sided or
two-sided, depending on the choice of `alternative`.
Notes
-----
    Use only when the number of observations in each sample is > 20 and
you have 2 independent samples of ranks. Mann-Whitney U is
significant if the u-obtained is LESS THAN or equal to the critical
value of U.
This test corrects for ties and by default uses a continuity correction.
"""
if alternative is None:
warnings.warn("Calling `mannwhitneyu` without specifying "
"`alternative` is deprecated.", DeprecationWarning)
x = np.asarray(x)
y = np.asarray(y)
n1 = len(x)
n2 = len(y)
ranked = rankdata(np.concatenate((x, y)))
rankx = ranked[0:n1] # get the x-ranks
u1 = n1*n2 + (n1*(n1+1))/2.0 - np.sum(rankx, axis=0) # calc U for x
u2 = n1*n2 - u1 # remainder is U for y
T = tiecorrect(ranked)
if T == 0:
raise ValueError('All numbers are identical in mannwhitneyu')
sd = np.sqrt(T * n1 * n2 * (n1+n2+1) / 12.0)
meanrank = n1*n2/2.0 + 0.5 * use_continuity
if alternative is None or alternative == 'two-sided':
bigu = max(u1, u2)
elif alternative == 'less':
bigu = u1
elif alternative == 'greater':
bigu = u2
else:
raise ValueError("alternative should be None, 'less', 'greater' "
"or 'two-sided'")
z = (bigu - meanrank) / sd
if alternative is None:
# This behavior, equal to half the size of the two-sided
# p-value, is deprecated.
p = distributions.norm.sf(abs(z))
elif alternative == 'two-sided':
p = 2 * distributions.norm.sf(abs(z))
else:
p = distributions.norm.sf(z)
u = u2
# This behavior is deprecated.
if alternative is None:
u = min(u1, u2)
return MannwhitneyuResult(u, p)
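# A minimal usage sketch for `mannwhitneyu`, using two small illustrative
# samples (real applications should have more than 20 observations per group).
def _demo_mannwhitneyu():
    """One-sided test of whether values in `x` tend to be smaller than in `y`."""
    x = [2.1, 3.4, 1.8, 2.9, 3.1, 2.4, 2.7, 3.0]
    y = [3.6, 2.8, 4.1, 3.9, 3.3, 4.4, 3.7, 3.2]
    return mannwhitneyu(x, y, alternative='less')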
RanksumsResult = namedtuple('RanksumsResult', ('statistic', 'pvalue'))
def ranksums(x, y):
"""
Compute the Wilcoxon rank-sum statistic for two samples.
The Wilcoxon rank-sum test tests the null hypothesis that two sets
of measurements are drawn from the same distribution. The alternative
hypothesis is that values in one sample are more likely to be
larger than the values in the other sample.
This test should be used to compare two samples from continuous
distributions. It does not handle ties between measurements
in x and y. For tie-handling and an optional continuity correction
see `scipy.stats.mannwhitneyu`.
Parameters
----------
x,y : array_like
The data from the two samples
Returns
-------
statistic : float
The test statistic under the large-sample approximation that the
rank sum statistic is normally distributed
pvalue : float
The two-sided p-value of the test
References
----------
.. [1] http://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test
"""
x, y = map(np.asarray, (x, y))
n1 = len(x)
n2 = len(y)
alldata = np.concatenate((x, y))
ranked = rankdata(alldata)
x = ranked[:n1]
s = np.sum(x, axis=0)
expected = n1 * (n1+n2+1) / 2.0
z = (s - expected) / np.sqrt(n1*n2*(n1+n2+1)/12.0)
prob = 2 * distributions.norm.sf(abs(z))
return RanksumsResult(z, prob)
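# A minimal usage sketch for `ranksums` with two illustrative samples; unlike
# `mannwhitneyu` above, no tie or continuity correction is applied.
def _demo_ranksums():
    x = [1.2, 2.3, 3.1, 4.0, 5.6, 6.4]
    y = [2.9, 3.8, 4.7, 5.5, 6.9, 7.3]
    return ranksums(x, y)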
KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue'))
def kruskal(*args, **kwargs):
"""
Compute the Kruskal-Wallis H-test for independent samples
The Kruskal-Wallis H-test tests the null hypothesis that the population
    medians of all of the groups are equal. It is a non-parametric version of
ANOVA. The test works on 2 or more independent samples, which may have
different sizes. Note that rejecting the null hypothesis does not
indicate which of the groups differs. Post-hoc comparisons between
groups are required to determine which groups are different.
Parameters
----------
sample1, sample2, ... : array_like
Two or more arrays with the sample measurements can be given as
arguments.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float
The Kruskal-Wallis H statistic, corrected for ties
pvalue : float
The p-value for the test using the assumption that H has a chi
square distribution
See Also
--------
f_oneway : 1-way ANOVA
mannwhitneyu : Mann-Whitney rank test on two samples.
friedmanchisquare : Friedman test for repeated measurements
Notes
-----
Due to the assumption that H has a chi square distribution, the number
of samples in each group must not be too small. A typical rule is
that each sample must have at least 5 measurements.
References
----------
.. [1] W. H. Kruskal & W. W. Wallis, "Use of Ranks in
One-Criterion Variance Analysis", Journal of the American Statistical
Association, Vol. 47, Issue 260, pp. 583-621, 1952.
.. [2] http://en.wikipedia.org/wiki/Kruskal-Wallis_one-way_analysis_of_variance
Examples
--------
>>> from scipy import stats
>>> x = [1, 3, 5, 7, 9]
>>> y = [2, 4, 6, 8, 10]
>>> stats.kruskal(x, y)
KruskalResult(statistic=0.27272727272727337, pvalue=0.60150813444058948)
>>> x = [1, 1, 1]
>>> y = [2, 2, 2]
>>> z = [2, 2]
>>> stats.kruskal(x, y, z)
KruskalResult(statistic=7.0, pvalue=0.030197383422318501)
"""
args = list(map(np.asarray, args))
num_groups = len(args)
if num_groups < 2:
raise ValueError("Need at least two groups in stats.kruskal()")
for arg in args:
if arg.size == 0:
return KruskalResult(np.nan, np.nan)
n = np.asarray(list(map(len, args)))
if 'nan_policy' in kwargs.keys():
if kwargs['nan_policy'] not in ('propagate', 'raise', 'omit'):
raise ValueError("nan_policy must be 'propagate', "
"'raise' or'omit'")
else:
nan_policy = kwargs['nan_policy']
else:
nan_policy = 'propagate'
contains_nan = False
for arg in args:
cn = _contains_nan(arg, nan_policy)
if cn[0]:
contains_nan = True
break
if contains_nan and nan_policy == 'omit':
        args = [ma.masked_invalid(a) for a in args]
return mstats_basic.kruskal(*args)
if contains_nan and nan_policy == 'propagate':
return KruskalResult(np.nan, np.nan)
alldata = np.concatenate(args)
ranked = rankdata(alldata)
ties = tiecorrect(ranked)
if ties == 0:
raise ValueError('All numbers are identical in kruskal')
# Compute sum^2/n for each group and sum
j = np.insert(np.cumsum(n), 0, 0)
ssbn = 0
for i in range(num_groups):
ssbn += _square_of_sums(ranked[j[i]:j[i+1]]) / float(n[i])
totaln = np.sum(n)
h = 12.0 / (totaln * (totaln + 1)) * ssbn - 3 * (totaln + 1)
df = num_groups - 1
h /= ties
return KruskalResult(h, distributions.chi2.sf(h, df))
FriedmanchisquareResult = namedtuple('FriedmanchisquareResult',
('statistic', 'pvalue'))
def friedmanchisquare(*args):
"""
Computes the Friedman test for repeated measurements
The Friedman test tests the null hypothesis that repeated measurements of
the same individuals have the same distribution. It is often used
to test for consistency among measurements obtained in different ways.
For example, if two measurement techniques are used on the same set of
individuals, the Friedman test can be used to determine if the two
measurement techniques are consistent.
Parameters
----------
measurements1, measurements2, measurements3... : array_like
Arrays of measurements. All of the arrays must have the same number
of elements. At least 3 sets of measurements must be given.
Returns
-------
statistic : float
the test statistic, correcting for ties
pvalue : float
the associated p-value assuming that the test statistic has a chi
squared distribution
Notes
-----
Due to the assumption that the test statistic has a chi squared
distribution, the p-value is only reliable for n > 10 and more than
6 repeated measurements.
References
----------
.. [1] http://en.wikipedia.org/wiki/Friedman_test
"""
k = len(args)
if k < 3:
raise ValueError('Less than 3 levels. Friedman test not appropriate.')
n = len(args[0])
for i in range(1, k):
if len(args[i]) != n:
raise ValueError('Unequal N in friedmanchisquare. Aborting.')
# Rank data
data = np.vstack(args).T
data = data.astype(float)
for i in range(len(data)):
data[i] = rankdata(data[i])
# Handle ties
ties = 0
for i in range(len(data)):
replist, repnum = find_repeats(array(data[i]))
for t in repnum:
ties += t * (t*t - 1)
c = 1 - ties / float(k*(k*k - 1)*n)
ssbn = np.sum(data.sum(axis=0)**2)
chisq = (12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)) / c
return FriedmanchisquareResult(chisq, distributions.chi2.sf(chisq, k - 1))
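# A minimal usage sketch for `friedmanchisquare`: three repeated measurements
# on the same six illustrative subjects. With n this small the chi-squared
# approximation is only indicative (the docstring recommends n > 10).
def _demo_friedmanchisquare():
    before = [72, 65, 80, 74, 69, 77]
    after_one_week = [70, 62, 79, 70, 68, 74]
    after_four_weeks = [68, 60, 75, 71, 65, 72]
    return friedmanchisquare(before, after_one_week, after_four_weeks)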
def combine_pvalues(pvalues, method='fisher', weights=None):
"""
Methods for combining the p-values of independent tests bearing upon the
same hypothesis.
Parameters
----------
pvalues : array_like, 1-D
Array of p-values assumed to come from independent tests.
method : {'fisher', 'stouffer'}, optional
Name of method to use to combine p-values. The following methods are
available:
- "fisher": Fisher's method (Fisher's combined probability test),
the default.
- "stouffer": Stouffer's Z-score method.
weights : array_like, 1-D, optional
Optional array of weights used only for Stouffer's Z-score method.
Returns
-------
statistic: float
The statistic calculated by the specified method:
- "fisher": The chi-squared statistic
- "stouffer": The Z-score
pval: float
The combined p-value.
Notes
-----
Fisher's method (also known as Fisher's combined probability test) [1]_ uses
a chi-squared statistic to compute a combined p-value. The closely related
Stouffer's Z-score method [2]_ uses Z-scores rather than p-values. The
advantage of Stouffer's method is that it is straightforward to introduce
weights, which can make Stouffer's method more powerful than Fisher's
method when the p-values are from studies of different size [3]_ [4]_.
Fisher's method may be extended to combine p-values from dependent tests
[5]_. Extensions such as Brown's method and Kost's method are not currently
implemented.
.. versionadded:: 0.15.0
References
----------
.. [1] https://en.wikipedia.org/wiki/Fisher%27s_method
.. [2] http://en.wikipedia.org/wiki/Fisher's_method#Relation_to_Stouffer.27s_Z-score_method
.. [3] Whitlock, M. C. "Combining probability from independent tests: the
weighted Z-method is superior to Fisher's approach." Journal of
Evolutionary Biology 18, no. 5 (2005): 1368-1373.
.. [4] Zaykin, Dmitri V. "Optimally weighted Z-test is a powerful method
for combining probabilities in meta-analysis." Journal of
Evolutionary Biology 24, no. 8 (2011): 1836-1841.
.. [5] https://en.wikipedia.org/wiki/Extensions_of_Fisher%27s_method
"""
pvalues = np.asarray(pvalues)
if pvalues.ndim != 1:
raise ValueError("pvalues is not 1-D")
if method == 'fisher':
Xsq = -2 * np.sum(np.log(pvalues))
pval = distributions.chi2.sf(Xsq, 2 * len(pvalues))
return (Xsq, pval)
elif method == 'stouffer':
if weights is None:
weights = np.ones_like(pvalues)
elif len(weights) != len(pvalues):
raise ValueError("pvalues and weights must be of the same size.")
weights = np.asarray(weights)
if weights.ndim != 1:
raise ValueError("weights is not 1-D")
Zi = distributions.norm.isf(pvalues)
Z = np.dot(weights, Zi) / np.linalg.norm(weights)
pval = distributions.norm.sf(Z)
return (Z, pval)
else:
raise ValueError(
"Invalid method '%s'. Options are 'fisher' or 'stouffer'", method)
#####################################
# PROBABILITY CALCULATIONS #
#####################################
@np.deprecate(message="stats.chisqprob is deprecated in scipy 0.17.0; "
"use stats.distributions.chi2.sf instead.")
def chisqprob(chisq, df):
"""
Probability value (1-tail) for the Chi^2 probability distribution.
Broadcasting rules apply.
Parameters
----------
chisq : array_like or float > 0
df : array_like or float, probably int >= 1
Returns
-------
chisqprob : ndarray
The area from `chisq` to infinity under the Chi^2 probability
distribution with degrees of freedom `df`.
"""
return distributions.chi2.sf(chisq, df)
@np.deprecate(message="stats.betai is deprecated in scipy 0.17.0; "
"use special.betainc instead")
def betai(a, b, x):
"""
Returns the incomplete beta function.
I_x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt)
where a,b>0 and B(a,b) = G(a)*G(b)/(G(a+b)) where G(a) is the gamma
function of a.
The standard broadcasting rules apply to a, b, and x.
Parameters
----------
a : array_like or float > 0
b : array_like or float > 0
x : array_like or float
x will be clipped to be no greater than 1.0 .
Returns
-------
betai : ndarray
Incomplete beta function.
"""
return _betai(a, b, x)
def _betai(a, b, x):
x = np.asarray(x)
x = np.where(x < 1.0, x, 1.0) # if x > 1 then return 1.0
return special.betainc(a, b, x)
#####################################
# ANOVA CALCULATIONS #
#####################################
@np.deprecate(message="stats.f_value_wilks_lambda deprecated in scipy 0.17.0")
def f_value_wilks_lambda(ER, EF, dfnum, dfden, a, b):
"""Calculation of Wilks lambda F-statistic for multivarite data, per
Maxwell & Delaney p.657.
"""
if isinstance(ER, (int, float)):
ER = array([[ER]])
if isinstance(EF, (int, float)):
EF = array([[EF]])
lmbda = linalg.det(EF) / linalg.det(ER)
if (a-1)**2 + (b-1)**2 == 5:
q = 1
else:
q = np.sqrt(((a-1)**2*(b-1)**2 - 2) / ((a-1)**2 + (b-1)**2 - 5))
n_um = (1 - lmbda**(1.0/q))*(a-1)*(b-1)
d_en = lmbda**(1.0/q) / (n_um*q - 0.5*(a-1)*(b-1) + 1)
return n_um / d_en
@np.deprecate(message="stats.f_value deprecated in scipy 0.17.0")
def f_value(ER, EF, dfR, dfF):
"""
Returns an F-statistic for a restricted vs. unrestricted model.
Parameters
----------
ER : float
`ER` is the sum of squared residuals for the restricted model
or null hypothesis
EF : float
`EF` is the sum of squared residuals for the unrestricted model
or alternate hypothesis
dfR : int
`dfR` is the degrees of freedom in the restricted model
dfF : int
`dfF` is the degrees of freedom in the unrestricted model
Returns
-------
F-statistic : float
"""
return (ER - EF) / float(dfR - dfF) / (EF / float(dfF))
@np.deprecate(message="stats.f_value_multivariate deprecated in scipy 0.17.0")
def f_value_multivariate(ER, EF, dfnum, dfden):
"""
Returns a multivariate F-statistic.
Parameters
----------
ER : ndarray
Error associated with the null hypothesis (the Restricted model).
From a multivariate F calculation.
EF : ndarray
Error associated with the alternate hypothesis (the Full model)
From a multivariate F calculation.
    dfnum : int
        Degrees of freedom of the Restricted model.
    dfden : int
        Degrees of freedom associated with the Full model.
Returns
-------
fstat : float
The computed F-statistic.
"""
if isinstance(ER, (int, float)):
ER = array([[ER]])
if isinstance(EF, (int, float)):
EF = array([[EF]])
n_um = (linalg.det(ER) - linalg.det(EF)) / float(dfnum)
d_en = linalg.det(EF) / float(dfden)
return n_um / d_en
#####################################
# SUPPORT FUNCTIONS #
#####################################
RepeatedResults = namedtuple('RepeatedResults', ('values', 'counts'))
def find_repeats(arr):
"""
Find repeats and repeat counts.
Parameters
----------
arr : array_like
Input array. This is cast to float64.
Returns
-------
values : ndarray
The unique values from the (flattened) input that are repeated.
counts : ndarray
Number of times the corresponding 'value' is repeated.
Notes
-----
In numpy >= 1.9 `numpy.unique` provides similar functionality. The main
difference is that `find_repeats` only returns repeated values.
Examples
--------
>>> from scipy import stats
>>> stats.find_repeats([2, 1, 2, 3, 2, 2, 5])
RepeatedResults(values=array([ 2.]), counts=array([4]))
>>> stats.find_repeats([[10, 20, 1, 2], [5, 5, 4, 4]])
RepeatedResults(values=array([ 4., 5.]), counts=array([2, 2]))
"""
# Note: always copies.
return RepeatedResults(*_find_repeats(np.array(arr, dtype=np.float64)))
@np.deprecate(message="scipy.stats.ss is deprecated in scipy 0.17.0")
def ss(a, axis=0):
return _sum_of_squares(a, axis)
def _sum_of_squares(a, axis=0):
"""
Squares each element of the input array, and returns the sum(s) of that.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
sum_of_squares : ndarray
The sum along the given axis for (a**2).
See also
--------
_square_of_sums : The square(s) of the sum(s) (the opposite of
`_sum_of_squares`).
"""
a, axis = _chk_asarray(a, axis)
return np.sum(a*a, axis)
@np.deprecate(message="scipy.stats.square_of_sums is deprecated "
"in scipy 0.17.0")
def square_of_sums(a, axis=0):
return _square_of_sums(a, axis)
def _square_of_sums(a, axis=0):
"""
Sums elements of the input array, and returns the square(s) of that sum.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
square_of_sums : float or ndarray
The square of the sum over `axis`.
See also
--------
_sum_of_squares : The sum of squares (the opposite of `square_of_sums`).
"""
a, axis = _chk_asarray(a, axis)
s = np.sum(a, axis)
if not np.isscalar(s):
return s.astype(float) * s
else:
return float(s) * s
@np.deprecate(message="scipy.stats.fastsort is deprecated in scipy 0.16.0")
def fastsort(a):
"""
Sort an array and provide the argsort.
Parameters
----------
a : array_like
Input array.
Returns
-------
    sorted_array : ndarray
        The values of `a` in sorted order.
    index_array : ndarray of type int
        Indices that sort the original array (the argsort of `a`).
    """
it = np.argsort(a)
as_ = a[it]
return as_, it
def rankdata(a, method='average'):
"""
rankdata(a, method='average')
Assign ranks to data, dealing with ties appropriately.
Ranks begin at 1. The `method` argument controls how ranks are assigned
to equal values. See [1]_ for further discussion of ranking methods.
Parameters
----------
a : array_like
The array of values to be ranked. The array is first flattened.
method : str, optional
The method used to assign ranks to tied elements.
The options are 'average', 'min', 'max', 'dense' and 'ordinal'.
'average':
The average of the ranks that would have been assigned to
all the tied values is assigned to each value.
'min':
The minimum of the ranks that would have been assigned to all
the tied values is assigned to each value. (This is also
referred to as "competition" ranking.)
'max':
The maximum of the ranks that would have been assigned to all
the tied values is assigned to each value.
'dense':
Like 'min', but the rank of the next highest element is assigned
the rank immediately after those assigned to the tied elements.
'ordinal':
All values are given a distinct rank, corresponding to the order
that the values occur in `a`.
The default is 'average'.
Returns
-------
ranks : ndarray
An array of length equal to the size of `a`, containing rank
scores.
References
----------
.. [1] "Ranking", http://en.wikipedia.org/wiki/Ranking
Examples
--------
>>> from scipy.stats import rankdata
>>> rankdata([0, 2, 3, 2])
array([ 1. , 2.5, 4. , 2.5])
>>> rankdata([0, 2, 3, 2], method='min')
array([ 1, 2, 4, 2])
>>> rankdata([0, 2, 3, 2], method='max')
array([ 1, 3, 4, 3])
>>> rankdata([0, 2, 3, 2], method='dense')
array([ 1, 2, 3, 2])
>>> rankdata([0, 2, 3, 2], method='ordinal')
array([ 1, 2, 4, 3])
"""
if method not in ('average', 'min', 'max', 'dense', 'ordinal'):
raise ValueError('unknown method "{0}"'.format(method))
arr = np.ravel(np.asarray(a))
algo = 'mergesort' if method == 'ordinal' else 'quicksort'
sorter = np.argsort(arr, kind=algo)
inv = np.empty(sorter.size, dtype=np.intp)
inv[sorter] = np.arange(sorter.size, dtype=np.intp)
if method == 'ordinal':
return inv + 1
arr = arr[sorter]
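    # `obs` is True at the first occurrence of each distinct value in the
    # sorted array; its cumulative sum therefore yields dense ranks.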
obs = np.r_[True, arr[1:] != arr[:-1]]
dense = obs.cumsum()[inv]
if method == 'dense':
return dense
# cumulative counts of each unique value
count = np.r_[np.nonzero(obs)[0], len(obs)]
if method == 'max':
return count[dense]
if method == 'min':
return count[dense - 1] + 1
# average method
return .5 * (count[dense] + count[dense - 1] + 1)
| mit |
Leminen/project_template_deeplearning | src/models/logreg_example.py | 1 | 7774 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 10 16:43:52 2017
@author: leminen
"""
import os
import tensorflow as tf
import matplotlib.pyplot as plt
import datetime
import argparse
import shlex
import src.utils as utils
import src.data.util_data as util_data
def hparams_parser_train(hparams_string):
parser = argparse.ArgumentParser()
parser.add_argument('--epoch_max',
type=int, default='100',
help='Max number of epochs to run')
parser.add_argument('--batch_size',
type=int, default='64',
help='Number of samples in each batch')
## add more model parameters to enable configuration from terminal
return parser.parse_args(shlex.split(hparams_string))
def hparams_parser_evaluate(hparams_string):
parser = argparse.ArgumentParser()
parser.add_argument('--epoch_no',
type=int,
default=None,
help='Epoch no to reload')
## add more model parameters to enable configuration from terminal
return parser.parse_args(shlex.split(hparams_string))
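# Both parsers take a single string of space-separated flags, e.g.
# hparams_parser_train('--epoch_max 10 --batch_size 32'); shlex.split applies
# shell-style tokenization, so quoted values are handled as expected.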
class logreg_example(object):
def __init__(self, dataset, id):
self.model = 'logreg_example'
if id != None:
self.model = self.model + '_' + id
self.dir_base = 'models/' + self.model
self.dir_logs = self.dir_base + '/logs'
self.dir_checkpoints = self.dir_base + '/checkpoints'
self.dir_results = self.dir_base + '/results'
utils.checkfolder(self.dir_checkpoints)
utils.checkfolder(self.dir_logs)
utils.checkfolder(self.dir_results)
# Specify valid dataset for model
if dataset == 'MNIST':
            self.dataset_filenames = ['data/processed/MNIST/train.tfrecord']
self.lbl_dim = 10
else:
raise ValueError('Selected Dataset is not supported by model: logreg_example')
def _create_inference(self, inputs):
""" Define the inference model for the network
Args:
Returns:
"""
X = tf.reshape(inputs,[-1,784])
w = tf.Variable(tf.random_normal(shape=[784, 10], stddev=0.01), name='weights')
b = tf.Variable(tf.zeros([1, 10]), name="bias")
outputs = tf.matmul(X, w) + b
return outputs
def _create_losses(self, outputs, labels):
""" Define loss function[s] for the network
Args:
Returns:
"""
entropy = tf.nn.softmax_cross_entropy_with_logits(logits=outputs, labels=labels, name='loss')
loss = tf.reduce_mean(entropy) # computes the mean over all the examples in the batch
return loss
def _create_optimizer(self, loss):
""" Create optimizer for the network
Args:
Returns:
"""
optimizer = tf.train.AdamOptimizer(learning_rate = 0.01)
optimizer_op = optimizer.minimize(loss)
return optimizer_op
def _create_summaries(self, loss):
""" Create summaries for the network
Args:
Returns:
"""
### Add summaries
with tf.name_scope("summaries"):
tf.summary.scalar('model_loss', loss) # placeholder summary
summary_op = tf.summary.merge_all()
return summary_op
def train(self, hparams_string):
""" Run training of the network
Args:
Returns:
"""
args_train = hparams_parser_train(hparams_string)
batch_size = args_train.batch_size
epoch_max = args_train.epoch_max
utils.save_model_configuration(args_train, self.dir_base)
# Use dataset for loading in datasamples from .tfrecord (https://www.tensorflow.org/programmers_guide/datasets#consuming_tfrecord_data)
# The iterator will get a new batch from the dataset each time a sess.run() is executed on the graph.
        dataset = tf.data.TFRecordDataset(self.dataset_filenames)
dataset = dataset.map(util_data.decode_image) # decoding the tfrecord
dataset = dataset.map(self._preProcessData) # potential local preprocessing of data
dataset = dataset.shuffle(buffer_size = 10000, seed = None)
dataset = dataset.batch(batch_size = batch_size)
iterator = dataset.make_initializable_iterator()
inputs = iterator.get_next()
# depends on self._preProcessData
[in_image, in_label] = inputs
# show network architecture
utils.show_all_variables()
# define model, loss, optimizer and summaries.
outputs = self._create_inference(in_image)
loss = self._create_losses(outputs, in_label)
optimizer_op = self._create_optimizer(loss)
summary_op = self._create_summaries(loss)
with tf.Session() as sess:
# Initialize all model Variables.
sess.run(tf.global_variables_initializer())
# Create Saver object for loading and storing checkpoints
saver = tf.train.Saver()
# Create Writer object for storing graph and summaries for TensorBoard
writer = tf.summary.FileWriter(self.dir_logs, sess.graph)
# Reload Tensor values from latest checkpoint
ckpt = tf.train.get_checkpoint_state(self.dir_checkpoints)
epoch_start = 0
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
epoch_start = int(ckpt_name.split('-')[-1])
            iterationCnt = 0
# Do training loops
for epoch_n in range(epoch_start, epoch_max):
# Initiate or Re-initiate iterator
sess.run(iterator.initializer)
# Test model output before any training
if epoch_n == 0:
summary = sess.run(summary_op)
writer.add_summary(summary, global_step=-1)
utils.show_message('Running training epoch no: {0}'.format(epoch_n))
while True:
try:
_, summary = sess.run([optimizer_op, summary_op])
                        writer.add_summary(summary, global_step=iterationCnt)
                        iterationCnt += 1
except tf.errors.OutOfRangeError:
# Do some evaluation after each Epoch
break
if epoch_n % 1 == 0:
saver.save(sess,os.path.join(self.dir_checkpoints, self.model + '.model'), global_step=epoch_n)
def evaluate(self, hparams_string):
""" Run prediction of the network
Args:
Returns:
"""
args_evaluate = hparams_parser_evaluate(hparams_string)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
def _preProcessData(self, image_proto, lbl_proto, class_proto, height_proto, width_proto, channels_proto, origin_proto):
""" Local preprocessing of data from dataset
also used to select which elements to parse onto the model
Args:
all outputs of util_data.decode_image
Returns:
"""
image = image_proto
label = tf.one_hot(lbl_proto, self.lbl_dim)
return image, label | mit |
nightjean/Deep-Learning | tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined_benchmark_test.py | 82 | 8976 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Regression test for DNNLinearCombinedEstimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import tempfile
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
from tensorflow.python.training import ftrl
from tensorflow.python.training import server_lib
# Desired training steps, reported in benchmark. Actual steps might be slightly
# more than this since supervisor training runs for a non-deterministic number of
# steps.
_ITERS = 100
_METRIC_KEYS = {
'accuracy',
'auc',
'accuracy/threshold_0.500000_mean',
'loss',
'precision/positive_threshold_0.500000_mean',
'recall/positive_threshold_0.500000_mean',
}
class DNNLinearCombinedClassifierBenchmark(test.Benchmark):
def _assertSingleClassMetrics(self, metrics):
estimator_test_utils.assert_in_range(0.9, 1.0, 'auc', metrics)
estimator_test_utils.assert_in_range(0.9, 1.0,
'accuracy/threshold_0.500000_mean',
metrics)
estimator_test_utils.assert_in_range(
0.9, 1.0, 'precision/positive_threshold_0.500000_mean', metrics)
estimator_test_utils.assert_in_range(
0.9, 1.0, 'recall/positive_threshold_0.500000_mean', metrics)
self._assertCommonMetrics(metrics)
def _assertCommonMetrics(self, metrics):
estimator_test_utils.assert_in_range(_ITERS, _ITERS + 5, 'global_step',
metrics)
estimator_test_utils.assert_in_range(0.9, 1.0, 'accuracy', metrics)
estimator_test_utils.assert_in_range(0.0, 0.2, 'loss', metrics)
self.report_benchmark(
iters=metrics['global_step'],
extras={k: v
for k, v in metrics.items() if k in _METRIC_KEYS})
def benchmarkMatrixData(self):
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_feature = feature_column.real_valued_column('feature', dimension=4)
bucketized_feature = feature_column.bucketized_column(
cont_feature, test_data.get_quantile_based_buckets(iris.data, 10))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
model_dir=tempfile.mkdtemp(),
linear_feature_columns=(bucketized_feature,),
dnn_feature_columns=(cont_feature,),
dnn_hidden_units=(3, 3))
input_fn = test_data.iris_input_logistic_fn
metrics = classifier.fit(input_fn=input_fn, steps=_ITERS).evaluate(
input_fn=input_fn, steps=100)
self._assertSingleClassMetrics(metrics)
def benchmarkTensorData(self):
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
features = {}
for i in range(4):
# The following shows how to provide the Tensor data for
# RealValuedColumns.
features.update({
str(i):
array_ops.reshape(
constant_op.constant(
iris.data[:, i], dtype=dtypes.float32), (-1, 1))
})
# The following shows how to provide the SparseTensor data for
# a SparseColumn.
features['dummy_sparse_column'] = sparse_tensor.SparseTensor(
values=('en', 'fr', 'zh'),
indices=((0, 0), (0, 1), (60, 0)),
dense_shape=(len(iris.target), 2))
labels = array_ops.reshape(
constant_op.constant(
iris.target, dtype=dtypes.int32), (-1, 1))
return features, labels
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
feature_column.real_valued_column(str(i)) for i in range(4)
]
linear_features = [
feature_column.bucketized_column(
cont_features[i],
test_data.get_quantile_based_buckets(iris.data[:, i], 10))
for i in range(4)
]
linear_features.append(
feature_column.sparse_column_with_hash_bucket(
'dummy_sparse_column', hash_bucket_size=100))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
model_dir=tempfile.mkdtemp(),
linear_feature_columns=linear_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=(3, 3))
metrics = classifier.fit(input_fn=_input_fn, steps=_ITERS).evaluate(
input_fn=_input_fn, steps=100)
self._assertSingleClassMetrics(metrics)
def benchmarkCustomOptimizer(self):
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_feature = feature_column.real_valued_column('feature', dimension=4)
bucketized_feature = feature_column.bucketized_column(
cont_feature, test_data.get_quantile_based_buckets(iris.data, 10))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
model_dir=tempfile.mkdtemp(),
linear_feature_columns=(bucketized_feature,),
linear_optimizer=ftrl.FtrlOptimizer(learning_rate=0.1),
dnn_feature_columns=(cont_feature,),
dnn_hidden_units=(3, 3),
dnn_optimizer=adagrad.AdagradOptimizer(learning_rate=0.1))
input_fn = test_data.iris_input_logistic_fn
metrics = classifier.fit(input_fn=input_fn, steps=_ITERS).evaluate(
input_fn=input_fn, steps=100)
self._assertSingleClassMetrics(metrics)
def benchmarkMultiClass(self):
iris = base.load_iris()
cont_feature = feature_column.real_valued_column('feature', dimension=4)
bucketized_feature = feature_column.bucketized_column(
cont_feature, test_data.get_quantile_based_buckets(iris.data, 10))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
linear_feature_columns=(bucketized_feature,),
dnn_feature_columns=(cont_feature,),
dnn_hidden_units=(3, 3))
input_fn = test_data.iris_input_multiclass_fn
metrics = classifier.fit(input_fn=input_fn, steps=_ITERS).evaluate(
input_fn=input_fn, steps=100)
self._assertCommonMetrics(metrics)
def benchmarkPartitionedVariables(self):
def _input_fn():
features = {
'language':
sparse_tensor.SparseTensor(
values=('en', 'fr', 'zh'),
indices=((0, 0), (0, 1), (2, 0)),
dense_shape=(3, 2))
}
labels = constant_op.constant(((1,), (0,), (0,)))
return features, labels
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
sparse_feature = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
embedding_feature = feature_column.embedding_column(
sparse_feature, dimension=1)
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=(sparse_feature,),
dnn_feature_columns=(embedding_feature,),
dnn_hidden_units=(3, 3),
config=config)
metrics = classifier.fit(input_fn=_input_fn, steps=_ITERS).evaluate(
input_fn=_input_fn, steps=100)
self._assertCommonMetrics(metrics)
if __name__ == '__main__':
test.main()
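# --- Hedged usage note (editorial addition) ---
# tf.test.Benchmark subclasses are typically exercised by running this file directly
# and selecting benchmarks by regex via the --benchmarks flag, e.g.:
#
#     python dnn_linear_combined_benchmark_test.py \
#         --benchmarks=DNNLinearCombinedClassifierBenchmark
#
# Without --benchmarks, test.main() only runs regular unit tests defined in the file.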
| apache-2.0 |
deniszgonjanin/ckanext-bcgov | ckanext/bcgov/logic/action.py | 2 | 28743 | # Copyright 2015, Province of British Columbia
# License: https://github.com/bcgov/ckanext-bcgov/blob/master/license
import ckan.plugins.toolkit as toolkit
import logging
import datetime
import sqlalchemy
import ckan.logic as logic
import ckan.plugins as plugins
import smtplib
from time import time
import ckan.lib as lib
from email import Utils
from email.mime.text import MIMEText
from email.header import Header
from pylons import config
from ckan.common import _, c, g
import ckan.lib.plugins as lib_plugins
import ckan.lib.dictization.model_save as model_save
import ckan.lib.uploader as uploader
import ckan.lib.helpers as h
import ckan.lib.munge as munge
import paste.deploy.converters
from ckan.lib.mailer import MailerException
import ckan.model as model
from ckanext.bcgov.util.util import get_user_list
import pprint
# shortcuts
get_action = logic.get_action
_check_access = logic.check_access
NotFound = logic.NotFound
_validate = lib.navl.dictization_functions.validate
ValidationError = logic.ValidationError
_get_action = logic.get_action
log = logging.getLogger('ckanext.edc_schema')
_or_ = sqlalchemy.or_
'''
Checking package status and sending a notification if the state is changed.
'''
def add_msg_niceties(recipient_name, body, sender_name, sender_url):
return "Dear %s,<br><br>" % recipient_name \
+ "\r\n\r\n%s\r\n\r\n" % body \
+ "<br><br>--<br>\r\n%s (<a href=\"%s\">%s</a>)" % (sender_name, sender_url, sender_url)
def send_state_change_notifications(members, email_dict, sender_name, sender_url):
'''
Sends state change notifications to sub-org members.
Updated by Khalegh Mamakani on March 5 2015.
List of changes :
- Creating the smtp connection once for all notifications instead of connecting and disconnecting for
every single recipient.
- Using a thread to send the notifications in the background.
'''
#Email common fields
subject = email_dict['subject']
mail_from = config.get('smtp.mail_from')
subject = Header(subject.encode('utf-8'), 'utf-8')
# Connecting to smtp server.
smtp_connection = smtplib.SMTP()
if 'smtp.test_server' in config:
# If 'smtp.test_server' is configured we assume we're running tests,
# and don't use the smtp.server, starttls, user, password etc. options.
smtp_server = config['smtp.test_server']
smtp_starttls = False
smtp_user = None
smtp_password = None
else:
smtp_server = config.get('smtp.server', 'localhost')
smtp_starttls = paste.deploy.converters.asbool(
config.get('smtp.starttls'))
smtp_user = config.get('smtp.user')
smtp_password = config.get('smtp.password')
smtp_connection.connect(smtp_server)
try:
# Identify ourselves and prompt the server for supported features.
smtp_connection.ehlo()
# If 'smtp.starttls' is on in CKAN config, try to put the SMTP
# connection into TLS mode.
if smtp_starttls:
if smtp_connection.has_extn('STARTTLS'):
smtp_connection.starttls()
# Re-identify ourselves over TLS connection.
smtp_connection.ehlo()
else:
raise MailerException("SMTP server does not support STARTTLS")
# If 'smtp.user' is in CKAN config, try to login to SMTP server.
if smtp_user:
assert smtp_password, ("If smtp.user is configured then "
"smtp.password must be configured as well.")
smtp_connection.login(smtp_user, smtp_password)
'''
Adding extra email fields and sending a notification to each individual member.
'''
for member in members :
if member.email :
body = email_dict['body']
msg = MIMEText(body.encode('utf-8'), 'html', 'utf-8')
msg['Subject'] = subject
msg['From'] = "%s <%s>" % (sender_name, mail_from)
msg['Date'] = Utils.formatdate(time())
recipient_email = member.email
recipient_name = member.fullname or member.name
body = add_msg_niceties(recipient_name, body, sender_name, sender_url)
recipient = u"%s <%s>" % (recipient_name, recipient_email)
msg['To'] = Header(recipient, 'utf-8')
try :
smtp_connection.sendmail(mail_from, [recipient_email], msg.as_string())
log.info("Sent state change email to user {0} with email {1}".format(recipient_name, recipient_email))
except Exception, e:
log.error('Failed to send notification to user {0} with email address {1}'.format(recipient_name, recipient_email))
except smtplib.SMTPException, e:
msg = '%r' % e
log.exception(msg)
log.error('Failed to connect to smtp server')
finally:
smtp_connection.quit()
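# --- Hedged usage sketch (editorial addition) ---
# send_state_change_notifications() expects a list of model.User objects plus an
# email_dict with 'subject' and 'body' keys (values below are illustrative only):
#
#     email_dict = {'subject': 'EDC - PUBLISHED Some Dataset',
#                   'body': 'The following record is "PUBLISHED" ...'}
#     send_state_change_notifications(members, email_dict, site_title, site_url)
#
# check_record_state() below builds exactly these arguments when a dataset's
# edc_state changes.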
def check_record_state(context, old_state, new_data, site_title, site_url, dataset_url):
'''
Checks if the dataset state has been changed during the update and
informs the users involved in package management.
Updated by Khalegh Mamakani on March 5th 2015.
List of changes :
- replaced get_user_list with a model query to get the list of all members of the org with the given role
( Preventing action functions calls and multiple for loops)
- Removed the nested for loops for finding and sending notification to members (Replaced by a single for loop).
'''
new_state = new_data['edc_state']
#If dataset's state has not been changed do nothing
if (old_state == new_state):
return
'''
Get the organization and sub-organization data
'''
org_id = new_data.get('org')
sub_org_id = new_data.get('sub_org')
org = model.Group.get(org_id)
sub_org = model.Group.get(sub_org_id)
# Do not send emails for "DRAFT" datasets
if new_state == "DRAFT":
return
# Basic dataset info
dataset_title = new_data['title']
org_title = org.title
sub_org_title = sub_org.title
orgs_titles = org_title + ' - ' + sub_org_title
# Prepare email
subject = ''
body = ''
role = 'admin'
# Change email based on new_state changes
if new_state == 'PENDING PUBLISH' :
subject = 'EDC - PENDING PUBLISH ' + dataset_title
body = 'The following record is "Pending Publication" for ' + orgs_titles + '<br><br>\
Record <a href="' + dataset_url + '">' + dataset_url + '</a>, ' + dataset_title + '<br><br>\
Please review and act as required.'
elif new_state == 'REJECTED':
subject = 'EDC - REJECTED ' + new_data['title']
body = 'The following record is "REJECTED" for ' + orgs_titles + '<br><br>\
Record <a href="' + dataset_url + '">' + dataset_url + '</a>, ' + dataset_title + '<br><br>\
Please review and act as required.'
role = 'editor'
elif new_state == 'PUBLISHED':
subject = 'EDC - PUBLISHED ' + new_data['title']
body = 'The following record is "PUBLISHED" for ' + orgs_titles + '<br><br>\
Record <a href="' + dataset_url + '">' + dataset_url + '</a>, ' + dataset_title + '<br><br>\
Please review and act as required.'
role = 'editor'
elif new_state == 'PENDING ARCHIVE':
subject = 'EDC - PENDING ARCHIVE ' + new_data['title']
body = 'The following record is "Pending Archival" for ' + orgs_titles + '<br><br>\
Record <a href="' + dataset_url + '">' + dataset_url + '</a>, ' + dataset_title + '<br><br>\
Please review and act as required.'
elif new_state == 'ARCHIVED':
subject = 'EDC - ARCHIVED ' + new_data['title']
body = 'The following record is "ARCHIVED" for ' + orgs_titles + '<br><br>\
Record <a href="' + dataset_url + '">' + dataset_url + '</a>, ' + dataset_title + '<br><br>\
Please review and act as required.'
role = 'editor'
else :
pass
email_dict = { 'subject': subject, 'body': body }
# Get the entire list of users
'''
Get the list of sub-organization users with the given role; Added by Khalegh Mamakani
'''
log.info('Sending state change notification to organization users with role %s' %(role,))
query = model.Session.query(model.User) \
.join(model.Member, model.User.id == model.Member.table_id) \
.filter(model.Member.capacity == role) \
.filter(model.Member.group_id == sub_org.id)
members = query.all()
send_state_change_notifications(members, email_dict, site_title, site_url)
def edc_package_update(context, input_data_dict):
'''
Find a package, from the given object_name, and update it with the given fields.
1) Call __package_search to find the package
2) Check the results (success == true), (count==1)
3) Modify the data
4) Call get_action(package_update) to update the package
'''
from ckan.lib.search import SearchError
# first, do the search
q = 'object_name:' + input_data_dict.get("object_name")
fq = ''
offset = 0
limit = 2
sort = 'metadata_modified desc'
try :
data_dict = {
'q' : q,
'fq' : fq,
'start' : offset,
'rows' : limit,
'sort' : sort
}
#Use package_search to filter the list
query = get_action('package_search')(context, data_dict)
except SearchError, se :
log.error('Search error : %s', str(se))
raise SearchError(str(se))
#check the search results - there can be only 1!!
results = query['results']
the_count = query['count']
if the_count != 1:
log.error('Search for the dataset with q={0} returned 0 or more than 1 record.'.format(q))
return_dict = {}
return_dict['results'] = query
return_dict['success'] = False
return_dict['error'] = True
return return_dict
#results[0]['imap_layer_key'] = input_data_dict.get("imap_layer_key")
# JER - the line below was removed because we don't use the data, and getting it into the query was a nightmare
#
#results[0]['imap_display_name'] = input_data_dict.get("imap_display_name")
#results[0]['link_to_imap'] = input_data_dict.get("link_to_imap")
try :
package_dict = get_action('package_show')(context, {'id': results[0]['id']})
if not package_dict :
return_dict = {}
return_dict['success'] = False
return_dict['error'] = True
return return_dict
current_imap_link = package_dict.get('link_to_imap', None)
visibility = package_dict['metadata_visibility']
#pprint.pprint('package_dict:')
#pprint.pprint(package_dict)
#package_dict['imap_layer_key'] = input_data_dict.get("imap_layer_key")
public_map_link = config.get('edc.imap_url_pub')
private_map_link = config.get('edc.imap_url_gov')
update = {}
#don't update archived records
#Updated by Khalegh Mamakani to update the i-map link only if it has not been done already.
new_imap_link = None
if (package_dict['edc_state'] != 'ARCHIVED'):
if (visibility == 'Public'):
if (input_data_dict.get("imap_layers_pub")):
new_imap_link = public_map_link + input_data_dict.get("imap_layers_pub")
else:
if (input_data_dict.get("imap_layers_gov")):
new_imap_link = private_map_link + input_data_dict.get("imap_layers_gov")
if (new_imap_link != None) and (new_imap_link != current_imap_link) :
log.info('Updating IMAP Link to : {0} for dataset {1}'.format(new_imap_link, package_dict.get('title')))
package_dict['link_to_imap'] = new_imap_link
update = get_action('package_update')(context, package_dict)
except Exception, ue:
log.error('Error raised when updating dataset imap_link for dataset {0}.'.format(package_dict.get('name')))
raise Exception(str(ue))
response_dict = {}
response_dict['results'] = update
return response_dict
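# --- Hedged usage sketch (editorial addition) ---
# Assuming edc_package_update is registered as a CKAN action by this extension,
# it would be invoked through the action API; the object name and layer keys
# below are illustrative only:
#
#     result = get_action('edc_package_update')(context, {
#         'object_name': 'SOME_SCHEMA.SOME_OBJECT',   # hypothetical BCGW object name
#         'imap_layers_pub': '123',                   # hypothetical public layer key
#         'imap_layers_gov': '456',                   # hypothetical gov layer key
#     })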
def edc_package_update_bcgw(context, input_data_dict):
'''
Find a package, from the given object_name, and update it with the given fields.
1) Call __package_search to find the package
2) Check the results (success == true), (count==1)
3) Modify the data
4) Call get_action(package_update) to update the package
'''
from ckan.lib.search import SearchError
'''
Fixed unicode characters decoding problem.
'''
import json
input_dict_str = json.dumps(input_data_dict, ensure_ascii=False)
input_data_dict = json.loads(input_dict_str, encoding="cp1252")
update = {}
# first, do the search
q = 'object_name:' + input_data_dict.get("object_name")
fq = ''
offset = 0
limit = 2
sort = 'metadata_modified desc'
try :
data_dict = {
'q' : q,
'fq' : fq,
'start' : offset,
'rows' : limit,
'sort' : sort
}
#Use package_search to filter the list
query = get_action('package_search')(context, data_dict)
except SearchError, se :
log.error('Search error : %s', str(se))
raise SearchError(str(se))
#check the search results - there can be only 1!!
the_count = query['count']
if the_count != 1:
#raise SearchError('Search returned 0 or more than 1 item')
return_dict = {}
return_dict['results'] = query
return_dict['success'] = False
return_dict['error'] = True
return return_dict
results = query['results']
results[0]['details'] = input_data_dict.get("details")
update = None
#need the right data package
package_dict = get_action('package_show')(context, {'id': results[0]['id']})
if package_dict['edc_state'] == 'ARCHIVED' :
return_dict = {}
return_dict['results'] = None
return return_dict
if not package_dict :
return_dict = {}
return_dict['success'] = False
return_dict['error'] = True
return return_dict
#Check if input_data has been modified and is not the same as package data
data_changed = False
current_details = package_dict.get('details')
current_obj_short_name = package_dict.get('object_short_name')
current_obj_table_comments = package_dict.get('object_table_comments')
if current_details != input_data_dict.get('details') :
log.info('Dataset details have been changed for dataset {0}.'.format(package_dict.get('title')))
log.info('Current Details : ')
log.info(current_details)
log.info('New details :')
log.info(input_data_dict.get('details'))
package_dict['details'] = input_data_dict.get('details')
data_changed = True
if current_obj_short_name != input_data_dict.get('object_short_name') :
log.info('Dataset object_short_name has been changed for dataset {0}.'.format(package_dict.get('title')))
log.info('Current object_short_name :')
log.info(current_obj_short_name)
log.info('New object_short_name :')
log.info(input_data_dict.get('object_short_name'))
package_dict['object_short_name'] = input_data_dict.get('object_short_name')
data_changed = True
if current_obj_table_comments != input_data_dict.get('object_table_comments') :
log.info('Dataset current_obj_table_comments has been changed for dataset {0}.'.format(package_dict.get('title')))
log.info('Current object_table_comments :')
log.info(current_obj_table_comments)
log.info('New object_table_comments :')
log.info(input_data_dict.get('object_table_comments'))
package_dict['object_table_comments'] = input_data_dict.get('object_table_comments')
data_changed = True
if data_changed :
log.info('Updating data dictionary for dataset {0}'.format(package_dict.get('title')))
update = get_action('package_update')(context, package_dict)
response_dict = {}
response_dict['results'] = update
return response_dict
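# --- Hedged usage sketch (editorial addition) ---
# edc_package_update_bcgw expects an input_data_dict carrying the BCGW object name
# plus the data-dictionary fields compared above (values are illustrative only):
#
#     input_data_dict = {
#         'object_name': 'SOME_SCHEMA.SOME_OBJECT',       # hypothetical object name
#         'details': '[...]',                             # data dictionary details
#         'object_short_name': 'SOME_OBJECT',             # hypothetical short name
#         'object_table_comments': 'Table description.',
#     }
#     result = edc_package_update_bcgw(context, input_data_dict)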
def package_update(context, data_dict):
'''Update a dataset (package).
You must be authorized to edit the dataset and the groups that it belongs
to.
Plugins may change the parameters of this function depending on the value
of the dataset's ``type`` attribute, see the ``IDatasetForm`` plugin
interface.
For further parameters see ``package_create()``.
:param id: the name or id of the dataset to update
:type id: string
:returns: the updated dataset (if 'return_package_dict' is True in the
context, which is the default. Otherwise returns just the
dataset id)
:rtype: dictionary
'''
model = context['model']
user = context['user']
name_or_id = data_dict.get("id") or data_dict['name']
pkg = model.Package.get(name_or_id)
if pkg is None:
raise NotFound(_('Package was not found.'))
context["package"] = pkg
data_dict["id"] = pkg.id
# FIXME: first modifications to package_update begin here:
# tag strings are reconstructed because validators are stripping
# tags passed and only taking tags as tag_string values
# image upload support has also been added here
old_data = get_action('package_show')(context, {'id': pkg.id})
'''
Constructing the tag_string from the given tags.
There must be at least one tag, otherwise the tag_string will be empty and a validation error
will be raised.
'''
if not data_dict.get('tag_string'):
data_dict['tag_string'] = ', '.join(
h.dict_list_reduce(data_dict.get('tags', {}), 'name'))
for key, value in old_data.iteritems() :
if key not in data_dict :
data_dict[key] = value
#data_dict['resources'] = data_dict.get('resources', old_data.get('resources'))
# iso_topic_cat = data_dict.get('iso_topic_string', [])
# if isinstance(iso_topic_cat, basestring):
# iso_topic_cat = [iso_topic_cat]
#
# data_dict['iso_topic_string'] = ','.join(iso_topic_cat)
#Set the package last modified date
data_dict['record_last_modified'] = str(datetime.date.today())
# If the Created Date has not yet been set, then set it
if data_dict['edc_state'] == 'DRAFT' and not data_dict.get('record_create_date'):
data_dict['record_create_date'] = str(datetime.date.today())
# If the Publish Date has not yet been set, then set it
if data_dict['edc_state'] == 'PUBLISHED' and not data_dict.get('record_publish_date'):
data_dict['record_publish_date'] = str(datetime.date.today())
# If the Archive Date has not yet been set, then set it
if data_dict['edc_state'] == 'ARCHIVED' and not data_dict.get('record_archive_date'):
data_dict['record_archive_date'] = str(datetime.date.today())
_check_access('package_update', context, data_dict)
# get the schema
package_plugin = lib_plugins.lookup_package_plugin(pkg.type)
if 'schema' in context:
schema = context['schema']
else:
schema = package_plugin.update_package_schema()
image_url = old_data.get('image_url', None)
upload = uploader.Upload('edc', image_url)
upload.update_data_dict(data_dict, 'image_url', 'image_upload', 'clear_upload')
#Adding image display url for the uploaded image
image_url = data_dict.get('image_url')
data_dict['image_display_url'] = image_url
if image_url and not image_url.startswith('http'):
image_url = munge.munge_filename(image_url)
data_dict['image_display_url'] = h.url_for_static('uploads/edc/%s' % data_dict.get('image_url'), qualified=True)
if 'api_version' not in context:
# check_data_dict() is deprecated. If the package_plugin has a
# check_data_dict() we'll call it, if it doesn't have the method we'll
# do nothing.
check_data_dict = getattr(package_plugin, 'check_data_dict', None)
if check_data_dict:
try:
package_plugin.check_data_dict(data_dict, schema)
except TypeError:
# Old plugins do not support passing the schema so we need
# to ensure they still work.
package_plugin.check_data_dict(data_dict)
# FIXME: modifications to package_update end here^
data, errors = _validate(data_dict, schema, context)
# log.debug('package_update validate_errs=%r user=%s package=%s data=%r',
# errors, context.get('user'),
# context.get('package').name if context.get('package') else '',
# data)
if errors:
model.Session.rollback()
raise ValidationError(errors)
rev = model.repo.new_revision()
rev.author = user
if 'message' in context:
rev.message = context['message']
else:
rev.message = _(u'REST API: Update object %s') % data.get("name")
#avoid revisioning by updating directly
model.Session.query(model.Package).filter_by(id=pkg.id).update(
{"metadata_modified": datetime.datetime.utcnow()})
model.Session.refresh(pkg)
pkg = model_save.package_dict_save(data, context)
context_org_update = context.copy()
context_org_update['ignore_auth'] = True
context_org_update['defer_commit'] = True
_get_action('package_owner_org_update')(context_org_update,
{'id': pkg.id,
'organization_id': pkg.owner_org})
for item in plugins.PluginImplementations(plugins.IPackageController):
item.edit(pkg)
item.after_update(context, data)
upload.upload(uploader.get_max_image_size())
#TODO the next two blocks are copied from ckan/ckan/logic/action/update.py
# This codebase is currently hard to maintain because large chunks of the
# CKAN action API and the CKAN controllers are simply overriden. This is
# probably worse than just forking CKAN would have been, because in that
# case at least we could track changes. - @deniszgonjanin
# Needed to let extensions know the new resources ids
model.Session.flush()
if data.get('resources'):
for index, resource in enumerate(data['resources']):
resource['id'] = pkg.resources[index].id
# Create default views for resources if necessary
if data.get('resources'):
logic.get_action('package_create_default_resource_views')(
{'model': context['model'], 'user': context['user'],
'ignore_auth': True},
{'package': data})
if not context.get('defer_commit'):
model.repo.commit()
log.debug('Updated object %s' % pkg.name)
return_id_only = context.get('return_id_only', False)
# Make sure that a user provided schema is not used on package_show
context.pop('schema', None)
# we could update the dataset so we should still be able to read it.
context['ignore_auth'] = True
output = data_dict['id'] if return_id_only \
else _get_action('package_show')(context, {'id': data_dict['id']})
'''
Send state change notifications if required; Added by Khalegh Mamakani
Using a thread to run the job in the background so that package_update will not wait for notifications sending.
'''
old_state = old_data.get('edc_state')
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj}
dataset_url = config.get('ckan.site_url') + h.url_for(controller='package', action="read", id = data_dict['name'])
import threading
notify_thread = threading.Thread(target=check_record_state, args=(context, old_state, data_dict, g.site_title, g.site_url, dataset_url) )
notify_thread.start()
return output
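# --- Hedged usage sketch (editorial addition) ---
# This override keeps the standard CKAN calling convention, so a typical update
# still goes through the action API (dataset name and field values illustrative only):
#
#     updated = get_action('package_update')(context, {
#         'id': 'some-dataset-name',
#         'title': 'Some Dataset',
#         'edc_state': 'PUBLISHED',
#     })
#
# Setting context['return_id_only'] = True returns just the dataset id, as in core CKAN.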
def post_disqus_comment(context, comment_dict):
'''
Uses Disqus api to post a guest comment.
Comment_dict :
thread :
message :
author_email :
author_name :
'''
import urllib2
import urllib
import json
import pycurl
from disqusapi import DisqusAPI
import cStringIO
public_api = 'qUpq4pP5Kg6bKmAraTSig2lwghWO5KNqCTmiCdRHD66rgGTWKVCQloJVqvpfe5HI'
secret_api = 'r7fjQCL36LDS2fTWMjLHYZpsiN99MnXZ5D6n8byIMPPZ1x9ohMvnTDOpczHba9N9'
'''
Add the secret api to comment dictionary.
The secret api is taken from the Disqus account(Login to your Disqus account to get the secret api key).
'''
comment_dict['api_secret'] = secret_api
comment_dict['forum'] = u'h3testblog'
identifier = comment_dict['thread']
comment_dict['thread'] = 'ident:' + identifier
# Set the fields string :
fields_string = ''
url = 'http://disqus.com/api/3.0/posts/create.json'
# url= 'https://disqus.com/api/3.0/threads/list.json?api_secret=frFrznmdh6WlR5Xz9dvv6749Ong8l4hWprLdFItoa743d9SwGJ7koQLJuyhKZ7A0&forum=h3testblog'
# comment_dict = {'api_secret' : secret_api,
# 'forum': 'h3testblog'}
# data_string = urllib.quote(json.dumps(comment_dict))
#
# try:
# request = urllib2.Request('https://disqus.com/api/3.0/threads/list.json')
# request.add_header('Accept', 'application/json')
# request.add_header('Authorization', public_api)
# # request.add_header('Authorization', secret_api)
# response = urllib2.urlopen(request, data_string)
# # assert response.code == 200
#
# response_dict = json.loads(response.read())
# # assert response_dict['success'] is True
# # result = response_dict['result']
# except Exception:
# pass
#Get the thread id first :
thread_dict = {'api_secret' : secret_api,
'forum' : 'h3testblog',
'thread' : 'ident:' + identifier }
thread_string = ''
#Construct the post fields string
for key, value in thread_dict.iteritems() :
thread_string += key + '=' + value + '&'
thread_string = thread_string[:-1]
buf = cStringIO.StringIO()
c = pycurl.Curl()
c.setopt(pycurl.URL, 'https://disqus.com/api/3.0/threads/set.json?' + thread_string)
c.setopt(pycurl.VERBOSE, 0)
c.setopt(c.WRITEFUNCTION, buf.write)
c.perform()
response = json.loads(buf.getvalue()).get('response', [])
thread = None
if len(response) > 0 :
thread = response[0]
if thread:
thread_id = thread.get('id', None)
buf.close()
comment_dict['thread'] = thread_id
del comment_dict['forum']
# from disqusapi import DisqusAPI
#
# client = DisqusAPI(secret_api, public_api)
# client.posts.create(api_secret=public_api, **comment_dict)
#
#Construct the post fields string
fields_string = ''
for key, value in comment_dict.iteritems() :
fields_string += key + '=' + value + '&'
fields_string = fields_string[:-1]
buf = cStringIO.StringIO()
#Post the comment using curl
c = pycurl.Curl()
c.setopt(pycurl.URL, url)
c.setopt(pycurl.VERBOSE, 0)
c.setopt(c.POSTFIELDS, fields_string)
c.setopt(c.WRITEFUNCTION, buf.write)
c.perform()
buf.close()
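# --- Hedged usage sketch (editorial addition) ---
# post_disqus_comment expects the fields listed in its docstring; the thread value
# is a dataset identifier that the function prefixes with 'ident:' (values below
# are illustrative only):
#
#     post_disqus_comment(context, {
#         'thread': 'some-dataset-name',
#         'message': 'A guest comment.',
#         'author_email': 'guest@example.com',
#         'author_name': 'Guest User',
#     })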
@toolkit.side_effect_free
def package_autocomplete(context, data_dict):
'''Return a list of datasets (packages) that match a string.
Datasets with names or titles that contain the query string will be
returned.
:param q: the string to search for
:type q: string
:param limit: the maximum number of resource formats to return (optional,
default: 10)
:type limit: int
:rtype: list of dictionaries
'''
_check_access('package_autocomplete', context, data_dict)
limit = data_dict.get('limit', 10)
q = data_dict['q']
q_lower = q.lower()
pkg_list = []
pkg_dict = get_action('package_search')(context, {'fq': 'title:' + q, 'rows': limit})
pkg_dict = pkg_dict['results']
for package in pkg_dict:
if package['name'].startswith(q_lower):
match_field = 'name'
match_displayed = package['name']
else:
match_field = 'title'
match_displayed = '%s (%s)' % (package['title'], package['name'])
result_dict = {'name':package['name'], 'title':package['title'],
'match_field':match_field, 'match_displayed':match_displayed}
pkg_list.append(result_dict)
return pkg_list
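# --- Hedged usage sketch (editorial addition) ---
# package_autocomplete is side-effect free, so it can be called through the action
# API with just a query string (query value illustrative only):
#
#     matches = get_action('package_autocomplete')(context, {'q': 'water', 'limit': 5})
#     # each entry has 'name', 'title', 'match_field' and 'match_displayed' keys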
| agpl-3.0 |