| repo_name (string, length 6-103) | path (string, length 4-209) | copies (325 classes) | size (string, length 4-7) | content (string, length 838-1.04M) | license (15 classes) |
---|---|---|---|---|---|
idlead/scikit-learn | sklearn/svm/setup.py | 318 | 3157 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('svm', parent_package, top_path)
config.add_subpackage('tests')
# Section LibSVM
# we compile both libsvm and libsvm_sparse
config.add_library('libsvm-skl',
sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
depends=[join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')],
# Force C++ linking in case gcc is picked up instead
# of g++ under windows with some versions of MinGW
extra_link_args=['-lstdc++'],
)
libsvm_sources = ['libsvm.c']
libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
join('src', 'libsvm', 'libsvm_template.cpp'),
join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')]
config.add_extension('libsvm',
sources=libsvm_sources,
include_dirs=[numpy.get_include(),
join('src', 'libsvm')],
libraries=['libsvm-skl'],
depends=libsvm_depends,
)
### liblinear module
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
liblinear_sources = ['liblinear.c',
join('src', 'liblinear', '*.cpp')]
liblinear_depends = [join('src', 'liblinear', '*.h'),
join('src', 'liblinear', 'liblinear_helper.c')]
config.add_extension('liblinear',
sources=liblinear_sources,
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
depends=liblinear_depends,
# extra_compile_args=['-O0 -fno-inline'],
** blas_info)
## end liblinear module
# this should go *after* libsvm-skl
libsvm_sparse_sources = ['libsvm_sparse.c']
config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
sources=libsvm_sparse_sources,
include_dirs=[numpy.get_include(),
join("src", "libsvm")],
depends=[join("src", "libsvm", "svm.h"),
join("src", "libsvm",
"libsvm_sparse_helper.c")])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
altairpearl/scikit-learn | sklearn/ensemble/tests/test_forest.py | 22 | 41796 | """
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import combinations
from itertools import product
import numpy as np
from scipy.misc import comb
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import skip_if_32bit
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.model_selection import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.fixes import bincount
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also make a hastie_10_2 dataset
hastie_X, hastie_y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
hastie_X = hastie_X.astype(np.float32)
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
"""Check classification on a toy dataset."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
for name in FOREST_CLASSIFIERS:
yield check_classification_toy, name
def check_iris_criterion(name, criterion):
# Check consistency on dataset iris.
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
% (criterion, score))
clf = ForestClassifier(n_estimators=10, criterion=criterion,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
% (criterion, score))
def test_iris():
for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
# Check consistency on dataset boston house prices.
ForestRegressor = FOREST_REGRESSORS[name]
clf = ForestRegressor(n_estimators=5, criterion=criterion,
random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.94, "Failed with max_features=None, criterion %s "
"and score = %f" % (criterion, score))
clf = ForestRegressor(n_estimators=5, criterion=criterion,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
"and score = %f" % (criterion, score))
def test_boston():
for name, criterion in product(FOREST_REGRESSORS, ("mse", "mae", "friedman_mse")):
yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
# Regression models should not have a classes_ attribute.
r = FOREST_REGRESSORS[name](random_state=0)
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
def test_regressor_attributes():
for name in FOREST_REGRESSORS:
yield check_regressor_attributes, name
def check_probability(name):
# Predict probabilities.
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
for name in FOREST_CLASSIFIERS:
yield check_probability, name
def check_importances(name, criterion, X, y):
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=20, criterion=criterion,
random_state=0)
est.fit(X, y)
importances = est.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
# XXX: Remove this test in 0.19 after transform support to estimators
# is removed.
X_new = assert_warns(
DeprecationWarning, est.transform, X, threshold="mean")
assert_less(0 < X_new.shape[1], X.shape[1])
# Check with parallel
importances = est.feature_importances_
est.set_params(n_jobs=2)
    importances_parallel = est.feature_importances_
    assert_array_almost_equal(importances, importances_parallel)
# Check with sample weights
sample_weight = check_random_state(0).randint(1, 10, len(X))
est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
est.fit(X, y, sample_weight=sample_weight)
importances = est.feature_importances_
assert_true(np.all(importances >= 0.0))
for scale in [0.5, 10, 100]:
est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
est.fit(X, y, sample_weight=scale * sample_weight)
importances_bis = est.feature_importances_
assert_less(np.abs(importances - importances_bis).mean(), 0.001)
@skip_if_32bit
def test_importances():
X, y = datasets.make_classification(n_samples=500, n_features=10,
n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False,
random_state=0)
for name, criterion in product(FOREST_CLASSIFIERS, ["gini", "entropy"]):
yield check_importances, name, criterion, X, y
for name, criterion in product(FOREST_REGRESSORS, ["mse", "friedman_mse", "mae"]):
yield check_importances, name, criterion, X, y
def test_importances_asymptotic():
# Check whether variable importances of totally randomized trees
# converge towards their theoretical values (See Louppe et al,
# Understanding variable importances in forests of randomized trees, 2013).
def binomial(k, n):
return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True)
def entropy(samples):
n_samples = len(samples)
entropy = 0.
for count in bincount(samples):
p = 1. * count / n_samples
if p > 0:
entropy -= p * np.log2(p)
return entropy
def mdi_importance(X_m, X, y):
n_samples, n_features = X.shape
features = list(range(n_features))
features.pop(X_m)
values = [np.unique(X[:, i]) for i in range(n_features)]
imp = 0.
for k in range(n_features):
# Weight of each B of size k
coef = 1. / (binomial(k, n_features) * (n_features - k))
# For all B of size k
for B in combinations(features, k):
# For all values B=b
for b in product(*[values[B[j]] for j in range(k)]):
mask_b = np.ones(n_samples, dtype=np.bool)
for j in range(k):
mask_b &= X[:, B[j]] == b[j]
X_, y_ = X[mask_b, :], y[mask_b]
n_samples_b = len(X_)
if n_samples_b > 0:
children = []
for xi in values[X_m]:
mask_xi = X_[:, X_m] == xi
children.append(y_[mask_xi])
imp += (coef
* (1. * n_samples_b / n_samples) # P(B=b)
* (entropy(y_) -
sum([entropy(c) * len(c) / n_samples_b
for c in children])))
return imp
data = np.array([[0, 0, 1, 0, 0, 1, 0, 1],
[1, 0, 1, 1, 1, 0, 1, 2],
[1, 0, 1, 1, 0, 1, 1, 3],
[0, 1, 1, 1, 0, 1, 0, 4],
[1, 1, 0, 1, 0, 1, 1, 5],
[1, 1, 0, 1, 1, 1, 1, 6],
[1, 0, 1, 0, 0, 1, 0, 7],
[1, 1, 1, 1, 1, 1, 1, 8],
[1, 1, 1, 1, 0, 1, 1, 9],
[1, 1, 1, 0, 1, 1, 1, 0]])
X, y = np.array(data[:, :7], dtype=np.bool), data[:, 7]
n_features = X.shape[1]
# Compute true importances
true_importances = np.zeros(n_features)
for i in range(n_features):
true_importances[i] = mdi_importance(i, X, y)
# Estimate importances with totally randomized trees
clf = ExtraTreesClassifier(n_estimators=500,
max_features=1,
criterion="entropy",
random_state=0).fit(X, y)
importances = sum(tree.tree_.compute_feature_importances(normalize=False)
for tree in clf.estimators_) / clf.n_estimators
# Check correctness
assert_almost_equal(entropy(y), sum(importances))
assert_less(np.abs(true_importances - importances).mean(), 0.01)
def check_unfitted_feature_importances(name):
assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0),
"feature_importances_")
def test_unfitted_feature_importances():
for name in FOREST_ESTIMATORS:
yield check_unfitted_feature_importances, name
def check_oob_score(name, X, y, n_estimators=20):
# Check that oob prediction is a good estimation of the generalization
# error.
# Proper behavior
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=n_estimators, bootstrap=True)
n_samples = X.shape[0]
est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
if name in FOREST_CLASSIFIERS:
assert_less(abs(test_score - est.oob_score_), 0.1)
else:
assert_greater(test_score, est.oob_score_)
assert_greater(est.oob_score_, .8)
# Check warning if not enough estimators
with np.errstate(divide="ignore", invalid="ignore"):
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=1, bootstrap=True)
assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
for name in FOREST_CLASSIFIERS:
yield check_oob_score, name, iris.data, iris.target
# csc matrix
yield check_oob_score, name, csc_matrix(iris.data), iris.target
# non-contiguous targets in classification
yield check_oob_score, name, iris.data, iris.target * 2 + 1
for name in FOREST_REGRESSORS:
yield check_oob_score, name, boston.data, boston.target, 50
# csc matrix
yield check_oob_score, name, csc_matrix(boston.data), boston.target, 50
def check_oob_score_raise_error(name):
ForestEstimator = FOREST_ESTIMATORS[name]
if name in FOREST_TRANSFORMERS:
for oob_score in [True, False]:
assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
X, y)
else:
# Unfitted / no bootstrap / no oob_score
for oob_score, bootstrap in [(True, False), (False, True),
(False, False)]:
est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
random_state=0)
assert_false(hasattr(est, "oob_score_"))
# No bootstrap
assert_raises(ValueError, ForestEstimator(oob_score=True,
bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
for name in FOREST_ESTIMATORS:
yield check_oob_score_raise_error, name
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)})
clf.fit(iris.data, iris.target)
def test_gridsearch():
# Check that base trees can be grid-searched.
for name in FOREST_CLASSIFIERS:
yield check_gridsearch, name
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert_equal(len(forest), 10)
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
def test_parallel():
for name in FOREST_CLASSIFIERS:
yield check_parallel, name, iris.data, iris.target
for name in FOREST_REGRESSORS:
yield check_parallel, name, boston.data, boston.target
def check_pickle(name, X, y):
    # Check picklability.
ForestEstimator = FOREST_ESTIMATORS[name]
obj = ForestEstimator(random_state=0)
obj.fit(X, y)
score = obj.score(X, y)
pickle_object = pickle.dumps(obj)
obj2 = pickle.loads(pickle_object)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(X, y)
assert_equal(score, score2)
def test_pickle():
for name in FOREST_CLASSIFIERS:
yield check_pickle, name, iris.data[::2], iris.target[::2]
for name in FOREST_REGRESSORS:
yield check_pickle, name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
# Check estimators on multi-output problems.
X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
[-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
[-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = est.predict_log_proba(X_test)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
for name in FOREST_CLASSIFIERS:
yield check_multioutput, name
for name in FOREST_REGRESSORS:
yield check_multioutput, name
def check_classes_shape(name):
# Test that n_classes_ and classes_ have proper shape.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
for name in FOREST_CLASSIFIERS:
yield check_classes_shape, name
def test_random_trees_dense_type():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning a dense array.
# Create the RTE with sparse=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning the same array for both argument values.
# Create the RTEs
hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
random_state=0)
hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
random_state=0)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
# Ignore warnings from switching to more power iterations in randomized_svd
@ignore_warnings
def test_random_hasher():
# test random forest hashing on circles dataset
# make sure that it is linearly separable.
# even after projected to two SVD dimensions
# Note: Not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
# one leaf active per data point per forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
X, y = datasets.make_multilabel_classification(random_state=0)
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X_transformed = hasher.fit_transform(X)
X_transformed_sparse = hasher.fit_transform(csc_matrix(X))
assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
def test_parallel_train():
rng = check_random_state(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
random_state=12345).fit(X_train, y_train)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
X_test = rng.randn(n_samples, n_features)
probas = [clf.predict_proba(X_test) for clf in clfs]
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
# On a single variable problem where X_0 has 4 equiprobable values, there
# are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
# them has probability 1/3 while the 4 others have probability 1/6.
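    # Sketch of the counting: the first random threshold on X_0 falls in (0,1),
    # (1,2) or (2,3) with probability 1/3 each. Landing in (1,2) splits the values
    # into {0,1} and {2,3}, each needing exactly one more cut, which yields the
    # compact tree with probability 1/3. Landing in (0,1) or (2,3) leaves a
    # three-value child whose next threshold falls in either of two unit intervals
    # with probability 1/2, so each of the remaining 4 shapes has probability
    # 1/3 * 1/2 = 1/6.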
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
X[:, 0] = np.random.randint(0, 2, 1000)
X[:, 1] = np.random.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name):
X, y = hastie_X, hastie_y
# Test precedence of max_leaf_nodes over max_depth.
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
n_estimators=1, random_state=0).fit(X, y)
assert_greater(est.estimators_[0].tree_.max_depth, 1)
est = ForestEstimator(max_depth=1, n_estimators=1,
random_state=0).fit(X, y)
assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
for name in FOREST_ESTIMATORS:
yield check_max_leaf_nodes_max_depth, name
def check_min_samples_split(name):
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
# test boundary value
assert_raises(ValueError,
ForestEstimator(min_samples_split=-1).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_split=0).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_split=1.1).fit, X, y)
est = ForestEstimator(min_samples_split=10, n_estimators=1, random_state=0)
est.fit(X, y)
node_idx = est.estimators_[0].tree_.children_left != -1
node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
"Failed with {0}".format(name))
est = ForestEstimator(min_samples_split=0.5, n_estimators=1, random_state=0)
est.fit(X, y)
node_idx = est.estimators_[0].tree_.children_left != -1
node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
"Failed with {0}".format(name))
def test_min_samples_split():
for name in FOREST_ESTIMATORS:
yield check_min_samples_split, name
def check_min_samples_leaf(name):
X, y = hastie_X, hastie_y
# Test if leaves contain more than leaf_count training examples
ForestEstimator = FOREST_ESTIMATORS[name]
# test boundary value
assert_raises(ValueError,
ForestEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_leaf=0).fit, X, y)
est = ForestEstimator(min_samples_leaf=5, n_estimators=1, random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
est = ForestEstimator(min_samples_leaf=0.25, n_estimators=1,
random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), len(X) * 0.25 - 1,
"Failed with {0}".format(name))
def test_min_samples_leaf():
for name in FOREST_ESTIMATORS:
yield check_min_samples_leaf, name
def check_min_weight_fraction_leaf(name):
X, y = hastie_X, hastie_y
# Test if leaves contain at least min_weight_fraction_leaf of the
# training set
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(min_weight_fraction_leaf=frac, n_estimators=1,
random_state=0)
if "RandomForest" in name:
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
node_weights = bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
for name in FOREST_ESTIMATORS:
yield check_min_weight_fraction_leaf, name
def check_sparse_input(name, X, X_sparse, y):
ForestEstimator = FOREST_ESTIMATORS[name]
dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
if name in FOREST_CLASSIFIERS:
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
if name in FOREST_TRANSFORMERS:
assert_array_almost_equal(sparse.transform(X).toarray(),
dense.transform(X).toarray())
assert_array_almost_equal(sparse.fit_transform(X).toarray(),
dense.fit_transform(X).toarray())
def test_sparse_input():
X, y = datasets.make_multilabel_classification(random_state=0,
n_samples=50)
for name, sparse_matrix in product(FOREST_ESTIMATORS,
(csr_matrix, csc_matrix, coo_matrix)):
yield check_sparse_input, name, X, sparse_matrix(X), y
def check_memory_layout(name, dtype):
# Check that it works no matter the memory layout
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.base_estimator.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# coo_matrix
X = coo_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
@ignore_warnings
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(n_estimators=1,
random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
@ignore_warnings
def test_1d_input():
X = iris.data[:, 0]
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
for name in FOREST_ESTIMATORS:
yield check_1d_input, name, X, X_2d, y
def check_class_weights(name):
# Check class_weights resemble sample_weights behavior.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = ForestClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "balanced" which should also have no effect
clf4 = ForestClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in FOREST_CLASSIFIERS:
yield check_class_weights, name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
    # Test class_weight works for multi-output
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(class_weight='balanced', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
random_state=0)
clf.fit(X, _y)
# smoke test for subsample and balanced subsample
clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight='subsample', random_state=0)
ignore_warnings(clf.fit)(X, _y)
def test_class_weight_balanced_and_bootstrap_multi_output():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_balanced_and_bootstrap_multi_output, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = ForestClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Warning warm_start with preset
clf = ForestClassifier(class_weight='auto', warm_start=True,
random_state=0)
assert_warns(UserWarning, clf.fit, X, y)
assert_warns(UserWarning, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = ForestClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_errors, name
def check_warm_start(name, random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = ForestEstimator(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
err_msg="Failed with {0}".format(name))
def test_warm_start():
for name in FOREST_ESTIMATORS:
yield check_warm_start, name
def check_warm_start_clear(name):
# Test if fit clears state and grows a new forest when warm_start==False.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
random_state=2)
clf_2.fit(X, y) # inits state
clf_2.set_params(warm_start=False, random_state=1)
clf_2.fit(X, y) # clears old state and equals clf
assert_array_almost_equal(clf_2.apply(X), clf.apply(X))
def test_warm_start_clear():
for name in FOREST_ESTIMATORS:
yield check_warm_start_clear, name
def check_warm_start_smaller_n_estimators(name):
# Test if warm start second fit with smaller n_estimators raises error.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_smaller_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_smaller_n_estimators, name
def check_warm_start_equal_n_estimators(name):
# Test if warm start with equal n_estimators does nothing and returns the
# same forest and raises a warning.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf_2.fit(X, y)
# Now clf_2 equals clf.
clf_2.set_params(random_state=2)
assert_warns(UserWarning, clf_2.fit, X, y)
# If we had fit the trees again we would have got a different forest as we
# changed the random state.
assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_equal_n_estimators, name
def check_warm_start_oob(name):
# Test that the warm start computes oob score when asked.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=True)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=False)
clf_2.fit(X, y)
clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
clf_2.fit(X, y)
assert_true(hasattr(clf_2, 'oob_score_'))
assert_equal(clf.oob_score_, clf_2.oob_score_)
# Test that oob_score is computed even if we don't need to train
# additional trees.
clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
random_state=1, bootstrap=True, oob_score=False)
clf_3.fit(X, y)
assert_true(not(hasattr(clf_3, 'oob_score_')))
clf_3.set_params(oob_score=True)
ignore_warnings(clf_3.fit)(X, y)
assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
for name in FOREST_CLASSIFIERS:
yield check_warm_start_oob, name
for name in FOREST_REGRESSORS:
yield check_warm_start_oob, name
def test_dtype_convert(n_classes=15):
classifier = RandomForestClassifier(random_state=0, bootstrap=False)
X = np.eye(n_classes)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:n_classes]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(classifier.classes_, y)
assert_array_equal(result, y)
def check_decision_path(name):
X, y = hastie_X, hastie_y
n_samples = X.shape[0]
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
est.fit(X, y)
indicator, n_nodes_ptr = est.decision_path(X)
assert_equal(indicator.shape[1], n_nodes_ptr[-1])
assert_equal(indicator.shape[0], n_samples)
assert_array_equal(np.diff(n_nodes_ptr),
[e.tree_.node_count for e in est.estimators_])
# Assert that leaves index are correct
leaves = est.apply(X)
for est_id in range(leaves.shape[1]):
leave_indicator = [indicator[i, n_nodes_ptr[est_id] + j]
for i, j in enumerate(leaves[:, est_id])]
assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
def test_decision_path():
for name in FOREST_CLASSIFIERS:
yield check_decision_path, name
for name in FOREST_REGRESSORS:
yield check_decision_path, name
| bsd-3-clause |
keras-team/keras-io | guides/working_with_rnns.py | 1 | 19767 | """
Title: Working with RNNs
Authors: Scott Zhu, Francois Chollet
Date created: 2019/07/08
Last modified: 2020/04/14
Description: Complete guide to using & customizing RNN layers.
"""
"""
## Introduction
Recurrent neural networks (RNN) are a class of neural networks that is powerful for
modeling sequence data such as time series or natural language.
Schematically, an RNN layer uses a `for` loop to iterate over the timesteps of a
sequence, while maintaining an internal state that encodes information about the
timesteps it has seen so far.
The Keras RNN API is designed with a focus on:
- **Ease of use**: the built-in `keras.layers.RNN`, `keras.layers.LSTM`,
`keras.layers.GRU` layers enable you to quickly build recurrent models without
having to make difficult configuration choices.
- **Ease of customization**: You can also define your own RNN cell layer (the inner
part of the `for` loop) with custom behavior, and use it with the generic
`keras.layers.RNN` layer (the `for` loop itself). This allows you to quickly
prototype different research ideas in a flexible way with minimal code.
"""
"""
## Setup
"""
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
"""
## Built-in RNN layers: a simple example
"""
"""
There are three built-in RNN layers in Keras:
1. `keras.layers.SimpleRNN`, a fully-connected RNN where the output from the previous
timestep is fed to the next timestep.
2. `keras.layers.GRU`, first proposed in
[Cho et al., 2014](https://arxiv.org/abs/1406.1078).
3. `keras.layers.LSTM`, first proposed in
[Hochreiter & Schmidhuber, 1997](https://www.bioinf.jku.at/publications/older/2604.pdf).
In early 2015, Keras had the first reusable open-source Python implementations of LSTM
and GRU.
Here is a simple example of a `Sequential` model that processes sequences of integers,
embeds each integer into a 64-dimensional vector, then processes the sequence of
vectors using an `LSTM` layer.
"""
model = keras.Sequential()
# Add an Embedding layer expecting input vocab of size 1000, and
# output embedding dimension of size 64.
model.add(layers.Embedding(input_dim=1000, output_dim=64))
# Add a LSTM layer with 128 internal units.
model.add(layers.LSTM(128))
# Add a Dense layer with 10 units.
model.add(layers.Dense(10))
model.summary()
"""
Built-in RNNs support a number of useful features:
- Recurrent dropout, via the `dropout` and `recurrent_dropout` arguments
- Ability to process an input sequence in reverse, via the `go_backwards` argument
- Loop unrolling (which can lead to a large speedup when processing short sequences on
CPU), via the `unroll` argument
- ...and more (a short sketch of some of these arguments is shown below).
For more information, see the
[RNN API documentation](https://keras.io/api/layers/recurrent_layers/).
"""
"""
## Outputs and states
By default, the output of an RNN layer contains a single vector per sample. This vector
is the RNN cell output corresponding to the last timestep, containing information
about the entire input sequence. The shape of this output is `(batch_size, units)`
where `units` corresponds to the `units` argument passed to the layer's constructor.
An RNN layer can also return the entire sequence of outputs for each sample (one vector
per timestep per sample), if you set `return_sequences=True`. The shape of this output
is `(batch_size, timesteps, units)`.
"""
model = keras.Sequential()
model.add(layers.Embedding(input_dim=1000, output_dim=64))
# The output of GRU will be a 3D tensor of shape (batch_size, timesteps, 256)
model.add(layers.GRU(256, return_sequences=True))
# The output of SimpleRNN will be a 2D tensor of shape (batch_size, 128)
model.add(layers.SimpleRNN(128))
model.add(layers.Dense(10))
model.summary()
"""
In addition, an RNN layer can return its final internal state(s). The returned states
can be used to resume the RNN execution later, or
[to initialize another RNN](https://arxiv.org/abs/1409.3215).
This setting is commonly used in the
encoder-decoder sequence-to-sequence model, where the encoder final state is used as
the initial state of the decoder.
To configure an RNN layer to return its internal state, set the `return_state` parameter
to `True` when creating the layer. Note that `LSTM` has 2 state tensors, but `GRU`
only has one.
To configure the initial state of the layer, just call the layer with additional
keyword argument `initial_state`.
Note that the shape of the state needs to match the unit size of the layer, like in the
example below.
"""
encoder_vocab = 1000
decoder_vocab = 2000
encoder_input = layers.Input(shape=(None,))
encoder_embedded = layers.Embedding(input_dim=encoder_vocab, output_dim=64)(
encoder_input
)
# Return states in addition to output
output, state_h, state_c = layers.LSTM(64, return_state=True, name="encoder")(
encoder_embedded
)
encoder_state = [state_h, state_c]
decoder_input = layers.Input(shape=(None,))
decoder_embedded = layers.Embedding(input_dim=decoder_vocab, output_dim=64)(
decoder_input
)
# Pass the 2 states to a new LSTM layer, as initial state
decoder_output = layers.LSTM(64, name="decoder")(
decoder_embedded, initial_state=encoder_state
)
output = layers.Dense(10)(decoder_output)
model = keras.Model([encoder_input, decoder_input], output)
model.summary()
"""
## RNN layers and RNN cells
In addition to the built-in RNN layers, the RNN API also provides cell-level APIs.
Unlike RNN layers, which process whole batches of input sequences, the RNN cell only
processes a single timestep.
The cell is the inside of the `for` loop of an RNN layer. Wrapping a cell inside a
`keras.layers.RNN` layer gives you a layer capable of processing batches of
sequences, e.g. `RNN(LSTMCell(10))`.
Mathematically, `RNN(LSTMCell(10))` produces the same result as `LSTM(10)`. In fact,
the implementation of this layer in TF v1.x was just creating the corresponding RNN
cell and wrapping it in an RNN layer. However, using the built-in `GRU` and `LSTM`
layers enables the use of CuDNN, and you may see better performance.
There are three built-in RNN cells, each of them corresponding to the matching RNN
layer.
- `keras.layers.SimpleRNNCell` corresponds to the `SimpleRNN` layer.
- `keras.layers.GRUCell` corresponds to the `GRU` layer.
- `keras.layers.LSTMCell` corresponds to the `LSTM` layer.
The cell abstraction, together with the generic `keras.layers.RNN` class, makes it
very easy to implement custom RNN architectures for your research; a minimal sketch
of the wrapper pattern is shown below.
"""
"""
## Cross-batch statefulness
When processing very long sequences (possibly infinite), you may want to use the
pattern of **cross-batch statefulness**.
Normally, the internal state of an RNN layer is reset every time it sees a new batch
(i.e. every sample seen by the layer is assumed to be independent of the past). The
layer will only maintain a state while processing a given sample.
If you have very long sequences though, it is useful to break them into shorter
sequences, and to feed these shorter sequences sequentially into an RNN layer without
resetting the layer's state. That way, the layer can retain information about the
entirety of the sequence, even though it's only seeing one sub-sequence at a time.
You can do this by setting `stateful=True` in the constructor.
If you have a sequence `s = [t0, t1, ... t1546, t1547]`, you would split it into e.g.
```
s1 = [t0, t1, ... t100]
s2 = [t101, ... t201]
...
s16 = [t1501, ... t1547]
```
Then you would process it via:
```python
lstm_layer = layers.LSTM(64, stateful=True)
for s in sub_sequences:
output = lstm_layer(s)
```
When you want to clear the state, you can use `layer.reset_states()`.
> Note: In this setup, sample `i` in a given batch is assumed to be the continuation of
sample `i` in the previous batch. This means that all batches should contain the same
number of samples (batch size). E.g. if a batch contains `[sequence_A_from_t0_to_t100,
sequence_B_from_t0_to_t100]`, the next batch should contain
`[sequence_A_from_t101_to_t200, sequence_B_from_t101_to_t200]`.
Here is a complete example:
"""
paragraph1 = np.random.random((20, 10, 50)).astype(np.float32)
paragraph2 = np.random.random((20, 10, 50)).astype(np.float32)
paragraph3 = np.random.random((20, 10, 50)).astype(np.float32)
lstm_layer = layers.LSTM(64, stateful=True)
output = lstm_layer(paragraph1)
output = lstm_layer(paragraph2)
output = lstm_layer(paragraph3)
# reset_states() will reset the cached state to the original initial_state.
# If no initial_state was provided, zero-states will be used by default.
lstm_layer.reset_states()
"""
### RNN State Reuse
<a id="rnn_state_reuse"></a>
"""
"""
The recorded states of the RNN layer are not included in `layer.weights()`. If you
would like to reuse the state from an RNN layer, you can retrieve the states value
via `layer.states` and use it as the
initial state for a new layer through the Keras functional API, e.g. `new_layer(inputs,
initial_state=layer.states)`, or through model subclassing.
Note also that a Sequential model cannot be used in this case, since it only
supports layers with a single input and output; the extra initial-state input
makes it impossible to use here.
"""
paragraph1 = np.random.random((20, 10, 50)).astype(np.float32)
paragraph2 = np.random.random((20, 10, 50)).astype(np.float32)
paragraph3 = np.random.random((20, 10, 50)).astype(np.float32)
lstm_layer = layers.LSTM(64, stateful=True)
output = lstm_layer(paragraph1)
output = lstm_layer(paragraph2)
existing_state = lstm_layer.states
new_lstm_layer = layers.LSTM(64)
new_output = new_lstm_layer(paragraph3, initial_state=existing_state)
"""
## Bidirectional RNNs
For sequences other than time series (e.g. text), it is often the case that an RNN
model can perform better if it not only processes a sequence from start to end, but
also backwards. For example, to predict the next word in a sentence, it is often
useful to have the context around the word, not just the words that come before it.
Keras provides an easy API for you to build such bidirectional RNNs: the
`keras.layers.Bidirectional` wrapper.
"""
model = keras.Sequential()
model.add(
layers.Bidirectional(layers.LSTM(64, return_sequences=True), input_shape=(5, 10))
)
model.add(layers.Bidirectional(layers.LSTM(32)))
model.add(layers.Dense(10))
model.summary()
"""
Under the hood, `Bidirectional` will copy the RNN layer passed in, and flip the
`go_backwards` field of the newly copied layer, so that it will process the inputs in
reverse order.
The output of the `Bidirectional` RNN will be, by default, the concatenation of the forward layer
output and the backward layer output. If you need a different merging behavior, e.g.
summation, change the `merge_mode` parameter in the `Bidirectional` wrapper
constructor, as in the sketch below. For more details about `Bidirectional`, please check
[the API docs](https://keras.io/api/layers/recurrent_layers/bidirectional/).
"""
"""
## Performance optimization and CuDNN kernels
In TensorFlow 2.0, the built-in LSTM and GRU layers have been updated to leverage CuDNN
kernels by default when a GPU is available. With this change, the prior
`keras.layers.CuDNNLSTM/CuDNNGRU` layers have been deprecated, and you can build your
model without worrying about the hardware it will run on.
Since the CuDNN kernel is built with certain assumptions, this means the layer **will
not be able to use the CuDNN kernel if you change the defaults of the built-in LSTM or
GRU layers**. E.g.:
- Changing the `activation` function from `tanh` to something else.
- Changing the `recurrent_activation` function from `sigmoid` to something else.
- Using `recurrent_dropout` > 0.
- Setting `unroll` to True, which forces LSTM/GRU to decompose the inner
`tf.while_loop` into an unrolled `for` loop.
- Setting `use_bias` to False.
- Using masking when the input data is not strictly right padded (if the mask
corresponds to strictly right padded data, CuDNN can still be used. This is the most
common case).
For the detailed list of constraints, please see the documentation for the
[LSTM](https://keras.io/api/layers/recurrent_layers/lstm/) and
[GRU](https://keras.io/api/layers/recurrent_layers/gru/) layers. A short illustration follows.
"""
"""
### Using CuDNN kernels when available
Let's build a simple LSTM model to demonstrate the performance difference.
We'll use as input sequences the sequence of rows of MNIST digits (treating each row of
pixels as a timestep), and we'll predict the digit's label.
"""
batch_size = 64
# Each MNIST image batch is a tensor of shape (batch_size, 28, 28).
# Each input sequence will be of size (28, 28) (height is treated like time).
input_dim = 28
units = 64
output_size = 10 # labels are from 0 to 9
# Build the RNN model
def build_model(allow_cudnn_kernel=True):
# CuDNN is only available at the layer level, and not at the cell level.
# This means `LSTM(units)` will use the CuDNN kernel,
# while RNN(LSTMCell(units)) will run on non-CuDNN kernel.
if allow_cudnn_kernel:
# The LSTM layer with default options uses CuDNN.
lstm_layer = keras.layers.LSTM(units, input_shape=(None, input_dim))
else:
# Wrapping a LSTMCell in a RNN layer will not use CuDNN.
lstm_layer = keras.layers.RNN(
keras.layers.LSTMCell(units), input_shape=(None, input_dim)
)
model = keras.models.Sequential(
[
lstm_layer,
keras.layers.BatchNormalization(),
keras.layers.Dense(output_size),
]
)
return model
"""
Let's load the MNIST dataset:
"""
mnist = keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
sample, sample_label = x_train[0], y_train[0]
"""
Let's create a model instance and train it.
We choose `sparse_categorical_crossentropy` as the loss function for the model. The
output of the model has a shape of `[batch_size, 10]`. The target for the model is an
integer vector, in which each integer is in the range 0 to 9.
"""
model = build_model(allow_cudnn_kernel=True)
model.compile(
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer="sgd",
metrics=["accuracy"],
)
model.fit(
x_train, y_train, validation_data=(x_test, y_test), batch_size=batch_size, epochs=1
)
"""
Now, let's compare to a model that does not use the CuDNN kernel:
"""
noncudnn_model = build_model(allow_cudnn_kernel=False)
noncudnn_model.set_weights(model.get_weights())
noncudnn_model.compile(
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer="sgd",
metrics=["accuracy"],
)
noncudnn_model.fit(
x_train, y_train, validation_data=(x_test, y_test), batch_size=batch_size, epochs=1
)
"""
When running on a machine with an NVIDIA GPU and CuDNN installed,
the model built with CuDNN is much faster to train compared to the
model that uses the regular TensorFlow kernel.
The same CuDNN-enabled model can also be used to run inference in a CPU-only
environment. The `tf.device` annotation below is just forcing the device placement.
The model will run on CPU by default if no GPU is available.
You simply don't have to worry about the hardware you're running on anymore. Isn't that
pretty cool?
"""
import matplotlib.pyplot as plt
with tf.device("CPU:0"):
cpu_model = build_model(allow_cudnn_kernel=True)
cpu_model.set_weights(model.get_weights())
result = tf.argmax(cpu_model.predict_on_batch(tf.expand_dims(sample, 0)), axis=1)
print(
"Predicted result is: %s, target result is: %s" % (result.numpy(), sample_label)
)
plt.imshow(sample, cmap=plt.get_cmap("gray"))
"""
## RNNs with list/dict inputs, or nested inputs
Nested structures allow implementers to include more information within a single
timestep. For example, a video frame could have audio and video input at the same
time. The data shape in this case could be:
`[batch, timestep, {"video": [height, width, channel], "audio": [frequency]}]`
In another example, handwriting data could have both coordinates x and y for the
current position of the pen, as well as pressure information. So the data
representation could be:
`[batch, timestep, {"location": [x, y], "pressure": [force]}]`
The following code provides an example of how to build a custom RNN cell that accepts
such structured inputs.
"""
"""
### Define a custom cell that supports nested input/output
"""
"""
See [Making new Layers & Models via subclassing](/guides/making_new_layers_and_models_via_subclassing/)
for details on writing your own layers.
"""
class NestedCell(keras.layers.Layer):
def __init__(self, unit_1, unit_2, unit_3, **kwargs):
self.unit_1 = unit_1
self.unit_2 = unit_2
self.unit_3 = unit_3
self.state_size = [tf.TensorShape([unit_1]), tf.TensorShape([unit_2, unit_3])]
self.output_size = [tf.TensorShape([unit_1]), tf.TensorShape([unit_2, unit_3])]
super(NestedCell, self).__init__(**kwargs)
def build(self, input_shapes):
# expect input_shape to contain 2 items, [(batch, i1), (batch, i2, i3)]
i1 = input_shapes[0][1]
i2 = input_shapes[1][1]
i3 = input_shapes[1][2]
self.kernel_1 = self.add_weight(
shape=(i1, self.unit_1), initializer="uniform", name="kernel_1"
)
self.kernel_2_3 = self.add_weight(
shape=(i2, i3, self.unit_2, self.unit_3),
initializer="uniform",
name="kernel_2_3",
)
def call(self, inputs, states):
# inputs should be in [(batch, input_1), (batch, input_2, input_3)]
# state should be in shape [(batch, unit_1), (batch, unit_2, unit_3)]
input_1, input_2 = tf.nest.flatten(inputs)
s1, s2 = states
output_1 = tf.matmul(input_1, self.kernel_1)
output_2_3 = tf.einsum("bij,ijkl->bkl", input_2, self.kernel_2_3)
state_1 = s1 + output_1
state_2_3 = s2 + output_2_3
output = (output_1, output_2_3)
new_states = (state_1, state_2_3)
return output, new_states
def get_config(self):
return {"unit_1": self.unit_1, "unit_2": unit_2, "unit_3": self.unit_3}
"""
### Build a RNN model with nested input/output
Let's build a Keras model that uses a `keras.layers.RNN` layer and the custom cell
we just defined.
"""
unit_1 = 10
unit_2 = 20
unit_3 = 30
i1 = 32
i2 = 64
i3 = 32
batch_size = 64
num_batches = 10
timestep = 50
cell = NestedCell(unit_1, unit_2, unit_3)
rnn = keras.layers.RNN(cell)
input_1 = keras.Input((None, i1))
input_2 = keras.Input((None, i2, i3))
outputs = rnn((input_1, input_2))
model = keras.models.Model([input_1, input_2], outputs)
model.compile(optimizer="adam", loss="mse", metrics=["accuracy"])
"""
### Train the model with randomly generated data
Since there isn't a good candidate dataset for this model, we use random Numpy data for
demonstration.
"""
input_1_data = np.random.random((batch_size * num_batches, timestep, i1))
input_2_data = np.random.random((batch_size * num_batches, timestep, i2, i3))
target_1_data = np.random.random((batch_size * num_batches, unit_1))
target_2_data = np.random.random((batch_size * num_batches, unit_2, unit_3))
input_data = [input_1_data, input_2_data]
target_data = [target_1_data, target_2_data]
model.fit(input_data, target_data, batch_size=batch_size)
"""
With the Keras `keras.layers.RNN` layer, you are only expected to define the math
logic for an individual step within the sequence, and the `keras.layers.RNN` layer
will handle the sequence iteration for you. It's an incredibly powerful way to quickly
prototype new kinds of RNNs (e.g. an LSTM variant).
For more details, please visit the [API docs](https://keras.io/api/layers/recurrent_layers/rnn/).
"""
| apache-2.0 |
h2oai/h2o | py/testdir_hosts/notest_GBM_parseTrain.py | 9 | 2493 | import unittest
import random, sys, time, re
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i, h2o_glm, h2o_util, h2o_rf, h2o_jobs as h2j
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GBM_parseTrain(self):
bucket = 'home-0xdiag-datasets'
files = [('standard', 'covtype200x.data', 'covtype.hex', 1800, 54),
('mnist', 'mnist8m.csv', 'mnist8m.hex',1800,0),
('manyfiles-nflx-gz', 'file_95.dat.gz', 'nflx.hex',1800,256),
('standard', 'allyears2k.csv', 'allyears2k.hex',1800,'IsArrDelayed'),
('standard', 'allyears.csv', 'allyears2k.hex',1800,'IsArrDelayed')
]
for importFolderPath,csvFilename,trainKey,timeoutSecs,response in files:
# PARSE train****************************************
start = time.time()
parseResult = h2i.import_parse(bucket=bucket, path=importFolderPath + "/" + csvFilename,
hex_key=trainKey, timeoutSecs=timeoutSecs)
elapsed = time.time() - start
print "parse end on ", csvFilename, 'took', elapsed, 'seconds',\
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
print "parse result:", parseResult['destination_key']
# GBM (train)****************************************
params = {
'destination_key': "GBMKEY",
'learn_rate':.1,
'ntrees':1,
'max_depth':1,
'min_rows':1,
'response':response
}
print "Using these parameters for GBM: ", params
kwargs = params.copy()
#noPoll -> False when GBM finished
GBMResult = h2o_cmd.runGBM(parseResult=parseResult, noPoll=True,timeoutSecs=timeoutSecs,**kwargs)
h2j.pollWaitJobs(pattern="GBMKEY",timeoutSecs=1800,pollTimeoutSecs=1800)
#print "GBM training completed in", GBMResult['python_elapsed'], "seconds.", \
# "%f pct. of timeout" % (GBMResult['python_%timeout'])
GBMView = h2o_cmd.runGBMView(model_key='GBMKEY')
print GBMView['gbm_model']['errs']
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
inonit/wagtail | wagtail/wagtailusers/tests.py | 2 | 19213 | from __future__ import unicode_literals
import unittest
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group, Permission
from django.utils import six
from wagtail.tests.utils import WagtailTestUtils
from wagtail.wagtailcore import hooks
from wagtail.wagtailusers.models import UserProfile
from wagtail.wagtailcore.models import Page, GroupPagePermission
class TestUserIndexView(TestCase, WagtailTestUtils):
def setUp(self):
# create a user that should be visible in the listing
self.test_user = get_user_model().objects.create_user(
username='testuser',
email='testuser@email.com',
password='password'
)
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailusers_users:index'), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/index.html')
self.assertContains(response, 'testuser')
def test_allows_negative_ids(self):
# see https://github.com/torchbox/wagtail/issues/565
get_user_model().objects.create_user('guardian', 'guardian@example.com', 'gu@rd14n', id=-1)
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'testuser')
self.assertContains(response, 'guardian')
def test_search(self):
response = self.get({'q': "Hello"})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['query_string'], "Hello")
def test_pagination(self):
pages = ['0', '1', '-1', '9999', 'Not a page']
for page in pages:
response = self.get({'p': page})
self.assertEqual(response.status_code, 200)
class TestUserCreateView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailusers_users:add'), params)
def post(self, post_data={}):
return self.client.post(reverse('wagtailusers_users:add'), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/create.html')
def test_create(self):
response = self.post({
'username': "testuser",
'email': "test@user.com",
'first_name': "Test",
'last_name': "User",
'password1': "password",
'password2': "password",
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was created
users = get_user_model().objects.filter(username='testuser')
self.assertEqual(users.count(), 1)
self.assertEqual(users.first().email, 'test@user.com')
def test_create_with_password_mismatch(self):
response = self.post({
'username': "testuser",
'email': "test@user.com",
'first_name': "Test",
'last_name': "User",
'password1': "password1",
'password2': "password2",
})
# Should remain on page
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/create.html')
self.assertTrue(response.context['form'].errors['password2'])
# Check that the user was not created
users = get_user_model().objects.filter(username='testuser')
self.assertEqual(users.count(), 0)
class TestUserEditView(TestCase, WagtailTestUtils):
def setUp(self):
# Create a user to edit
self.test_user = get_user_model().objects.create_user(
username='testuser',
email='testuser@email.com',
password='password'
)
# Login
self.login()
def get(self, params={}, user_id=None):
return self.client.get(reverse('wagtailusers_users:edit', args=(user_id or self.test_user.id, )), params)
def post(self, post_data={}, user_id=None):
return self.client.post(reverse('wagtailusers_users:edit', args=(user_id or self.test_user.id, )), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/edit.html')
def test_nonexistant_redirect(self):
self.assertEqual(self.get(user_id=100000).status_code, 404)
def test_edit(self):
response = self.post({
'username': "testuser",
'email': "test@user.com",
'first_name': "Edited",
'last_name': "User",
'password1': "password",
'password2': "password",
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was edited
user = get_user_model().objects.get(id=self.test_user.id)
self.assertEqual(user.first_name, 'Edited')
def test_edit_validation_error(self):
# Leave "username" field blank. This should give a validation error
response = self.post({
'username': "",
'email': "test@user.com",
'first_name': "Teset",
'last_name': "User",
'password1': "password",
'password2': "password",
})
# Should not redirect to index
self.assertEqual(response.status_code, 200)
class TestUserProfileCreation(TestCase, WagtailTestUtils):
def setUp(self):
# Create a user
self.test_user = get_user_model().objects.create_user(
username='testuser',
email='testuser@email.com',
password='password'
)
def test_user_created_without_profile(self):
self.assertEqual(UserProfile.objects.filter(user=self.test_user).count(), 0)
with self.assertRaises(UserProfile.DoesNotExist):
self.test_user.userprofile
def test_user_profile_created_when_method_called(self):
self.assertIsInstance(UserProfile.get_for_user(self.test_user), UserProfile)
# and get it from the db too
self.assertEqual(UserProfile.objects.filter(user=self.test_user).count(), 1)
class TestGroupIndexView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailusers_groups:index'), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/groups/index.html')
def test_search(self):
response = self.get({'q': "Hello"})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['query_string'], "Hello")
def test_pagination(self):
pages = ['0', '1', '-1', '9999', 'Not a page']
for page in pages:
response = self.get({'p': page})
self.assertEqual(response.status_code, 200)
class TestGroupCreateView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailusers_groups:add'), params)
def post(self, post_data={}):
post_defaults = {
'page_permissions-TOTAL_FORMS': ['0'],
'page_permissions-MAX_NUM_FORMS': ['1000'],
'page_permissions-INITIAL_FORMS': ['0'],
}
for k, v in six.iteritems(post_defaults):
post_data[k] = post_data.get(k, v)
return self.client.post(reverse('wagtailusers_groups:add'), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/groups/create.html')
def test_create_group(self):
response = self.post({'name': "test group"})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# Check that the user was created
groups = Group.objects.filter(name='test group')
self.assertEqual(groups.count(), 1)
def test_group_create_adding_permissions(self):
response = self.post({
'name': "test group",
'page_permissions-0-id': [''],
'page_permissions-0-page': ['1'],
'page_permissions-0-permission_type': ['publish'],
'page_permissions-1-id': [''],
'page_permissions-1-page': ['1'],
'page_permissions-1-permission_type': ['edit'],
'page_permissions-TOTAL_FORMS': ['2'],
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# The test group now exists, with two page permissions
new_group = Group.objects.get(name='test group')
self.assertEqual(new_group.page_permissions.all().count(), 2)
@unittest.expectedFailure
def test_duplicate_page_permissions_error(self):
# Try to submit duplicate page permission entries
response = self.post({
'name': "test group",
'page_permissions-0-id': [''],
'page_permissions-0-page': ['1'],
'page_permissions-0-permission_type': ['publish'],
'page_permissions-1-id': [''],
'page_permissions-1-page': ['1'],
'page_permissions-1-permission_type': ['publish'],
'page_permissions-TOTAL_FORMS': ['2'],
})
self.assertEqual(response.status_code, 200)
# the second form should have errors
self.assertEqual(bool(response.context['formset'].errors[0]), False)
self.assertEqual(bool(response.context['formset'].errors[1]), True)
class TestGroupEditView(TestCase, WagtailTestUtils):
def setUp(self):
# Create a group to edit
self.test_group = Group.objects.create(name='test group')
self.root_page = Page.objects.get(id=1)
self.root_add_permission = GroupPagePermission.objects.create(page=self.root_page,
permission_type='add',
group=self.test_group)
# Get the hook-registered permissions, and add one to this group
self.registered_permissions = Permission.objects.none()
for fn in hooks.get_hooks('register_permissions'):
self.registered_permissions = self.registered_permissions | fn()
self.existing_permission = self.registered_permissions.order_by('pk')[0]
self.another_permission = self.registered_permissions.order_by('pk')[1]
self.test_group.permissions.add(self.existing_permission)
# Login
self.login()
def get(self, params={}, group_id=None):
return self.client.get(reverse('wagtailusers_groups:edit', args=(group_id or self.test_group.id, )), params)
def post(self, post_data={}, group_id=None):
post_defaults = {
'name': 'test group',
'permissions': [self.existing_permission.id],
'page_permissions-TOTAL_FORMS': ['1'],
'page_permissions-MAX_NUM_FORMS': ['1000'],
'page_permissions-INITIAL_FORMS': ['1'], # as we have one page permission already
'page_permissions-0-id': [self.root_add_permission.id],
'page_permissions-0-page': [self.root_add_permission.page.id],
'page_permissions-0-permission_type': [self.root_add_permission.permission_type]
}
for k, v in six.iteritems(post_defaults):
post_data[k] = post_data.get(k, v)
return self.client.post(reverse(
'wagtailusers_groups:edit', args=(group_id or self.test_group.id, )), post_data)
def add_non_registered_perm(self):
# Some groups may have django permissions assigned that are not
# hook-registered as part of the wagtail interface. We need to ensure
# that these permissions are not overwritten by our views.
# Tests that use this method are testing the aforementioned
# functionality.
self.non_registered_perms = Permission.objects.exclude(id__in=self.registered_permissions)
self.non_registered_perm = self.non_registered_perms[0]
self.test_group.permissions.add(self.non_registered_perm)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/groups/edit.html')
def test_nonexistant_group_redirect(self):
self.assertEqual(self.get(group_id=100000).status_code, 404)
def test_group_edit(self):
response = self.post({'name': "test group edited"})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# Check that the group was edited
group = Group.objects.get(id=self.test_group.id)
self.assertEqual(group.name, 'test group edited')
def test_group_edit_validation_error(self):
# Leave "name" field blank. This should give a validation error
response = self.post({'name': ""})
# Should not redirect to index
self.assertEqual(response.status_code, 200)
def test_group_edit_adding_page_permissions(self):
# The test group has one page permission to begin with
self.assertEqual(self.test_group.page_permissions.count(), 1)
response = self.post({
'page_permissions-1-id': [''],
'page_permissions-1-page': ['1'],
'page_permissions-1-permission_type': ['publish'],
'page_permissions-2-id': [''],
'page_permissions-2-page': ['1'],
'page_permissions-2-permission_type': ['edit'],
'page_permissions-TOTAL_FORMS': ['3'],
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# The test group now has three page permissions
self.assertEqual(self.test_group.page_permissions.count(), 3)
def test_group_edit_deleting_page_permissions(self):
# The test group has one page permissions to begin with
self.assertEqual(self.test_group.page_permissions.count(), 1)
response = self.post({
'page_permissions-0-DELETE': ['1'],
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# The test group now has zero page permissions
self.assertEqual(self.test_group.page_permissions.count(), 0)
def test_group_edit_loads_with_page_permissions_shown(self):
# The test group has one page permission to begin with
self.assertEqual(self.test_group.page_permissions.count(), 1)
response = self.get()
self.assertEqual(response.context['formset'].management_form['INITIAL_FORMS'].value(), 1)
self.assertEqual(response.context['formset'].forms[0].instance, self.root_add_permission)
root_edit_perm = GroupPagePermission.objects.create(page=self.root_page,
permission_type='edit',
group=self.test_group)
# The test group now has two page permissions
self.assertEqual(self.test_group.page_permissions.count(), 2)
# Reload the page and check the form instances
response = self.get()
self.assertEqual(response.context['formset'].management_form['INITIAL_FORMS'].value(), 2)
self.assertEqual(response.context['formset'].forms[0].instance, self.root_add_permission)
self.assertEqual(response.context['formset'].forms[1].instance, root_edit_perm)
def test_duplicate_page_permissions_error(self):
# Try to submit duplicate page permission entries
response = self.post({
'page_permissions-1-id': [''],
'page_permissions-1-page': [self.root_add_permission.page.id],
'page_permissions-1-permission_type': [self.root_add_permission.permission_type],
'page_permissions-TOTAL_FORMS': ['2'],
})
self.assertEqual(response.status_code, 200)
# the second form should have errors
self.assertEqual(bool(response.context['formset'].errors[0]), False)
self.assertEqual(bool(response.context['formset'].errors[1]), True)
def test_group_add_registered_django_permissions(self):
# The test group has one django permission to begin with
self.assertEqual(self.test_group.permissions.count(), 1)
response = self.post({
'permissions': [self.existing_permission.id, self.another_permission.id]
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
self.assertEqual(self.test_group.permissions.count(), 2)
def test_group_form_includes_non_registered_permissions_in_initial_data(self):
self.add_non_registered_perm()
original_permissions = self.test_group.permissions.all()
self.assertEqual(original_permissions.count(), 2)
response = self.get()
# See that the form is set up with the correct initial data
self.assertEqual(
response.context['form'].initial.get('permissions'),
list(original_permissions.values_list('id', flat=True))
)
def test_group_retains_non_registered_permissions_when_editing(self):
self.add_non_registered_perm()
original_permissions = list(self.test_group.permissions.all()) # list() to force evaluation
        # submit the form with no changes (only submitting the existing
# permission, as in the self.post function definition)
self.post()
# See that the group has the same permissions as before
self.assertEqual(list(self.test_group.permissions.all()), original_permissions)
self.assertEqual(self.test_group.permissions.count(), 2)
def test_group_retains_non_registered_permissions_when_adding(self):
self.add_non_registered_perm()
# Add a second registered permission
self.post({
'permissions': [self.existing_permission.id, self.another_permission.id]
})
# See that there are now three permissions in total
self.assertEqual(self.test_group.permissions.count(), 3)
# ...including the non-registered one
self.assertIn(self.non_registered_perm, self.test_group.permissions.all())
def test_group_retains_non_registered_permissions_when_deleting(self):
self.add_non_registered_perm()
# Delete all registered permissions
self.post({'permissions': []})
# See that the non-registered permission is still there
self.assertEqual(self.test_group.permissions.count(), 1)
self.assertEqual(self.test_group.permissions.all()[0], self.non_registered_perm)
| bsd-3-clause |
aetilley/revscoring | revscoring/datasources/parent_revision.py | 1 | 1575 | import mwparserfromhell as mwp
from deltas.tokenizers import wikitext_split
from . import revision
from .datasource import Datasource
metadata = Datasource("parent_revision.metadata")
"""
Returns a :class:`~revscoring.datasources.types.RevisionMetadata` for the parent
revision.
"""
text = Datasource("parent_revision.text")
"""
Returns the text content of the parent revision.
"""
################################ Tokenized #####################################
def process_tokens(revision_text):
return [t for t in wikitext_split.tokenize(revision_text or '')]
tokens = Datasource("parent_revision.tokens",
process_tokens, depends_on=[text])
"""
Returns a list of tokens.
"""
############################### Parse tree #####################################
def process_parse_tree(revision_text):
return mwp.parse(revision_text or "")
parse_tree = Datasource("parent_revision.parse_tree",
process_parse_tree, depends_on=[text])
"""
Returns a :class:`mwparserfromhell.wikicode.WikiCode` abstract syntax tree
representing the content of the revision.
"""
content = Datasource("parent_revision.content", revision.process_content,
depends_on=[parse_tree])
"""
Returns the raw content (no markup or templates) of the revision.
"""
content_tokens = Datasource("parent_revision.content_tokens",
revision.process_content_tokens,
depends_on=[content])
"""
Returns tokens from the raw content (no markup or templates) of the parent
revision.
"""
| mit |
wasade/picrust | scripts/pool_test_datasets.py | 1 | 11756 | #!/usr/bin/env python
# File created on 10 April 2012
from __future__ import division
__author__ = "Jesse Zaneveld"
__copyright__ = "Copyright 2011-2013, The PICRUSt Project"
__credits__ = ["Jesse Zaneveld"]
__license__ = "GPL"
__version__ = "1.0.0-dev"
__maintainer__ = "Jesse Zaneveld"
__email__ = "zaneveld@gmail.com"
__status__ = "Development"
from collections import defaultdict
from os import listdir
from os.path import join
from cogent.util.option_parsing import parse_command_line_parameters,\
make_option
from picrust.evaluate_test_datasets import unzip,evaluate_test_dataset,\
update_pooled_data, run_accuracy_calculations_on_biom_table,run_accuracy_calculations_on_pooled_data,\
format_scatter_data, format_correlation_data, run_and_format_roc_analysis
from biom.parse import parse_biom_table, convert_biom_to_table
script_info = {}
script_info['brief_description'] = "Pool character predictions within a directory, given directories of expected vs. observed test results"
script_info['script_description'] =\
"""The script finds all paired expected and observed values in a set of directories and generates pooled .biom files in a specified output directory"""
script_info['script_usage'] = [("","Pool .biom files according to holdout_distance.","%prog -i obs_otu_table_dir -e exp_otu_table_dir -p distance -o ./evaluation_results/pooled_by_distance/")]
script_info['output_description']= "Outputs will be obs,exp data points for the comparison"
script_info['required_options'] = [
make_option('-i','--trait_table_dir',type="existing_dirpath",help='the input trait table directory (files in biom format)'),\
make_option('-e','--exp_trait_table_dir',type="existing_dirpath",help='the input expected trait table directory (files in biom format)'),\
make_option('-o','--output_dir',type="new_dirpath",help='the output directory'),
]
script_info['optional_options'] = [
make_option('-f','--field_order',\
default='file_type,prediction_method,weighting_method,holdout_method,distance,organism',help='pass comma-separated categories, in the order they appear in file names. Categories are "file_type","prediction_method","weighting_method","holdout_method" (randomization vs. holdout),"distance",and "organism". Example: "-f file_type,test_method,asr_method specifies that files will be in the form: predict_traits--distance_exclusion--wagner. Any unspecified values are set to "not_specified". [default: %default]'),\
make_option('-p','--pool_by',\
default=False,help='pass comma-separated categories to pool results by those metadata categories. Valid categories are: holdout_method, prediction_method,weighting_method,distance and organism. For example, pass "distance" to output results pooled by holdout distance in addition to holdout method and prediction method [default: %default]')
]
script_info['version'] = __version__
def iter_prediction_expectation_pairs(obs_dir_fp,exp_dir_fp,file_name_field_order,file_name_delimiter,verbose=False,strict=False):
"""Iterate pairs of observed, expected biom file names"""
input_files=sorted(listdir(obs_dir_fp))
for file_number,f in enumerate(input_files):
if verbose:
print "\nExamining file {0} of {1}: {2}".format(file_number+1,len(input_files),f)
if 'accuracy_metrics' in f:
print "%s is an Accuracy file...skipping" %str(f)
continue
#filename_components_list = f.split(file_name_delimiter)
#Get predicted traits
filename_metadata = get_metadata_from_filename(f,file_name_field_order,\
file_name_delimiter,verbose=verbose)
if filename_metadata.get('file_type',None) == 'predict_traits':
if verbose:
#print "Found a prediction file"
print "\tLoading .biom format observation table:",f
try:
obs_table =\
parse_biom_table(open(join(obs_dir_fp,f),'U'))
except ValueError:
print 'Failed, skipping...'
continue
# raise RuntimeError(\
# "Could not parse predicted trait file: %s. Is it a .biom formatted file?" %(f))
else:
continue
# Get paired observation file
exp_filename = file_name_delimiter.join(['exp_biom_traits',filename_metadata['holdout_method'],filename_metadata['distance'],filename_metadata['organism']])
exp_filepath = join(exp_dir_fp,exp_filename)
if verbose:
print "\tLooking for the expected trait file matching %s here: %s" %(f,exp_filepath)
try:
exp_table =\
parse_biom_table(open(exp_filepath,"U"))
except IOError, e:
if strict:
raise IOError(e)
else:
if verbose:
print "Missing expectation file....skipping!"
continue
yield obs_table,exp_table,f
def get_metadata_from_filename(f,file_name_field_order,file_name_delimiter,\
default_text='not_specified',verbose=False):
"""Extract metadata values from a filename"""
filename_components = {}
for i,field in enumerate(f.split(file_name_delimiter)):
filename_components[i]=field
#if verbose:
# print "Filename components:",filename_components
filename_metadata = {}
try:
for field in file_name_field_order.keys():
filename_metadata[field] =\
filename_components.get(file_name_field_order.get(field,default_text),default_text)
#if verbose:
# print "filename_metadata:",filename_metadata
except IndexError, e:
print "Could not parse filename %s using delimiter: %s. Skipping..." %(f,file_name_delimiter)
return None
return filename_metadata
def pool_test_dataset_dir(obs_dir_fp,exp_dir_fp,file_name_delimiter="--",\
file_name_field_order=\
{'file_type':0,"prediction_method":1,"weighting_method":2,"holdout_method":3,\
"distance":4,"organism":5},strict=False, verbose=True,pool_by=['distance']):
"""Retrun pooled control & evaluation results from the given directories
obs_dir_fp -- directory containing PICRUST-predicted genomes. These MUST start with
'predict_traits', and must contain the values specified in file_name_field_order,\
separated by the delimiter given in file_name_delimiter. For example:
predict_traits--exclude_tips_by_distance--0.87--'NC_000913|646311926'
exp_dir_fp -- as obs_dir_fp above, but expectation file names (usually sequenced genomes
with known gene content) must start with exp_biom_traits
file_name_delimiter -- the delimiter that separates metadata stored in the filename
NOTE: technically this isn't the best way of doing things. We may want at some point
to revisit this setup and store metadata about each comparison in a separate file. But
storing in the filename is convenient for our initial analysis.
file_name_field_order -- the order of the required metadata fields in the filename.
Required fields are file_type,method,distance,and organism
pool_by -- if passed, concatenate traits from each trial that is identical in this category. e.g. pool_by 'distance' will pool traits across individual test genomes with the same holdout distance.
The method assumes that for each file type in the observed directory, a paired file
is also found in the exp_dir with similar method, distance, and organism, but a varied
file type (test_tree, test_trait_table)
Process:
1. Search test directory for all gene predictions in the correct format
2. For each, find the corresponding expected trait table in the expectation file
3. Pool by specified pool_by values
4. Return dicts of pooled observation,expectation values
"""
trials = defaultdict(list)
#We'll want a quick unzip fn for converting points to trials
#TODO: separate out into a 'get_paired_data_from_dirs' function
pooled_observations = {}
pooled_expectations = {}
pairs = iter_prediction_expectation_pairs(obs_dir_fp,exp_dir_fp,file_name_field_order,file_name_delimiter,verbose=verbose)
file_number = 0
for obs_table,exp_table,filename in pairs:
#print "analyzing filename:",filename
filename_metadata= get_metadata_from_filename(filename,file_name_field_order,\
file_name_delimiter,verbose=verbose)
#base_tag = '%s\t%s\t' %(filename_metadata['holdout_method'],filename_metadata['prediction_method'])
#tags = [base_tag+'all_results']
if 'file_type' in pool_by:
pool_by.remove('file_type') #we do this manually at the end
combined_tag = ['all']*len(file_name_field_order.keys())
for field in file_name_field_order.keys():
#print combined_tag
#print file_name_field_order
idx = file_name_field_order[field]
#print idx
if field in pool_by:
combined_tag[idx] = filename_metadata[field]
tags=[file_name_delimiter.join(combined_tag)]
if verbose:
print "Pooling by:", pool_by
print "Combined tags:",tags
pooled_observations,pooled_expectations =\
update_pooled_data(obs_table,exp_table,tags,pooled_observations,\
pooled_expectations,str(file_number),verbose=verbose)
file_number += 1
return pooled_observations,pooled_expectations
def main():
option_parser, opts, args =\
parse_command_line_parameters(**script_info)
pool_by = opts.pool_by.split(',')
#Construct a dict from user specified field order
file_name_field_order = {}
for i,field in enumerate(opts.field_order.split(',')):
file_name_field_order[field]=i
if opts.verbose:
print "Assuming file names are in this order:",file_name_field_order
for k in pool_by:
#Check that we're only pooling by values that exist
if k not in file_name_field_order.keys():
err_text=\
"Bad value for option '--pool_by'. Can't pool by '%s'. Valid categories are: %s" %(k,\
",".join(file_name_field_order.keys()))
raise ValueError(err_text)
if opts.verbose:
print "Pooling results by:",pool_by
file_name_delimiter='--'
pooled_observations,pooled_expectations = pool_test_dataset_dir(opts.trait_table_dir,\
opts.exp_trait_table_dir,file_name_delimiter=file_name_delimiter,\
file_name_field_order=file_name_field_order,pool_by=pool_by,\
verbose=opts.verbose)
#prediction_prefix = 'predict_traits'
#expectation_prefix = 'exp_biom_traits'
for tag in pooled_observations.keys():
obs_table = pooled_observations[tag]
exp_table = pooled_expectations[tag]
#obs_table_filename = file_name_delimiter.join([prediction_prefix]+[t for t in tag.split()])
#exp_table_filename = file_name_delimiter.join([expectation_prefix]+[t for t in tag.split()])
obs_table_filename = file_name_delimiter.join(['predict_traits']+[t for t in tag.split()])
exp_table_filename = file_name_delimiter.join(['exp_biom_table']+[t for t in tag.split()])
obs_outpath = join(opts.output_dir,obs_table_filename)
exp_outpath = join(opts.output_dir,exp_table_filename)
print obs_outpath
print exp_outpath
f=open(obs_outpath,'w')
f.write(obs_table.delimitedSelf())
f.close()
f=open(exp_outpath,'w')
f.write(exp_table.delimitedSelf())
f.close()
if __name__ == "__main__":
main()
| gpl-3.0 |
schets/scikit-learn | examples/svm/plot_iris.py | 62 | 3251 | """
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
reduction while ``SVC`` uses the One-vs-One multiclass reduction.
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
datasets can help get an intuitive understanding of their respective
expressive power, be aware that those intuitions don't always generalize to
   more realistic high-dimensional problems.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
lin_svc = svm.LinearSVC(C=C).fit(X, y)
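# Illustrative aside, not part of the original example (and assuming a
# scikit-learn version whose LinearSVC accepts loss='hinge'): requesting the
# standard hinge loss brings LinearSVC closer to SVC(kernel='linear'), although
# the One-vs-Rest multiclass reduction still differs.
lin_svc_hinge = svm.LinearSVC(C=C, loss='hinge').fit(X, y)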
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
'LinearSVC (linear kernel)',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel']
for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title(titles[i])
plt.show()
| bsd-3-clause |
aetilley/revscoring | revscoring/scorer_models/scorer_model.py | 2 | 9100 | """
.. autoclass:: revscoring.scorer_models.scorer_model.ScorerModel
:members:
.. autoclass:: revscoring.scorer_models.scorer_model.MLScorerModel
:members:
.. autoclass:: revscoring.scorer_models.scorer_model.ScikitLearnClassifier
:members:
"""
import pickle
import time
import traceback
from statistics import mean, stdev
from sklearn.metrics import auc, roc_curve
import yamlconf
from ..extractors import Extractor
from .util import normalize_json
class ScorerModel:
"""
A model used to score a revision based on a set of features.
"""
def __init__(self, features, language=None, version=None):
"""
:Parameters:
features : `list`(`Feature`)
A list of `Feature` s that will be used to train the model and
score new observations.
language : `Language`
A language to use when applying a feature set.
"""
self.features = tuple(features)
self.language = language
self.version = version
def __getattr__(self, attr):
if attr is "version":
return None
else:
raise AttributeError(attr)
def score(self, feature_values):
"""
Make a prediction or otherwise use the model to generate a score.
:Parameters:
feature_values : collection(`mixed`)
an ordered collection of values that correspond to the
`Feature` s provided to the constructor
:Returns:
A `dict` of statistics
"""
raise NotImplementedError()
def _validate_features(self, feature_values):
"""
Checks the features against provided values to confirm types,
ordinality, etc.
"""
        return [feature.validate(value)
                for feature, value in zip(self.features, feature_values)]
def _generate_stats(self, values):
columns = zip(*values)
stats = tuple((mean(c), stdev(c)) for c in columns)
return stats
def _scale_and_center(self, values, stats):
for feature_values in values:
yield (tuple((val-mean)/max(sd, 0.01)
for (mean, sd), val in zip(stats, feature_values)))
@classmethod
def from_config(cls, config, name, section_key='scorer_models'):
section = config[section_key][name]
if 'module' in section:
return yamlconf.import_module(section['module'])
elif 'class' in section:
class_path = section['class']
Class = yamlconf.import_module(class_path)
assert cls != Class
return Class.from_config(config, name, section_key=section_key)
class MLScorerModel(ScorerModel):
"""
A machine learned model used to score a revision based on a set of features.
Machine learned models are trained and tested against labeled data.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.trained = None
def train(self, values_labels):
"""
Trains the model on labeled data.
:Parameters:
            values_labels : `iterable`((`<feature_values>`, `<label>`))
                an iterable of labeled data where <feature_values> is an ordered
collection of predictive values that correspond to the
`Feature` s provided to the constructor
:Returns:
A dictionary of model statistics.
"""
raise NotImplementedError()
def test(self, values_labels):
"""
        Tests the model against labeled data. Note that test data should be
        withheld from the training data.
:Parameters:
values_labels : `iterable`((`<feature_values>`, `<label>`))
an iterable of labeled data Where <values_labels> is an ordered
collection of predictive values that correspond to the
`Feature` s provided to the constructor
:Returns:
A dictionary of test results.
"""
raise NotImplementedError()
@classmethod
def load(cls, f):
"""
Reads serialized model information from a file. Make sure to open
the file as a binary stream.
"""
return pickle.load(f)
def dump(self, f):
"""
Writes serialized model information to a file. Make sure to open the
file as a binary stream.
"""
pickle.dump(self, f)
@classmethod
def from_config(cls, config, name, section_key="scorer_models"):
"""
Constructs a model from configuration.
"""
section = config[section_key][name]
if 'model_file' in section:
return cls.load(open(section['model_file'], 'rb'))
else:
return cls(**{k:v for k,v in section.items() if k != "class"})
class ScikitLearnClassifier(MLScorerModel):
def __init__(self, features, classifier_model, language=None, version=None):
super().__init__(features, language=language, version=version)
self.classifier_model = classifier_model
def train(self, values_labels):
"""
:Returns:
A dictionary with the fields:
* seconds_elapsed -- Time in seconds spent fitting the model
"""
start = time.time()
values, labels = zip(*values_labels)
# Fit SVC model
self.classifier_model.fit(values, labels)
self.trained = time.time()
return {
'seconds_elapsed': time.time() - start
}
def score(self, feature_values):
"""
Generates a score for a single revision based on a set of extracted
feature_values.
:Parameters:
feature_values : collection(`mixed`)
an ordered collection of values that correspond to the
`Feature` s provided to the constructor
:Returns:
A dict with the fields:
            * prediction -- The most likely class
* probability -- A mapping of probabilities for input classes
corresponding to the classes the classifier was
trained on. Generating this probability is
slower than a simple prediction.
"""
prediction = self.classifier_model.predict([feature_values])[0]
labels = self.classifier_model.classes_
probas = self.classifier_model.predict_proba([feature_values])[0]
probability = {label:proba for label, proba in zip(labels, probas)}
doc = {
'prediction': prediction,
'probability': probability
}
return normalize_json(doc)
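    # Example usage (an illustrative sketch only; the classifier choice and the
    # feature/label values below are hypothetical, and `features` stands for a
    # list of revscoring `Feature` s):
    #
    #   from sklearn.ensemble import RandomForestClassifier
    #   model = ScikitLearnClassifier(features, RandomForestClassifier())
    #   model.train([([0.2, 5, 1], True), ([0.9, 2, 0], False)])
    #   model.score([0.5, 3, 1])
    #   # --> {'prediction': True, 'probability': {True: 0.66, False: 0.34}}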
def test(self, values_labels):
"""
:Returns:
A dictionary of test statistics with the fields:
* accuracy -- The mean accuracy of classification
* table -- A truth table for classification
* roc
* auc -- The area under the ROC curve
* fpr -- A list of false-positive rate values
* tpr -- A list of true-positive rate values
* thresholds -- Thresholds on the decision function used to
compute fpr and tpr.
"""
values, labels = zip(*values_labels)
scores = [self.score(feature_values) for feature_values in values]
return {
'table': self._label_table(scores, labels),
'accuracy': self.classifier_model.score(values, labels),
'roc': self._roc_stats(scores, labels,
self.classifier_model.classes_)
}
@classmethod
def _roc_stats(cls, scores, labels, possible_labels):
if len(possible_labels) <= 2:
# Binary classification, class choice doesn't matter.
comparison_label = possible_labels[0]
return cls._roc_single_class(scores, labels, comparison_label)
else:
roc_stats = {}
for comparison_label in possible_labels:
roc_stats[comparison_label] = \
cls._roc_single_class(scores, labels, comparison_label)
return roc_stats
@classmethod
def _roc_single_class(cls, scores, labels, comparison_label):
probabilities = [s['probability'][comparison_label]
for s in scores]
true_positives = [l == comparison_label for l in labels]
fpr, tpr, thresholds = roc_curve(true_positives, probabilities)
return {
'curve': {
'fpr': list(fpr),
'tpr': list(tpr),
'thresholds': list(thresholds)
},
'auc': auc(fpr, tpr)
}
@staticmethod
def _label_table(scores, labels):
predicteds = [s['prediction'] for s in scores]
table = {}
for pair in zip(labels, predicteds):
table[pair] = table.get(pair, 0) + 1
return table
| mit |
pravsripad/mne-python | tutorials/epochs/20_visualize_epochs.py | 2 | 11573 | # -*- coding: utf-8 -*-
"""
.. _tut-visualize-epochs:
========================
Visualizing epoched data
========================
This tutorial shows how to plot epoched data as time series, how to plot the
spectral density of epoched data, how to plot epochs as an image map, and how to
plot the sensor locations and projectors stored in `~mne.Epochs` objects.
We'll start by importing the modules we need, loading the continuous (raw)
sample data, and cropping it to save memory:
"""
# %%
import mne
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = (sample_data_folder / 'MEG' / 'sample' /
'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file, verbose=False).crop(tmax=120)
# %%
# To create the `~mne.Epochs` data structure, we'll extract the event
# IDs stored in the :term:`stim channel`, map those integer event IDs to more
# descriptive condition labels using an event dictionary, and pass those to the
# `~mne.Epochs` constructor, along with the `~mne.io.Raw` data and the
# desired temporal limits of our epochs, ``tmin`` and ``tmax`` (for a
# detailed explanation of these steps, see :ref:`tut-epochs-class`).
events = mne.find_events(raw, stim_channel='STI 014')
event_dict = {'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3,
'visual/right': 4, 'face': 5, 'button': 32}
epochs = mne.Epochs(raw, events, tmin=-0.2, tmax=0.5, event_id=event_dict,
preload=True)
del raw
# %%
# Plotting ``Epochs`` as time series
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# .. sidebar:: Interactivity in pipelines and scripts
#
# To use the interactive features of the `~mne.Epochs.plot` method
# when running your code non-interactively, pass the ``block=True``
# parameter, which halts the Python interpreter until the figure window is
# closed. That way, any channels or epochs that you mark as "bad" will be
# taken into account in subsequent processing steps.
#
# To visualize epoched data as time series (one time series per channel), the
# `mne.Epochs.plot` method is available. It creates an interactive window
# where you can scroll through epochs and channels, enable/disable any
# unapplied :term:`SSP projectors <projector>` to see how they affect the
# signal, and even manually mark bad channels (by clicking the channel name) or
# bad epochs (by clicking the data) for later dropping. Channels marked "bad"
# will be shown in light grey color and will be added to
# ``epochs.info['bads']``; epochs marked as bad will be indicated as ``'USER'``
# in ``epochs.drop_log``.
#
# Here we'll plot only the "catch" trials from the :ref:`sample dataset
# <sample-dataset>`, and pass in our events array so that the button press
# responses also get marked (we'll plot them in red, and plot the "face" events
# defining time zero for each epoch in blue). We also need to pass in
# our ``event_dict`` so that the `~mne.Epochs.plot` method will know what
# we mean by "button" — this is because subsetting the conditions by
# calling ``epochs['face']`` automatically purges the dropped entries from
# ``epochs.event_id``:
catch_trials_and_buttonpresses = mne.pick_events(events, include=[5, 32])
epochs['face'].plot(events=catch_trials_and_buttonpresses, event_id=event_dict,
event_color=dict(button='red', face='blue'))
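# As noted in the sidebar above, when running non-interactively you can pass
# ``block=True`` so that execution pauses until the plot window is closed. A
# sketch (left commented out here so the tutorial does not halt):
# epochs['face'].plot(events=catch_trials_and_buttonpresses,
#                     event_id=event_dict, block=True)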
# %%
# To see all sensors at once, we can use butterfly mode and group by selection:
epochs['face'].plot(events=catch_trials_and_buttonpresses, event_id=event_dict,
event_color=dict(button='red', face='blue'),
group_by='selection', butterfly=True)
# %%
# Plotting projectors from an ``Epochs`` object
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# In the plot above we can see heartbeat artifacts in the magnetometer
# channels, so before we continue let's load ECG projectors from disk and apply
# them to the data:
ecg_proj_file = (sample_data_folder / 'MEG' / 'sample' /
'sample_audvis_ecg-proj.fif')
ecg_projs = mne.read_proj(ecg_proj_file)
epochs.add_proj(ecg_projs)
epochs.apply_proj()
# %%
# Just as we saw in the :ref:`tut-section-raw-plot-proj` section, we can plot
# the projectors present in an `~mne.Epochs` object using the same
# `~mne.Epochs.plot_projs_topomap` method. Since the original three
# empty-room magnetometer projectors were inherited from the
# `~mne.io.Raw` file, and we added two ECG projectors for each sensor
# type, we should see nine projector topomaps:
epochs.plot_projs_topomap(vlim='joint')
# %%
# Note that these field maps illustrate aspects of the signal that *have
# already been removed* (because projectors in `~mne.io.Raw` data are
# applied by default when epoching, and because we called
# `~mne.Epochs.apply_proj` after adding additional ECG projectors from
# file). You can check this by examining the ``'active'`` field of the
# projectors:
print(all(proj['active'] for proj in epochs.info['projs']))
# %%
# Plotting sensor locations
# ^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Just like `~mne.io.Raw` objects, `~mne.Epochs` objects
# keep track of sensor locations, which can be visualized with the
# `~mne.Epochs.plot_sensors` method:
epochs.plot_sensors(kind='3d', ch_type='all')
epochs.plot_sensors(kind='topomap', ch_type='all')
# %%
# Plotting the power spectrum of ``Epochs``
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Again, just like `~mne.io.Raw` objects, `~mne.Epochs` objects
# have a `~mne.Epochs.plot_psd` method for plotting the `spectral
# density`_ of the data.
epochs['auditory'].plot_psd(picks='eeg')
# %%
# It is also possible to plot spectral estimates across sensors as a scalp
# topography, using `~mne.Epochs.plot_psd_topomap`. The default parameters will
# plot five frequency bands (δ, θ, α, β, γ), will compute power based on
# magnetometer channels, and will plot the power estimates in decibels:
epochs['visual/right'].plot_psd_topomap()
# %%
# Just like `~mne.Epochs.plot_projs_topomap`,
# `~mne.Epochs.plot_psd_topomap` has a ``vlim='joint'`` option for fixing
# the colorbar limits jointly across all subplots, to give a better sense of
# the relative magnitude in each frequency band. You can change which channel
# type is used via the ``ch_type`` parameter, and if you want to view
# different frequency bands than the defaults, the ``bands`` parameter takes a
# list of tuples, with each tuple containing either a single frequency and a
# subplot title, or lower/upper frequency limits and a subplot title:
bands = [(10, '10 Hz'), (15, '15 Hz'), (20, '20 Hz'), (10, 20, '10-20 Hz')]
epochs['visual/right'].plot_psd_topomap(bands=bands, vlim='joint',
ch_type='grad')
# %%
# If you prefer untransformed power estimates, you can pass ``dB=False``. It is
# also possible to normalize the power estimates by dividing by the total power
# across all frequencies, by passing ``normalize=True``. See the docstring of
# `~mne.Epochs.plot_psd_topomap` for details.
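# A quick sketch combining both options (shown for illustration; these argument
# choices are not part of the original tutorial):
epochs['visual/right'].plot_psd_topomap(dB=False, normalize=True)
# %%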
#
#
# Plotting ``Epochs`` as an image map
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# A convenient way to visualize many epochs simultaneously is to plot them as
# an image map, with each row of pixels in the image representing a single
# epoch, the horizontal axis representing time, and each pixel's color
# representing the signal value at that time sample for that epoch. Of course,
# this requires either a separate image map for each channel, or some way of
# combining information across channels. The latter is possible using the
# `~mne.Epochs.plot_image` method; the former can be achieved with the
# `~mne.Epochs.plot_image` method (one channel at a time) or with the
# `~mne.Epochs.plot_topo_image` method (all sensors at once).
#
# By default, the image map generated by `~mne.Epochs.plot_image` will be
# accompanied by a scalebar indicating the range of the colormap, and a time
# series showing the average signal across epochs and a bootstrapped 95%
# confidence band around the mean. `~mne.Epochs.plot_image` is a highly
# customizable method with many parameters, including customization of the
# auxiliary colorbar and averaged time series subplots. See the docstrings of
# `~mne.Epochs.plot_image` and `mne.viz.plot_compare_evokeds` (which is
# used to plot the average time series) for full details. Here we'll show the
# mean across magnetometers for all epochs with an auditory stimulus:
epochs['auditory'].plot_image(picks='mag', combine='mean')
# %%
# To plot image maps for individual sensors or a small group of sensors, use
# the ``picks`` parameter. Passing ``combine=None`` (the default) will yield
# separate plots for each sensor in ``picks``; passing ``combine='gfp'`` will
# plot the global field power (useful for combining sensors that respond with
# opposite polarity).
# sphinx_gallery_thumbnail_number = 11
epochs['auditory'].plot_image(picks=['MEG 0242', 'MEG 0243'])
epochs['auditory'].plot_image(picks=['MEG 0242', 'MEG 0243'], combine='gfp')
# %%
# To plot an image map for *all* sensors, use
# `~mne.Epochs.plot_topo_image`, which is optimized for plotting a large
# number of image maps simultaneously, and (in interactive sessions) allows you
# to click on each small image map to pop open a separate figure with the
# full-sized image plot (as if you had called `~mne.Epochs.plot_image` on
# just that sensor). At the small scale shown in this tutorial it's hard to see
# much useful detail in these plots; it's often best when plotting
# interactively to maximize the topo image plots to fullscreen. The default is
# a figure with black background, so here we specify a white background and
# black foreground text. By default `~mne.Epochs.plot_topo_image` will
# show magnetometers and gradiometers on the same plot (and hence not show a
# colorbar, since the sensors are on different scales) so we'll also pass a
# `~mne.channels.Layout` restricting each plot to one channel type.
# First, however, we'll also drop any epochs that have unusually high signal
# levels, because they can cause the colormap limits to be too extreme and
# therefore mask smaller signal fluctuations of interest.
reject_criteria = dict(mag=3000e-15, # 3000 fT
grad=3000e-13, # 3000 fT/cm
eeg=150e-6) # 150 µV
epochs.drop_bad(reject=reject_criteria)
for ch_type, title in dict(mag='Magnetometers', grad='Gradiometers').items():
layout = mne.channels.find_layout(epochs.info, ch_type=ch_type)
epochs['auditory/left'].plot_topo_image(layout=layout, fig_facecolor='w',
font_color='k', title=title)
# %%
# To plot image maps for all EEG sensors, pass an EEG layout as the ``layout``
# parameter of `~mne.Epochs.plot_topo_image`. Note also here the use of
# the ``sigma`` parameter, which smooths each image map along the vertical
# dimension (across epochs) which can make it easier to see patterns across the
# small image maps (by smearing noisy epochs onto their neighbors, while
# reinforcing parts of the image where adjacent epochs are similar). However,
# ``sigma`` can also disguise epochs that have persistent extreme values and
# maybe should have been excluded, so it should be used with caution.
layout = mne.channels.find_layout(epochs.info, ch_type='eeg')
epochs['auditory/left'].plot_topo_image(layout=layout, fig_facecolor='w',
font_color='k', sigma=1)
# %%
# .. LINKS
#
# .. _spectral density: https://en.wikipedia.org/wiki/Spectral_density
| bsd-3-clause |
nwiizo/workspace_2017 | keras_ex/example/lstm_text_generation.py | 2 | 3300 | '''Example script to generate text from Nietzsche's writings.
At least 20 epochs are required before the generated text
starts sounding coherent.
It is recommended to run this script on GPU, as recurrent
networks are quite computationally intensive.
If you try this script on new data, make sure your corpus
has at least ~100k characters. ~1M is better.
'''
from __future__ import print_function
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers import LSTM
from keras.optimizers import RMSprop
from keras.utils.data_utils import get_file
import numpy as np
import random
import sys
path = get_file('nietzsche.txt', origin="https://s3.amazonaws.com/text-datasets/nietzsche.txt")
text = open(path).read().lower()
print('corpus length:', len(text))
chars = sorted(list(set(text)))
print('total chars:', len(chars))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
# cut the text in semi-redundant sequences of maxlen characters
maxlen = 40
step = 3
sentences = []
next_chars = []
for i in range(0, len(text) - maxlen, step):
sentences.append(text[i: i + maxlen])
next_chars.append(text[i + maxlen])
print('nb sequences:', len(sentences))
print('Vectorization...')
X = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.bool)
y = np.zeros((len(sentences), len(chars)), dtype=np.bool)
for i, sentence in enumerate(sentences):
for t, char in enumerate(sentence):
X[i, t, char_indices[char]] = 1
y[i, char_indices[next_chars[i]]] = 1
# build the model: a single LSTM
print('Build model...')
model = Sequential()
model.add(LSTM(128, input_shape=(maxlen, len(chars))))
model.add(Dense(len(chars)))
model.add(Activation('softmax'))
optimizer = RMSprop(lr=0.01)
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
def sample(preds, temperature=1.0):
# helper function to sample an index from a probability array
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
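    # dividing the log-probabilities by the temperature sharpens the
    # distribution when temperature < 1 (more conservative picks) and flattens
    # it when temperature > 1 (more surprising picks)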
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
# train the model, output generated text after each iteration
for iteration in range(1, 60):
print()
print('-' * 50)
print('Iteration', iteration)
model.fit(X, y, batch_size=128, nb_epoch=1)
start_index = random.randint(0, len(text) - maxlen - 1)
for diversity in [0.2, 0.5, 1.0, 1.2]:
print()
print('----- diversity:', diversity)
generated = ''
sentence = text[start_index: start_index + maxlen]
generated += sentence
print('----- Generating with seed: "' + sentence + '"')
sys.stdout.write(generated)
for i in range(400):
x = np.zeros((1, maxlen, len(chars)))
for t, char in enumerate(sentence):
x[0, t, char_indices[char]] = 1.
preds = model.predict(x, verbose=0)[0]
next_index = sample(preds, diversity)
next_char = indices_char[next_index]
generated += next_char
sentence = sentence[1:] + next_char
sys.stdout.write(next_char)
sys.stdout.flush()
print()
| mit |
cbick/gps2gtfs | postprocessing/src/ExcelPrint.py | 1 | 6823 | from scipy import array,sort
from pylab import find,hist,figure,repeat
import subprocess
def copy(txt):
"""
Copies txt to the clipboard (OSX only)
"""
p=subprocess.Popen(['pbcopy'],stdin=subprocess.PIPE)
p.stdin.write(txt)
p.stdin.close()
def print_QE_tables(Qs,Es,qs,delim="\t"):
"""
Returns string of 'delim'-delimited printout. Assumes that
Qs,Es are of the form returned by the Stats.QEPlot method, and that
qs is the list of quantile percentages that were supplied to the same
method.
"""
ret = ""
rows = array(Qs.keys())
rows.sort()
# we want 3 values for each Q and also the E
qs = map(str,repeat(qs,3))
for i in range(len(qs)/3):
qs[i*3+0] += " lower CI"
qs[i*3+1] += " upper CI"
cols = qs+["E","E-moe","E+moe"]
ret += delim + delim.join(cols) + "\n"
for row in rows:
Q = Qs[row]
E,moe = Es[row]
ret += str(row) + delim
for qlh in Q:
#excel wants low,high,middle
ret += delim.join(map(str,(qlh[1],qlh[2],qlh[0]))) + delim
ret += str(E) + delim + str(E-moe) + delim + str(E+moe)
ret += "\n"
return ret
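# Illustrative sketch of the expected inputs (hypothetical values, inferred
# from the docstring above): one (quantile, lower CI, upper CI) triple per
# requested percentage, plus an (expected value, margin of error) pair.
#   qs = [5, 50, 95]
#   Qs = {'weekday': [(120, 100, 140), (300, 280, 320), (600, 550, 650)]}
#   Es = {'weekday': (310, 15)}
#   copy(print_QE_tables(Qs, Es, qs))  # then paste into a spreadsheet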
def print_ecdf_annotations(ecdf,data,minx=-2000,weighted=True,delim="\t"):
"""
Returns string of 'delim'-delimited printout. Assumes that
ecdf is of the form (x,p,a_n) as returned by the Stats.ecdf()
method, and that data is the (optionally weighted) data
provided to the same.
"""
import Stats
ret = ""
x,p,a_n = ecdf
E_bar,E_moe = Stats.E(data,weighted=weighted,alpha=0.05);
E_x,E_p,i = Stats.evaluate_ecdf(E_bar,x,p)
zero_x,zero_p,j = Stats.evaluate_ecdf(0.0,x,p)
x_q,p_q,i = Stats.find_quantile(0.05,x,p)
ret += "x"+delim+"Expected Value\n"
ret += str(minx) + delim + str(E_p) + "\n"
ret += str(E_bar) + delim + str(E_p) + "\n"
ret += str(E_bar) + delim + str(0) + "\n"
ret += "\n"
ret += "x"+delim+"On Time\n"
ret += str(minx) + delim + str(zero_p) + "\n"
ret += str(0) + delim + str(zero_p) + "\n"
ret += str(0) + delim + str(0) + "\n"
ret += "\n"
ret += "x"+delim+"5% Quantile\n"
ret += str(minx) + delim + str(p_q) + "\n"
ret += str(x_q) + delim + str(p_q) + "\n"
ret += str(x_q) + delim + str(0) + "\n"
ret += "\n"
return ret
def print_ecdfs(ecdfs,delim="\t"):
"""
Returns string of 'delim'-delimited printout. Assumes the result
provided is of the form
{ label : (x,p,a_n) }
where each (x,p,a_n) are as returned by the Stats.ecdf() method.
"""
ret = ""
cols = array(ecdfs.keys())
cols.sort()
cols_per_series = 2
# Header
ret += (delim*(cols_per_series-1))
ret += (delim*cols_per_series).join(map(str,cols)) + "\n"
still_printing = list(cols)
i = 0
while still_printing:
for k in cols:
x,p,a_n = ecdfs[k]
if i >= len(x):
if k in still_printing:
still_printing.remove(k)
ret += delim+delim
else:
ret += str(x[i]) + delim + str(p[i]) + delim
ret += "\n"
i += 1
return ret
def print_expected_wait_vs_arrival(result,delim="\t"):
"""
Returns string of 'delim'-delimited printout. Assumes the result provided
is of the format
{ headway : (arrivals, expected_waits, expected_wait_random) }
"""
ret = ""
rows = set()
for arrs,ews,ewr in result.values():
rows = rows.union(arrs)
rows = array(list(rows))
rows.sort()
cols = array(result.keys())
cols.sort()
# Header; note the blank first entry
ret += delim + delim.join(map(str,cols)) + "\n"
for r in rows:
ret += str(r) + delim
for c in cols:
arrs,ews,ewr = result[c]
ri = find(arrs==r)
if len(ri)==1:
ret += str(ews[ri[0]])
elif len(ri) > 1:
print "INSANITY"
ret += delim
ret += "\n"
ret += "Random" + delim
for c in cols:
ret += str(result[c][2]) + delim
ret += "\n"
return ret
def print_prob_transfer(result,delim="\t"):
"""
Returns string of 'delim'-delimited printout. Assumes the result
provided is of the format
{ label : winprobs }
where winprobs is a Nx2 array where the first column is window size
and the second column is the probability.
"""
ret = ""
rows = set()
for winprobs in result.values():
rows = rows.union(winprobs[:,0])
rows = array(list(rows))
rows.sort()
cols = array(result.keys())
cols.sort()
# Header; note the blank first entry
ret += delim + delim.join(map(str,cols)) + "\n"
for r in rows:
ret += str(r) + delim
for c in cols:
winprobs = result[c]
wins,probs=winprobs[:,0],winprobs[:,1]
ri = find(wins==r)
if len(ri)==1:
ret += str(probs[ri[0]])
elif len(ri) > 1:
print "INSANITY"
ret += delim
ret += "\n"
return ret
def print_histogram(result,delim="\t",weighted=True,bins=10,normed=False):
"""
Returns string of 'delim'-delimited printout. Assumes the result
provided is of the format
{ label : values }
where values is a 1D list or array of values, if weighted is False,
or a 2D array of values (1st column values, 2nd column weight) if
weighted is True.
Note that this function takes care of all the binning and
histogramming for you, you just need to provide the data.
"""
ret = ""
figure() #to avoid clobbering someone else's figure
## Since the pylab.hist() method doesn't actually implement
## barstacking with different-lengthed datasets, we have to
## first merge all data sets into one big histogram in order
## to determine the bin divisions; afterwards we can use that
## bin division to separately get each dataset's hist.
if weighted:
all_values = reduce( lambda accum,next: accum + list(next[:,0]),
result.values(), [] )
all_weights = reduce( lambda accum,next: accum + list(next[:,1]),
result.values(), [] )
else:
# concatenate expects a sequence of arrays, so wrap the pair in a tuple
all_values = reduce( lambda accum,next: concatenate((accum,next)),
result.values() )
all_weights = None
# Note we are overriding bins here
ns,bins,patches = hist(all_values,normed=normed,bins=bins,weights=all_weights)
figure()
ns = []
# Keeps the keys consistently ordered
keys = list(result.keys())
for k in keys:
v = result[k]
if weighted:
v,w = v[:,0],v[:,1]
else:
w = None
n,b,p = hist(v,normed=normed,bins=bins,weights=w,label=str(k))
ns.append(n)
rows = bins
cols = array(keys)
cols.sort()
# Note space for two columns
ret += delim + delim + delim.join(map(str,cols)) + "\n"
for i in range(len(rows)-1):
ret += str(rows[i]) + delim
ret += str((rows[i]+rows[i+1])/2.0) + delim
for n in ns:
# Each n is a list of "how many" per bin
ret += str(n[i]) + delim
ret += "\n"
ret += str(rows[-1]) + "\n"
return ret
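if __name__ == '__main__':
    # Editor's sketch: a minimal smoke test with made-up numbers (not real
    # GPS/GTFS output). Keys are headways in seconds; each value holds arrival
    # offsets, the matching expected waits, and the expected wait for a random
    # arrival (roughly half the headway).
    demo = {600: (array([0, 60, 120]), array([300, 270, 240]), 300.0),
            900: (array([0, 60, 120]), array([450, 420, 390]), 450.0)}
    print(print_expected_wait_vs_arrival(demo))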
| mit |
pravsripad/mne-python | mne/utils/docs.py | 1 | 147407 | # -*- coding: utf-8 -*-
"""The documentation functions."""
# Authors: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD-3-Clause
from copy import deepcopy
import inspect
import os
import os.path as op
import re
import sys
import warnings
import webbrowser
from decorator import FunctionMaker
from ._bunch import BunchConst
from ..defaults import HEAD_SIZE_DEFAULT
def _reflow_param_docstring(docstring, has_first_line=True, width=75):
"""Reflow text to a nice width for terminals.
WARNING: does not handle gracefully things like .. versionadded::
"""
maxsplit = docstring.count('\n') - 1 if has_first_line else -1
merged = ' '.join(line.strip() for line in
docstring.rsplit('\n', maxsplit=maxsplit))
reflowed = '\n '.join(re.findall(fr'.{{1,{width}}}(?:\s+|$)', merged))
if has_first_line:
reflowed = reflowed.replace('\n \n', '\n', 1)
return reflowed
##############################################################################
# Define our standard documentation entries
#
# To reduce redundancy across functions, please standardize the format to
# ``argument_optional_keywords``. For example ``tmin_raw`` for an entry that
# is specific to ``raw`` and since ``tmin`` is used other places, needs to
# be disambiguated. This way the entries will be easy to find since they
# are alphabetized (you can look up by the name of the argument). This way
# the same ``docdict`` entries are easier to reuse.
docdict = BunchConst()
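# Editor's note (illustrative sketch, not part of the original module): the
# entries defined below are spliced into docstrings elsewhere in MNE-Python via
# ``%(key)s`` placeholders and the ``fill_doc`` decorator defined later in this
# module. A hypothetical consumer would look like:
#
# @fill_doc
# def my_function(decim):
#     """Do something.
#
#     Parameters
#     ----------
#     %(decim)s
#     """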
# %%
# A
docdict['accept'] = """
accept : bool
If True (default False), accept the license terms of this dataset.
"""
docdict['add_ch_type_export_params'] = """
add_ch_type : bool
Whether to incorporate the channel type into the signal label (e.g. whether
to store channel "Fz" as "EEG Fz"). Only used for EDF format. Default is
``False``.
"""
docdict['add_data_kwargs'] = """
add_data_kwargs : dict | None
Additional arguments to brain.add_data (e.g.,
``dict(time_label_size=10)``).
"""
docdict['add_frames'] = """
add_frames : int | None
If int, enable (>=1) or disable (0) the printing of stack frame
information using formatting. Default (None) does not change the
formatting. This can add overhead so is meant only for debugging.
"""
docdict['adjacency_clust'] = """
adjacency : scipy.sparse.spmatrix | None | False
Defines adjacency between locations in the data, where "locations" can be
spatial vertices, frequency bins, time points, etc. For spatial vertices,
see: :func:`mne.channels.find_ch_adjacency`. If ``False``, assumes
no adjacency (each location is treated as independent and unconnected).
If ``None``, a regular lattice adjacency is assumed, connecting
each {sp} location to its neighbor(s) along the last dimension
of {{eachgrp}} ``{{x}}``{lastdim}.
If ``adjacency`` is a matrix, it is assumed to be symmetric (only the
upper triangular half is used) and must be square with dimension equal to
``{{x}}.shape[-1]`` {parone} or ``{{x}}.shape[-1] * {{x}}.shape[-2]``
{partwo} or (optionally)
``{{x}}.shape[-1] * {{x}}.shape[-2] * {{x}}.shape[-3]``
{parthree}.{memory}
"""
mem = (' If spatial adjacency is uniform in time, it is recommended to use '
'a square matrix with dimension ``{x}.shape[-1]`` (n_vertices) to save '
'memory and computation, and to use ``max_step`` to define the extent '
'of temporal adjacency to consider when clustering.')
comb = ' The function `mne.stats.combine_adjacency` may be useful for 4D data.'
st = dict(sp='spatial', lastdim='', parone='(n_vertices)',
partwo='(n_times * n_vertices)',
parthree='(n_times * n_freqs * n_vertices)', memory=mem)
tf = dict(sp='', lastdim=' (or the last two dimensions if ``{x}`` is 2D)',
parone='(for 2D data)', partwo='(for 3D data)',
parthree='(for 4D data)', memory=comb)
nogroups = dict(eachgrp='', x='X')
groups = dict(eachgrp='each group ', x='X[k]')
docdict['adjacency_clust_1'] = \
docdict['adjacency_clust'].format(**tf).format(**nogroups)
docdict['adjacency_clust_n'] = \
docdict['adjacency_clust'].format(**tf).format(**groups)
docdict['adjacency_clust_st1'] = \
docdict['adjacency_clust'].format(**st).format(**nogroups)
docdict['adjacency_clust_stn'] = \
docdict['adjacency_clust'].format(**st).format(**groups)
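# Editor's sketch (not part of docdict): one common way to build the
# ``adjacency`` argument described above for a spatio-temporal cluster test,
# assuming an ``epochs`` object exists:
#
# adjacency, ch_names = mne.channels.find_ch_adjacency(epochs.info,
#                                                      ch_type='eeg')
# # optionally expand to (n_times * n_channels) for 3D data
# full_adjacency = mne.stats.combine_adjacency(len(epochs.times), adjacency)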
docdict['adjust_dig_chpi'] = """
adjust_dig : bool
If True, adjust the digitization locations used for fitting based on
the positions localized at the start of the file.
"""
docdict['agg_fun_psd_topo'] = """
agg_fun : callable
The function used to aggregate over frequencies. Defaults to
:func:`numpy.sum` if ``normalize=True``, else :func:`numpy.mean`.
"""
docdict['align_view'] = """
align : bool
If True, consider view arguments relative to canonical MRI
directions (closest to MNI for the subject) rather than native MRI
space. This helps when MRIs are not in standard orientation (e.g.,
have large rotations).
"""
docdict['allow_empty_eltc'] = """
allow_empty : bool | str
``False`` (default) will emit an error if there are labels that have no
vertices in the source estimate. ``True`` and ``'ignore'`` will return
all-zero time courses for labels that do not have any vertices in the
source estimate; ``True`` will emit a warning while ``'ignore'`` will
just log a message.
.. versionchanged:: 0.21.0
Support for "ignore".
"""
docdict['alpha'] = """
alpha : float in [0, 1]
Alpha level to control opacity.
"""
docdict['anonymize_info_notes'] = """
Removes potentially identifying information if it exists in ``info``.
Specifically for each of the following we use:
- meas_date, file_id, meas_id
A default value, or as specified by ``daysback``.
- subject_info
Default values, except for 'birthday' which is adjusted
to maintain the subject age.
- experimenter, proj_name, description
Default strings.
- utc_offset
``None``.
- proj_id
Zeros.
- proc_history
Dates use the ``meas_date`` logic, and experimenter a default string.
- helium_info, device_info
Dates use the ``meas_date`` logic, meta info uses defaults.
If ``info['meas_date']`` is ``None``, it will remain ``None`` while the
above fields are processed.
Operates in place.
"""
# raw/epochs/evoked apply_function method
# apply_function method summary
applyfun_summary = """\
The function ``fun`` is applied to the channels defined in ``picks``.
The {} object's data is modified in-place. If the function returns a different
data type (e.g. :py:obj:`numpy.complex128`) it must be specified
using the ``dtype`` parameter, which causes the data type of **all** the data
to change (even if the function is only applied to channels in ``picks``).{}
.. note:: If ``n_jobs`` > 1, more memory is required as
``len(picks) * n_times`` additional time points need to
be temporarily stored in memory.
.. note:: If the data type changes (``dtype != None``), more memory is
required since the original and the converted data needs
to be stored in memory.
"""
applyfun_preload = (' The object has to have the data loaded e.g. with '
'``preload=True`` or ``self.load_data()``.')
docdict['applyfun_summary_epochs'] = \
applyfun_summary.format('epochs', applyfun_preload)
docdict['applyfun_summary_evoked'] = \
applyfun_summary.format('evoked', '')
docdict['applyfun_summary_raw'] = \
applyfun_summary.format('raw', applyfun_preload)
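# Editor's sketch (not part of docdict): typical use of the apply_function API
# summarized above, assuming a preloaded ``raw`` object exists:
#
# raw.load_data()
# raw.apply_function(lambda x: x * 1e-3, picks='eeg', channel_wise=True)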
docdict['area_alpha_plot_psd'] = """
area_alpha : float
Alpha for the area.
"""
docdict['area_mode_plot_psd'] = """
area_mode : str | None
Mode for plotting area. If 'std', the mean +/- 1 STD (across channels)
will be plotted. If 'range', the min and max (across channels) will be
plotted. Bad channels will be excluded from these calculations.
If None, no area will be plotted. If average=False, no area is plotted.
"""
docdict['aseg'] = """
aseg : str
The anatomical segmentation file. Default ``aparc+aseg``. This may
be any anatomical segmentation file in the mri subdirectory of the
Freesurfer subject directory.
"""
docdict['average_plot_psd'] = """
average : bool
If False, the PSDs of all channels are displayed. No averaging
is done and parameters area_mode and area_alpha are ignored. When
False, it is possible to paint an area (hold left mouse button and
drag) to plot a topomap.
"""
docdict['average_psd'] = """
average : str | None
How to average the segments. If ``mean`` (default), calculate the
arithmetic mean. If ``median``, calculate the median, corrected for
its bias relative to the mean. If ``None``, returns the unaggregated
segments.
"""
docdict['average_tfr'] = """
average : bool, default True
If ``False`` return an `EpochsTFR` containing separate TFRs for each
epoch. If ``True`` return an `AverageTFR` containing the average of all
TFRs across epochs.
.. note::
Using ``average=True`` is functionally equivalent to using
``average=False`` followed by ``EpochsTFR.average()``, but is
more memory efficient.
.. versionadded:: 0.13.0
"""
docdict['axes_psd_topo'] = """
axes : list of Axes | None
List of axes to plot consecutive topographies to. If ``None`` the axes
will be created automatically. Defaults to ``None``.
"""
docdict['axes_topomap'] = """
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of the
same length as ``times`` (unless ``times`` is None). If instance of
Axes, ``times`` must be a float or a list of one float.
Defaults to None.
"""
docdict['azimuth'] = """
azimuth : float
The azimuthal angle of the camera rendering the view in degrees.
"""
# %%
# B
docdict['bad_condition_maxwell_cond'] = """
bad_condition : str
How to deal with ill-conditioned SSS matrices. Can be "error"
(default), "warning", "info", or "ignore".
"""
docdict['bands_psd_topo'] = """
bands : list of tuple | None
The frequencies or frequency ranges to plot. Length-2 tuples specify
a single frequency and a subplot title (e.g.,
``(6.5, 'presentation rate')``); length-3 tuples specify lower and
upper band edges and a subplot title. If ``None`` (the default),
expands to::
bands = [(0, 4, 'Delta'), (4, 8, 'Theta'), (8, 12, 'Alpha'),
(12, 30, 'Beta'), (30, 45, 'Gamma')]
In bands where a single frequency is provided, the topomap will reflect
the single frequency bin that is closest to the provided value.
"""
docdict['base_estimator'] = """
base_estimator : object
The base estimator to iteratively fit on a subset of the dataset.
"""
_baseline_rescale_base = """
baseline : None | tuple of length 2
The time interval to consider as "baseline" when applying baseline
correction. If ``None``, do not apply baseline correction.
If a tuple ``(a, b)``, the interval is between ``a`` and ``b``
(in seconds), including the endpoints.
If ``a`` is ``None``, the **beginning** of the data is used; and if ``b``
is ``None``, it is set to the **end** of the interval.
If ``(None, None)``, the entire time interval is used.
.. note:: The baseline ``(a, b)`` includes both endpoints, i.e. all
timepoints ``t`` such that ``a <= t <= b``.
"""
docdict['baseline_epochs'] = f"""{_baseline_rescale_base}
Correction is applied **to each epoch and channel individually** in the
following way:
1. Calculate the mean signal of the baseline period.
2. Subtract this mean from the **entire** epoch.
"""
docdict['baseline_evoked'] = f"""{_baseline_rescale_base}
Correction is applied **to each channel individually** in the following
way:
1. Calculate the mean signal of the baseline period.
2. Subtract this mean from the **entire** ``Evoked``.
"""
docdict['baseline_report'] = f"""{_baseline_rescale_base}
Correction is applied in the following way **to each channel:**
1. Calculate the mean signal of the baseline period.
2. Subtract this mean from the **entire** time period.
For `~mne.Epochs`, this algorithm is run **on each epoch individually.**
"""
docdict['baseline_rescale'] = _baseline_rescale_base
docdict['baseline_stc'] = f"""{_baseline_rescale_base}
Correction is applied **to each source individually** in the following
way:
1. Calculate the mean signal of the baseline period.
2. Subtract this mean from the **entire** source estimate data.
.. note:: Baseline correction is appropriate when signal and noise are
approximately additive, and the noise level can be estimated from
the baseline interval. This can be the case for non-normalized
source activities (e.g. signed and unsigned MNE), but it is not
the case for normalized estimates (e.g. signal-to-noise ratios,
dSPM, sLORETA).
"""
docdict['border_topomap'] = """
border : float | 'mean'
Value to extrapolate to on the topomap borders. If ``'mean'`` (default),
then each extrapolated point has the average value of its neighbours.
.. versionadded:: 0.20
"""
docdict['brain_kwargs'] = """
brain_kwargs : dict | None
Additional arguments to the :class:`mne.viz.Brain` constructor (e.g.,
``dict(silhouette=True)``).
"""
docdict['browser'] = """
fig : matplotlib.figure.Figure | mne_qt_browser.figure.MNEQtBrowser
Browser instance.
"""
docdict['buffer_size_clust'] = """
buffer_size : int | None
Block size to use when computing test statistics. This can significantly
reduce memory usage when ``n_jobs > 1`` and memory sharing between
processes is enabled (see :func:`mne.set_cache_dir`), because ``X`` will be
shared between processes and each process only needs to allocate space for
a small block of locations at a time.
"""
docdict['by_event_type'] = """
by_event_type : bool
When ``False`` (the default) all epochs are processed together and a single
:class:`~mne.Evoked` object is returned. When ``True``, epochs are first
grouped by event type (as specified using the ``event_id`` parameter) and a
list is returned containing a separate :class:`~mne.Evoked` object for each
event type. The ``.comment`` attribute is set to the label of the event
type.
.. versionadded:: 0.24.0
"""
# %%
# C
docdict['calibration_maxwell_cal'] = """
calibration : str | None
Path to the ``'.dat'`` file with fine calibration coefficients.
File can have 1D or 3D gradiometer imbalance correction.
This file is machine/site-specific.
"""
docdict['cbar_fmt_psd_topo'] = """
cbar_fmt : str
Format string for the colorbar tick labels. If ``'auto'``, is equivalent
to '%0.3f' if ``dB=False`` and '%0.1f' if ``dB=True``. Defaults to
``'auto'``.
"""
docdict['cbar_fmt_topomap'] = """
cbar_fmt : str
String format for colorbar values.
"""
docdict['center'] = """
center : float or None
If not None, center of a divergent colormap, changes the meaning of
fmin, fmax and fmid.
"""
docdict['ch_name_ecg'] = """
ch_name : None | str
The name of the channel to use for ECG peak detection.
If ``None`` (default), ECG channel is used if present. If ``None`` and
**no** ECG channel is present, a synthetic ECG channel is created from
the cross-channel average. This synthetic channel can only be created from
MEG channels.
"""
docdict['ch_name_eog'] = """
ch_name : str | list of str | None
The name of the channel(s) to use for EOG peak detection. If a string,
can be an arbitrary channel. This doesn't have to be a channel of
``eog`` type; it could, for example, also be an ordinary EEG channel
that was placed close to the eyes, like ``Fp1`` or ``Fp2``.
Multiple channel names can be passed as a list of strings.
If ``None`` (default), use the channel(s) in ``raw`` with type ``eog``.
"""
docdict['ch_names_annot'] = """
ch_names : list | None
List of lists of channel names associated with the annotations.
Empty entries are assumed to be associated with no specific channel,
i.e., with all channels or with the time slice itself. None (default) is
the same as passing all empty lists. For example, this creates three
annotations, associating the first with the time interval itself, the
second with two channels, and the third with a single channel::
Annotations(onset=[0, 3, 10], duration=[1, 0.25, 0.5],
description=['Start', 'BAD_flux', 'BAD_noise'],
ch_names=[[], ['MEG0111', 'MEG2563'], ['MEG1443']])
"""
docdict['ch_type_evoked_topomap'] = """
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are collected in
pairs and the RMS for each pair is plotted.
If None, then channels are chosen in the order given above.
"""
docdict['ch_type_set_eeg_reference'] = """
ch_type : list of str | str
The name of the channel type to apply the reference to.
Valid channel types are ``'auto'``, ``'eeg'``, ``'ecog'``, ``'seeg'``,
``'dbs'``. If ``'auto'``, the first channel type of eeg, ecog, seeg or dbs
that is found (in that order) will be selected.
.. versionadded:: 0.19
"""
docdict['ch_type_topomap'] = """
ch_type : str
The channel type being plotted. Determines the ``'auto'``
extrapolation mode.
.. versionadded:: 0.21
"""
chwise = """
channel_wise : bool
Whether to apply the function to each channel {}individually. If ``False``,
the function will be applied to all {}channels at once. Default ``True``.
"""
docdict['channel_wise_applyfun'] = chwise.format('', '')
docdict['channel_wise_applyfun_epo'] = chwise.format(
'in each epoch ', 'epochs and ')
docdict['check_disjoint_clust'] = """
check_disjoint : bool
Whether to check if the connectivity matrix can be separated into disjoint
sets before clustering. This may lead to faster clustering, especially if
the second dimension of ``X`` (usually the "time" dimension) is large.
"""
docdict['chpi_amplitudes'] = """
chpi_amplitudes : dict
The time-varying cHPI coil amplitudes, with entries
"times", "proj", and "slopes".
"""
docdict['chpi_locs'] = """
chpi_locs : dict
The time-varying cHPI coils locations, with entries
"times", "rrs", "moments", and "gofs".
"""
docdict['clim'] = """
clim : str | dict
Colorbar properties specification. If 'auto', set clim automatically
based on data percentiles. If dict, should contain:
``kind`` : 'value' | 'percent'
Flag to specify type of limits.
``lims`` : list | np.ndarray | tuple of float, 3 elements
Lower, middle, and upper bounds for colormap.
``pos_lims`` : list | np.ndarray | tuple of float, 3 elements
Lower, middle, and upper bound for colormap. Positive values
will be mirrored directly across zero during colormap
construction to obtain negative control points.
.. note:: Only one of ``lims`` or ``pos_lims`` should be provided.
Only sequential colormaps should be used with ``lims``, and
only divergent colormaps should be used with ``pos_lims``.
"""
docdict['clim_onesided'] = """
clim : str | dict
Colorbar properties specification. If 'auto', set clim automatically
based on data percentiles. If dict, should contain:
``kind`` : 'value' | 'percent'
Flag to specify type of limits.
``lims`` : list | np.ndarray | tuple of float, 3 elements
Lower, middle, and upper bound for colormap.
Unlike :meth:`stc.plot <mne.SourceEstimate.plot>`, it cannot use
``pos_lims``, as the surface plot must show the magnitude.
"""
docdict['cmap_psd_topo'] = """
cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
Colormap to use. If :class:`tuple`, the first value indicates the colormap
to use and the second value is a boolean defining interactivity. In
interactive mode the colors are adjustable by clicking and dragging the
colorbar with left and right mouse button. Left mouse button moves the
scale up and down and right mouse button adjusts the range. Hitting
space bar resets the range. Up and down arrows can be used to change
the colormap. If ``None``, ``'Reds'`` is used for data that is either
all-positive or all-negative, and ``'RdBu_r'`` is used otherwise.
``'interactive'`` is equivalent to ``(None, True)``. Defaults to ``None``.
"""
docdict['cmap_topomap'] = """
cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
Colormap to use. If tuple, the first value indicates the colormap to
use and the second value is a boolean defining interactivity. In
interactive mode the colors are adjustable by clicking and dragging the
colorbar with left and right mouse button. Left mouse button moves the
scale up and down and right mouse button adjusts the range (zoom).
The mouse scroll can also be used to adjust the range. Hitting space
bar resets the range. Up and down arrows can be used to change the
colormap. If None (default), 'Reds' is used for all positive data,
otherwise defaults to 'RdBu_r'. If 'interactive', translates to
(None, True).
.. warning:: Interactive mode works smoothly only for a small number
of topomaps. Interactive mode is disabled by default for more than
2 topomaps.
"""
docdict['cmap_topomap_simple'] = """
cmap : matplotlib colormap | None
Colormap to use. If None, 'Reds' is used for all positive data,
otherwise defaults to 'RdBu_r'.
"""
docdict['cnorm'] = """
cnorm : matplotlib.colors.Normalize | None
Colormap normalization, default None means linear normalization. If not
None, ``vmin`` and ``vmax`` arguments are ignored. See Notes for more
details.
"""
docdict['color_matplotlib'] = """
color : color
A list of anything matplotlib accepts: string, RGB, hex, etc.
"""
docdict['color_plot_psd'] = """
color : str | tuple
A matplotlib-compatible color to use. Has no effect when
spatial_colors=True.
"""
docdict['colorbar_topomap'] = """
colorbar : bool
Plot a colorbar in the rightmost column of the figure.
"""
docdict['colormap'] = """
colormap : str | np.ndarray of float, shape(n_colors, 3 | 4)
Name of colormap to use or a custom look up table. If array, must
be (n x 3) or (n x 4) array for with RGB or RGBA values between
0 and 255.
"""
docdict['combine'] = """
combine : None | str | callable
How to combine information across channels. If a :class:`str`, must be
one of 'mean', 'median', 'std' (standard deviation) or 'gfp' (global
field power).
"""
docdict['compute_proj_ecg'] = """This function will:
#. Filter the ECG data channel.
#. Find ECG R wave peaks using :func:`mne.preprocessing.find_ecg_events`.
#. Filter the raw data.
#. Create `~mne.Epochs` around the R wave peaks, capturing the heartbeats.
#. Optionally average the `~mne.Epochs` to produce an `~mne.Evoked` if
``average=True`` was passed (default).
#. Calculate SSP projection vectors on that data to capture the artifacts."""
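# Editor's sketch (not part of docdict): the steps above correspond to a call
# like the following, assuming a preloaded ``raw`` with MEG channels:
#
# projs, ecg_events = mne.preprocessing.compute_proj_ecg(
#     raw, n_grad=1, n_mag=1, n_eeg=0, average=True)
# raw.add_proj(projs)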
docdict['compute_proj_eog'] = """This function will:
#. Filter the EOG data channel.
#. Find the peaks of eyeblinks in the EOG data using
:func:`mne.preprocessing.find_eog_events`.
#. Filter the raw data.
#. Create `~mne.Epochs` around the eyeblinks.
#. Optionally average the `~mne.Epochs` to produce an `~mne.Evoked` if
``average=True`` was passed (default).
#. Calculate SSP projection vectors on that data to capture the artifacts."""
docdict['compute_ssp'] = """This function aims to find those SSP vectors that
will project out the ``n`` most prominent signals from the data for each
specified sensor type. Consequently, if the provided input data contains high
levels of noise, the produced SSP vectors can then be used to eliminate that
noise from the data.
"""
docdict['contours_topomap'] = """
contours : int | array of float
The number of contour lines to draw. If 0, no contours will be drawn.
When an integer, matplotlib ticker locator is used to find suitable
values for the contour thresholds (may sometimes be inaccurate, use
array for accuracy). If an array, the values represent the levels for
the contours. The values are in µV for EEG, fT for magnetometers and
fT/m for gradiometers. If colorbar=True, the ticks in colorbar
correspond to the contour levels. Defaults to 6.
"""
docdict['coord_frame_maxwell'] = """
coord_frame : str
The coordinate frame that the ``origin`` is specified in, either
``'meg'`` or ``'head'``. For empty-room recordings that do not have
a head<->meg transform ``info['dev_head_t']``, the MEG coordinate
frame should be used.
"""
docdict['copy_df'] = """
copy : bool
If ``True``, data will be copied. Otherwise data may be modified in place.
Defaults to ``True``.
"""
docdict['create_ecg_epochs'] = """This function will:
#. Filter the ECG data channel.
#. Find ECG R wave peaks using :func:`mne.preprocessing.find_ecg_events`.
#. Create `~mne.Epochs` around the R wave peaks, capturing the heartbeats.
"""
docdict['create_eog_epochs'] = """This function will:
#. Filter the EOG data channel.
#. Find the peaks of eyeblinks in the EOG data using
:func:`mne.preprocessing.find_eog_events`.
#. Create `~mne.Epochs` around the eyeblinks.
"""
docdict['cross_talk_maxwell'] = """
cross_talk : str | None
Path to the FIF file with cross-talk correction information.
"""
# %%
# D
docdict['dB_plot_psd'] = """
dB : bool
Plot Power Spectral Density (PSD), in units (amplitude**2/Hz (dB)) if
``dB=True``, and ``estimate='power'`` or ``estimate='auto'``. Plot PSD
in units (amplitude**2/Hz) if ``dB=False`` and
``estimate='power'``. Plot Amplitude Spectral Density (ASD), in units
(amplitude/sqrt(Hz)), if ``dB=False`` and ``estimate='amplitude'`` or
``estimate='auto'``. Plot ASD, in units (amplitude/sqrt(Hz) (db)), if
``dB=True`` and ``estimate='amplitude'``.
"""
docdict['dB_psd_topo'] = """
dB : bool
If ``True``, transform data to decibels (with ``10 * np.log10(data)``)
following the application of ``agg_fun``. Ignored if ``normalize=True``.
"""
docdict['daysback_anonymize_info'] = """
daysback : int | None
Number of days to subtract from all dates.
If ``None`` (default), the acquisition date, ``info['meas_date']``,
will be set to ``January 1ˢᵗ, 2000``. This parameter is ignored if
``info['meas_date']`` is ``None`` (i.e., no acquisition date has been set).
"""
docdict['dbs'] = """
dbs : bool
If True (default), show DBS (deep brain stimulation) electrodes.
"""
docdict['decim'] = """
decim : int
Factor by which to subsample the data.
.. warning:: Low-pass filtering is not performed, this simply selects
every Nth sample (where N is the value passed to
``decim``), i.e., it compresses the signal (see Notes).
If the data are not properly filtered, aliasing artifacts
may occur.
"""
docdict['decim_notes'] = """
For historical reasons, ``decim`` / "decimation" refers to simply subselecting
samples from a given signal. This contrasts with the broader signal processing
literature, where decimation is defined as (quoting
:footcite:`OppenheimEtAl1999`, p. 172; which cites
:footcite:`CrochiereRabiner1983`):
"... a general system for downsampling by a factor of M is the one shown
in Figure 4.23. Such a system is called a decimator, and downsampling
by lowpass filtering followed by compression [i.e., subselecting samples]
has been termed decimation (Crochiere and Rabiner, 1983)."
Hence "decimation" in MNE is what is considered "compression" in the signal
processing community.
Decimation can be done multiple times. For example,
``inst.decimate(2).decimate(2)`` will be the same as
``inst.decimate(4)``.
"""
docdict['depth'] = """
depth : None | float | dict
How to weight (or normalize) the forward using a depth prior.
If float (default 0.8), it acts as the depth weighting exponent (``exp``)
to use. ``None`` is equivalent to 0, meaning no depth weighting is performed.
It can also be a :class:`dict` containing keyword arguments to pass to
:func:`mne.forward.compute_depth_prior` (see docstring for details and
defaults). This is effectively ignored when ``method='eLORETA'``.
.. versionchanged:: 0.20
Depth bias ignored for ``method='eLORETA'``.
"""
docdict['destination_maxwell_dest'] = """
destination : str | array-like, shape (3,) | None
The destination location for the head. Can be ``None``, which
will not change the head position, or a string path to a FIF file
containing a MEG device<->head transformation, or a 3-element array
giving the coordinates to translate to (with no rotations).
For example, ``destination=(0, 0, 0.04)`` would translate the bases
as ``--trans default`` would in MaxFilter™ (i.e., to the default
head location).
"""
docdict['detrend_epochs'] = """
detrend : int | None
If 0 or 1, the data channels (MEG and EEG) will be detrended when
loaded. 0 is a constant (DC) detrend, 1 is a linear detrend. None
is no detrending. Note that detrending is performed before baseline
correction. If no DC offset is preferred (zeroth order detrending),
either turn off baseline correction, as this may introduce a DC
shift, or set baseline correction to use the entire time interval
(will yield equivalent results but be slower).
"""
docdict['df_return'] = """
df : instance of pandas.DataFrame
A dataframe suitable for usage with other statistical/plotting/analysis
packages.
"""
docdict['dig_kinds'] = """
dig_kinds : list of str | str
Kind of digitization points to use in the fitting. These can be any
combination of ('cardinal', 'hpi', 'eeg', 'extra'). Can also
be 'auto' (default), which will use only the 'extra' points if
enough (more than 4) are available, and if not, uses 'extra' and
'eeg' points.
"""
docdict['dipole'] = """
dipole : instance of Dipole | list of Dipole
Dipole object containing position, orientation and amplitude of
one or more dipoles. Multiple simultaneous dipoles may be defined by
assigning them identical times. Alternatively, multiple simultaneous
dipoles may also be specified as a list of Dipole objects.
.. versionchanged:: 1.1
Added support for a list of :class:`mne.Dipole` instances.
"""
docdict['distance'] = """
distance : float | None
The distance from the camera rendering the view to the focalpoint
in plot units (either m or mm).
"""
docdict['dtype_applyfun'] = """
dtype : numpy.dtype
Data type to use after applying the function. If None
(default) the data type is not modified.
"""
# %%
# E
docdict['ecog'] = """
ecog : bool
If True (default), show ECoG sensors.
"""
docdict['eeg'] = """
eeg : bool | str | list
String options are:
- "original" (default; equivalent to ``True``)
Shows EEG sensors using their digitized locations (after
transformation to the chosen ``coord_frame``)
- "projected"
The EEG locations projected onto the scalp, as is done in
forward modeling
Can also be a list of these options, or an empty list (``[]``,
equivalent of ``False``).
"""
docdict['elevation'] = """
elevation : float
The zenith angle of the camera rendering the view in degrees.
"""
docdict['eltc_mode_notes'] = """
Valid values for ``mode`` are:
- ``'max'``
Maximum value across vertices at each time point within each label.
- ``'mean'``
Average across vertices at each time point within each label. Ignores
the orientation of sources for standard source estimates; because source
orientation varies across the cortical surface, this can lead to cancellation.
Vector source estimates are always in XYZ / RAS orientation, and are thus
already geometrically aligned.
- ``'mean_flip'``
Finds the dominant direction of source space normal vector orientations
within each label, applies a sign-flip to time series at vertices whose
orientation is more than 180° different from the dominant direction, and
then averages across vertices at each time point within each label.
- ``'pca_flip'``
Applies singular value decomposition to the time courses within each label,
and uses the first right-singular vector as the representative label time
course. This signal is scaled so that its power matches the average
(per-vertex) power within the label, and sign-flipped by multiplying by
``np.sign(u @ flip)``, where ``u`` is the first left-singular vector and
``flip`` is the same sign-flip vector used when ``mode='mean_flip'``. This
sign-flip ensures that extracting time courses from the same label in
similar STCs does not result in 180° direction/phase changes.
- ``'auto'`` (default)
Uses ``'mean_flip'`` when a standard source estimate is supplied, and
``'mean'`` when a vector source estimate is supplied.
.. versionadded:: 0.21
Support for ``'auto'``, vector, and volume source estimates.
The only modes that work for vector and volume source estimates are ``'mean'``,
``'max'``, and ``'auto'``.
"""
docdict['emit_warning'] = """
emit_warning : bool
Whether to emit warnings when cropping or omitting annotations.
"""
docdict['epochs_preload'] = """
Load all epochs from disk when creating the object
or wait before accessing each epoch (more memory
efficient but can be slower).
"""
docdict['epochs_reject_tmin_tmax'] = """
reject_tmin, reject_tmax : float | None
Start and end of the time window used to reject epochs based on
peak-to-peak (PTP) amplitudes as specified via ``reject`` and ``flat``.
The default ``None`` corresponds to the first and last time points of the
epochs, respectively.
.. note:: These parameters control the time period used in conjunction with
both ``reject`` and ``flat``.
"""
docdict['epochs_tmin_tmax'] = """
tmin, tmax : float
Start and end time of the epochs in seconds, relative to the time-locked
event. The closest or matching samples corresponding to the start and end
time are included. Defaults to ``-0.2`` and ``0.5``, respectively.
"""
docdict['estimate_plot_psd'] = """
estimate : str, {'auto', 'power', 'amplitude'}
Can be "power" for power spectral density (PSD), "amplitude" for
amplitude spectrum density (ASD), or "auto" (default), which uses
"power" when dB is True and "amplitude" otherwise.
"""
docdict['event_color'] = """
event_color : color object | dict | None
Color(s) to use for :term:`events`. To show all :term:`events` in the same
color, pass any matplotlib-compatible color. To color events differently,
pass a `dict` that maps event names or integer event numbers to colors
(must include entries for *all* events, or include a "fallback" entry with
key ``-1``). If ``None``, colors are chosen from the current Matplotlib
color cycle.
"""
docdict['event_id'] = """
event_id : int | list of int | dict | None
The id of the :term:`events` to consider. If dict, the keys can later be
used to access associated :term:`events`. Example:
dict(auditory=1, visual=3). If int, a dict will be created with the id as
string. If a list, all :term:`events` with the IDs specified in the list
are used. If None, all :term:`events` will be used and a dict is created
with string integer names corresponding to the event id integers."""
docdict['event_id_ecg'] = """
event_id : int
The index to assign to found ECG events.
"""
docdict['event_repeated_epochs'] = """
event_repeated : str
How to handle duplicates in ``events[:, 0]``. Can be ``'error'``
(default), to raise an error, 'drop' to only retain the row occurring
first in the :term:`events`, or ``'merge'`` to combine the coinciding
events (=duplicates) into a new event (see Notes for details).
.. versionadded:: 0.19
"""
docdict['events'] = """
events : array of int, shape (n_events, 3)
The array of :term:`events`. The first column contains the event time in
samples, with :term:`first_samp` included. The third column contains the
event id."""
docdict['events_epochs'] = """
events : array of int, shape (n_events, 3)
The array of :term:`events`. The first column contains the event time in
samples, with :term:`first_samp` included. The third column contains the
event id.
If some events don't match the events of interest as specified by event_id,
they will be marked as ``IGNORED`` in the drop log."""
docdict['evoked_by_event_type_returns'] = """
evoked : instance of Evoked | list of Evoked
The averaged epochs.
When ``by_event_type=True`` was specified, a list is returned containing a
separate :class:`~mne.Evoked` object for each event type. The list has the
same order as the event types as specified in the ``event_id``
dictionary.
"""
docdict['exclude_clust'] = """
exclude : bool array or None
Mask to apply to the data to exclude certain points from clustering
(e.g., medial wall vertices). Should be the same shape as ``X``.
If ``None``, no points are excluded.
"""
docdict['exclude_frontal'] = """
exclude_frontal : bool
If True, exclude points that have both negative Z values
(below the nasion) and positive Y values (in front of the LPA/RPA).
"""
docdict['export_edf_note'] = """
For EDF exports, only channels measured in Volts are allowed; in MNE-Python
this means channel types 'eeg', 'ecog', 'seeg', 'emg', 'eog', 'ecg', 'dbs',
'bio', and 'misc'. 'stim' channels are dropped. Although this function
supports storing channel types in the signal label (e.g. ``EEG Fz`` or
``MISC E``), other software may not support this (optional) feature of
the EDF standard.
If ``add_ch_type`` is True, then channel types are written based on what
they are currently set in MNE-Python. One should double check that all
their channels are set correctly. You can call
:attr:`raw.set_channel_types <mne.io.Raw.set_channel_types>` to set
channel types.
In addition, EDF does not support storing a montage. You will need
to store the montage separately and call :attr:`raw.set_montage()
<mne.io.Raw.set_montage>`.
"""
docdict['export_eeglab_note'] = """
For EEGLAB exports, channel locations are expanded to full EEGLAB format.
For more details see :func:`eeglabio.utils.cart_to_eeglab`.
"""
_export_fmt_params_base = """Format of the export. Defaults to ``'auto'``, which will infer the format
from the filename extension. See supported formats above for more
information."""
docdict['export_fmt_params_epochs'] = """
fmt : 'auto' | 'eeglab'
{}
""".format(_export_fmt_params_base)
docdict['export_fmt_params_evoked'] = """
fmt : 'auto' | 'mff'
{}
""".format(_export_fmt_params_base)
docdict['export_fmt_params_raw'] = """
fmt : 'auto' | 'brainvision' | 'edf' | 'eeglab'
{}
""".format(_export_fmt_params_base)
docdict['export_fmt_support_epochs'] = """\
Supported formats:
- EEGLAB (``.set``, uses :mod:`eeglabio`)
"""
docdict['export_fmt_support_evoked'] = """\
Supported formats:
- MFF (``.mff``, uses :func:`mne.export.export_evokeds_mff`)
"""
docdict['export_fmt_support_raw'] = """\
Supported formats:
- BrainVision (``.vhdr``, ``.vmrk``, ``.eeg``, uses `pybv <https://github.com/bids-standard/pybv>`_)
- EEGLAB (``.set``, uses :mod:`eeglabio`)
- EDF (``.edf``, uses `EDFlib-Python <https://gitlab.com/Teuniz/EDFlib-Python>`_)
""" # noqa: E501
docdict['export_warning'] = """\
.. warning::
Since we are exporting to external formats, there's no guarantee that all
the info will be preserved in the external format. See Notes for details.
"""
_export_warning_note_base = """\
Export to external format may not preserve all the information from the
instance. To save in native MNE format (``.fif``) without information loss,
use :meth:`mne.{0}.save` instead.
Export does not apply projector(s). Unapplied projector(s) will be lost.
Consider applying projector(s) before exporting with
:meth:`mne.{0}.apply_proj`."""
docdict['export_warning_note_epochs'] = \
_export_warning_note_base.format('Epochs')
docdict['export_warning_note_evoked'] = \
_export_warning_note_base.format('Evoked')
docdict['export_warning_note_raw'] = \
_export_warning_note_base.format('io.Raw')
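# Editor's sketch (not part of docdict): exporting a Raw object to EDF, assuming
# ``raw`` exists and EDFlib-Python is installed:
#
# raw.export('recording.edf', fmt='edf', add_ch_type=True, overwrite=True)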
docdict['ext_order_chpi'] = """
ext_order : int
The external order for SSS-like interference suppression.
The SSS bases are used as projection vectors during fitting.
.. versionchanged:: 0.20
Added ``ext_order=1`` by default, which should improve
detection of true HPI signals.
"""
docdict['ext_order_maxwell'] = """
ext_order : int
Order of external component of spherical expansion.
"""
docdict['extended_proj_maxwell'] = """
extended_proj : list
The empty-room projection vectors used to extend the external
SSS basis (i.e., use eSSS).
.. versionadded:: 0.21
"""
docdict['extrapolate_topomap'] = """
extrapolate : str
Options:
- ``'box'``
Extrapolate to four points placed to form a square encompassing all
data points, where each side of the square is three times the range
of the data in the respective dimension.
- ``'local'`` (default for MEG sensors)
Extrapolate only to nearby points (approximately to points closer than
median inter-electrode distance). This will also set the
mask to be polygonal based on the convex hull of the sensors.
- ``'head'`` (default for non-MEG sensors)
Extrapolate out to the edges of the clipping circle. This will be on
the head circle when the sensors are contained within the head circle,
but it can extend beyond the head when sensors are plotted outside
the head circle.
.. versionchanged:: 0.21
- The default was changed to ``'local'`` for MEG sensors.
- ``'local'`` was changed to use a convex hull mask
- ``'head'`` was changed to extrapolate out to the clipping circle.
"""
# %%
# F
docdict['f_power_clust'] = """
t_power : float
Power to raise the statistical values (usually F-values) by before
summing (sign will be retained). Note that ``t_power=0`` will give a
count of locations in each cluster, ``t_power=1`` will weight each location
by its statistical score.
"""
docdict['fiducials'] = """
fiducials : list | dict | str
The fiducials given in the MRI (surface RAS) coordinate
system. If a dictionary is provided, it must contain the **keys**
``'lpa'``, ``'rpa'``, and ``'nasion'``, with **values** being the
respective coordinates in meters.
If a list, it must be a list of ``DigPoint`` instances as returned by the
:func:`mne.io.read_fiducials` function.
If ``'estimated'``, the fiducials are derived from the ``fsaverage``
template. If ``'auto'`` (default), tries to find the fiducials
in a file with the canonical name
(``{subjects_dir}/{subject}/bem/{subject}-fiducials.fif``)
and if absent, falls back to ``'estimated'``.
"""
docdict['filter_length'] = """
filter_length : str | int
Length of the FIR filter to use (if applicable):
* **'auto' (default)**: The filter length is chosen based
on the size of the transition regions (6.6 times the reciprocal
of the shortest transition band for fir_window='hamming'
and fir_design="firwin2", and half that for "firwin").
* **str**: A human-readable time in
units of "s" or "ms" (e.g., "10s" or "5500ms") will be
converted to that number of samples if ``phase="zero"``, or
the shortest power-of-two length at least that duration for
``phase="zero-double"``.
* **int**: Specified length in samples. For fir_design="firwin",
this should not be used.
"""
docdict['filter_length_ecg'] = """
filter_length : str | int | None
Number of taps to use for filtering.
"""
docdict['filter_length_notch'] = """
filter_length : str | int
Length of the FIR filter to use (if applicable):
* **'auto' (default)**: The filter length is chosen based
on the size of the transition regions (6.6 times the reciprocal
of the shortest transition band for fir_window='hamming'
and fir_design="firwin2", and half that for "firwin").
* **str**: A human-readable time in
units of "s" or "ms" (e.g., "10s" or "5500ms") will be
converted to that number of samples if ``phase="zero"``, or
the shortest power-of-two length at least that duration for
``phase="zero-double"``.
* **int**: Specified length in samples. For fir_design="firwin",
this should not be used.
When ``method=='spectrum_fit'``, this sets the effective window duration
over which fits are computed. See :func:`mne.filter.create_filter`
for options. Longer window lengths will give more stable frequency
estimates, but require (potentially much) more processing and are not able
to adapt as well to non-stationarities.
The default in 0.21 is None, but this will change to ``'10s'`` in 0.22.
"""
docdict['fir_design'] = """
fir_design : str
Can be "firwin" (default) to use :func:`scipy.signal.firwin`,
or "firwin2" to use :func:`scipy.signal.firwin2`. "firwin" uses
a time-domain design technique that generally gives improved
attenuation using fewer samples than "firwin2".
.. versionadded:: 0.15
"""
docdict['fir_window'] = """
fir_window : str
The window to use in FIR design, can be "hamming" (default),
"hann" (default in 0.13), or "blackman".
.. versionadded:: 0.15
"""
_flat_common = """\
Reject epochs based on **minimum** peak-to-peak signal amplitude (PTP).
Valid **keys** can be any channel type present in the object. The
**values** are floats that set the minimum acceptable PTP. If the PTP
is smaller than this threshold, the epoch will be dropped. If ``None``
then no rejection is performed based on flatness of the signal."""
docdict['flat'] = f"""
flat : dict | None
{_flat_common}
.. note:: To constrain the time period used for estimation of signal
quality, pass the ``reject_tmin`` and ``reject_tmax`` parameters.
"""
docdict['flat_drop_bad'] = f"""
flat : dict | str | None
{_flat_common}
If ``'existing'``, then the flat parameters set during epoch creation are
used.
"""
docdict['fmin_fmid_fmax'] = """
fmin : float
Minimum value in colormap (uses real fmin if None).
fmid : float
Intermediate value in colormap (fmid between fmin and
fmax if None).
fmax : float
Maximum value in colormap (uses real max if None).
"""
docdict['fname_epochs'] = """
fname : path-like | file-like
The epochs to load. If a filename, should end with ``-epo.fif`` or
``-epo.fif.gz``. If a file-like object, preloading must be used.
"""
docdict['fname_export_params'] = """
fname : str
Name of the output file.
"""
docdict['fnirs'] = """
fnirs : str | list | bool | None
Can be "channels", "pairs", "detectors", and/or "sources" to show the
fNIRS channel locations, optode locations, or line between
source-detector pairs, or a combination like ``('pairs', 'channels')``.
True translates to ``('pairs',)``.
"""
docdict['focalpoint'] = """
focalpoint : tuple, shape (3,) | None
The focal point of the camera rendering the view: (x, y, z) in
plot units (either m or mm).
"""
docdict['forward_set_eeg_reference'] = """
forward : instance of Forward | None
Forward solution to use. Only used with ``ref_channels='REST'``.
.. versionadded:: 0.21
"""
docdict['fullscreen'] = """
fullscreen : bool
Whether to start in fullscreen (``True``) or windowed mode
(``False``).
"""
applyfun_fun_base = """
fun : callable
A function to be applied to the channels. The first argument of
fun has to be a timeseries (:class:`numpy.ndarray`). The function must
operate on an array of shape ``(n_times,)`` {}.
The function must return an :class:`~numpy.ndarray` shaped like its input.
"""
docdict['fun_applyfun'] = applyfun_fun_base.format(
' if ``channel_wise=True`` and ``(len(picks), n_times)`` otherwise')
docdict['fun_applyfun_evoked'] = applyfun_fun_base.format(
' because it will apply channel-wise')
docdict['fwd'] = """
fwd : instance of Forward
The forward solution. If present, the orientations of the dipoles
present in the forward solution are displayed.
"""
# %%
# G
docdict['get_peak_parameters'] = """
tmin : float | None
The minimum point in time to be considered for peak getting.
tmax : float | None
The maximum point in time to be considered for peak getting.
mode : {'pos', 'neg', 'abs'}
How to deal with the sign of the data. If 'pos' only positive
values will be considered. If 'neg' only negative values will
be considered. If 'abs' absolute values will be considered.
Defaults to 'abs'.
vert_as_index : bool
Whether to return the vertex index (True) instead of its ID
(False, default).
time_as_index : bool
Whether to return the time index (True) instead of the latency
(False, default).
"""
docdict['group_by_browse'] = """
group_by : str
How to group channels. ``'type'`` groups by channel type,
``'original'`` plots in the order of ch_names, ``'selection'`` uses
Elekta's channel groupings (only works for Neuromag data),
``'position'`` groups the channels by the positions of the sensors.
``'selection'`` and ``'position'`` modes allow custom selections by
using a lasso selector on the topomap. In butterfly mode, ``'type'``
and ``'original'`` group the channels by type, whereas ``'selection'``
and ``'position'`` use regional grouping. ``'type'`` and ``'original'``
modes are ignored when ``order`` is not ``None``. Defaults to ``'type'``.
"""
# %%
# H
docdict['h_freq'] = """
h_freq : float | None
For FIR filters, the upper pass-band edge; for IIR filters, the upper
cutoff frequency. If None the data are only high-passed.
"""
docdict['h_trans_bandwidth'] = """
h_trans_bandwidth : float | str
Width of the transition band at the high cut-off frequency in Hz
(low pass or cutoff 2 in bandpass). Can be "auto"
(default in 0.14) to use a multiple of ``h_freq``::
min(max(h_freq * 0.25, 2.), info['sfreq'] / 2. - h_freq)
Only used for ``method='fir'``.
"""
docdict['head_pos'] = """
head_pos : None | str | dict | tuple | array
Name of the position estimates file. Should be in the format of
the files produced by MaxFilter. If dict, keys should
be the time points and entries should be 4x4 ``dev_head_t``
matrices. If None, the original head position (from
``info['dev_head_t']``) will be used. If tuple, should have the
same format as data returned by ``head_pos_to_trans_rot_t``.
If array, should be of the form returned by
:func:`mne.chpi.read_head_pos`.
"""
docdict['head_pos_maxwell'] = """
head_pos : array | None
If array, movement compensation will be performed.
The array should be of shape (N, 10), holding the position
parameters as returned by e.g. ``read_head_pos``.
"""
docdict['head_source'] = """
head_source : str | list of str
Head source(s) to use. See the ``source`` option of
:func:`mne.get_head_surf` for more information.
"""
docdict['hitachi_notes'] = """
Hitachi does not encode their channel positions, so you will need to
create a suitable mapping using :func:`mne.channels.make_standard_montage`
or :func:`mne.channels.make_dig_montage` like (for a 3x5/ETG-7000 example):
>>> mon = mne.channels.make_standard_montage('standard_1020')
>>> need = 'S1 D1 S2 D2 S3 D3 S4 D4 S5 D5 S6 D6 S7 D7 S8'.split()
>>> have = 'F3 FC3 C3 CP3 P3 F5 FC5 C5 CP5 P5 F7 FT7 T7 TP7 P7'.split()
>>> mon.rename_channels(dict(zip(have, need)))
>>> raw.set_montage(mon) # doctest: +SKIP
The 3x3 (ETG-100) is laid out as two separate layouts::
S1--D1--S2 S6--D6--S7
| | | | | |
D2--S3--D3 D7--S8--D8
| | | | | |
S4--D4--S5 S9--D9--S10
The 3x5 (ETG-7000) is laid out as::
S1--D1--S2--D2--S3
| | | | |
D3--S4--D4--S5--D5
| | | | |
S6--D6--S7--D7--S8
The 4x4 (ETG-7000) is laid out as::
S1--D1--S2--D2
| | | |
D3--S3--D4--S4
| | | |
S5--D5--S6--D6
| | | |
D7--S7--D8--S8
The 3x11 (ETG-4000) is laid out as::
S1--D1--S2--D2--S3--D3--S4--D4--S5--D5--S6
| | | | | | | | | | |
D6--S7--D7--S8--D8--S9--D9--S10-D10-S11-D11
| | | | | | | | | | |
S12-D12-S13-D13-S14-D14-S16-D16-S17-D17-S18
For each layout, the channels come from the (left-to-right) neighboring
source-detector pairs in the first row, then between the first and second row,
then the second row, etc.
.. versionadded:: 0.24
"""
# %%
# I
docdict['idx_pctf'] = """
idx : list of int | list of Label
Source for indices for which to compute PSFs or CTFs. If mode is None,
PSFs/CTFs will be returned for all indices. If mode is not None, the
corresponding summary measure will be computed across all PSFs/CTFs
available from idx.
Can be:
* list of integers : Compute PSFs/CTFs for all indices to source space
vertices specified in idx.
* list of Label : Compute PSFs/CTFs for source space vertices in
specified labels.
"""
docdict['ignore_ref_maxwell'] = """
ignore_ref : bool
If True, do not include reference channels in compensation. This
option should be True for KIT files, since Maxwell filtering
with reference channels is not currently supported.
"""
docdict['iir_params'] = """
iir_params : dict | None
Dictionary of parameters to use for IIR filtering.
If iir_params is None and method="iir", 4th order Butterworth will be used.
For more information, see :func:`mne.filter.construct_iir_filter`.
"""
docdict['image_format_report'] = """
image_format : 'png' | 'svg' | 'gif' | None
The image format to be used for the report, can be ``'png'``,
``'svg'``, or ``'gif'``.
None (default) will use the default specified during `~mne.Report`
instantiation.
"""
docdict['image_interp_topomap'] = """
image_interp : str
The image interpolation to be used. Options are ``'cubic'`` (default)
to use :class:`scipy.interpolate.CloughTocher2DInterpolator`,
``'nearest'`` to use :class:`scipy.spatial.Voronoi` or
``'linear'`` to use :class:`scipy.interpolate.LinearNDInterpolator`.
"""
docdict['include_tmax'] = """
include_tmax : bool
If True (default), include tmax. If False, exclude tmax (similar to how
Python indexing typically works).
.. versionadded:: 0.19
"""
_index_df_base = """
index : {} | None
Kind of index to use for the DataFrame. If ``None``, a sequential
integer index (:class:`pandas.RangeIndex`) will be used. If ``'time'``, a
:class:`pandas.Float64Index`, :class:`pandas.Int64Index`, {}or
:class:`pandas.TimedeltaIndex` will be used
(depending on the value of ``time_format``). {}
"""
docdict['index_df'] = _index_df_base
datetime = ':class:`pandas.DatetimeIndex`, '
multiindex = ('If a list of two or more string values, a '
':class:`pandas.MultiIndex` will be created. ')
raw = ("'time'", datetime, '')
epo = ('str | list of str', '', multiindex)
evk = ("'time'", '', '')
docdict['index_df_epo'] = _index_df_base.format(*epo)
docdict['index_df_evk'] = _index_df_base.format(*evk)
docdict['index_df_raw'] = _index_df_base.format(*raw)
_info_base = ('The :class:`mne.Info` object with information about the '
'sensors and methods of measurement.')
docdict['info'] = f"""
info : mne.Info | None
{_info_base}
"""
docdict['info_not_none'] = f"""
info : mne.Info
{_info_base}
"""
docdict['info_str'] = f"""
info : mne.Info | path-like
{_info_base} If ``path-like``, it should be a :class:`str` or
:class:`pathlib.Path` to a file with measurement information
(e.g. :class:`mne.io.Raw`).
"""
docdict['int_order_maxwell'] = """
int_order : int
Order of internal component of spherical expansion.
"""
docdict['interaction_scene'] = """
interaction : 'trackball' | 'terrain'
How interactions with the scene via an input device (e.g., mouse or
trackpad) modify the camera position. If ``'terrain'``, one axis is
fixed, enabling "turntable-style" rotations. If ``'trackball'``,
movement along all axes is possible, which provides more freedom of
movement, but you may inadvertently rotate the scene along some axes.
"""
docdict['interaction_scene_none'] = """
interaction : 'trackball' | 'terrain' | None
How interactions with the scene via an input device (e.g., mouse or
trackpad) modify the camera position. If ``'terrain'``, one axis is
fixed, enabling "turntable-style" rotations. If ``'trackball'``,
movement along all axes is possible, which provides more freedom of
movement, but you may inadvertently rotate the scene along some axes.
If ``None``, the setting stored in the MNE-Python configuration file is
used.
"""
docdict['interp'] = """
interp : str
Either 'hann', 'cos2' (default), 'linear', or 'zero', the type of
forward-solution interpolation to use between forward solutions
at different head positions.
"""
docdict['interpolation_brain_time'] = """
interpolation : str | None
Interpolation method (:class:`scipy.interpolate.interp1d` parameter).
Must be one of 'linear', 'nearest', 'zero', 'slinear', 'quadratic',
or 'cubic'.
"""
docdict['inversion_bf'] = """
inversion : 'single' | 'matrix'
This determines how the beamformer deals with source spaces in "free"
orientation. Such source spaces define three orthogonal dipoles at each
source point. When ``inversion='single'``, each dipole is considered
as an individual source and the corresponding spatial filter is
computed for each dipole separately. When ``inversion='matrix'``, all
three dipoles at a source vertex are considered as a group and the
spatial filters are computed jointly using a matrix inversion. While
``inversion='single'`` is more stable, ``inversion='matrix'`` is more
precise. See section 5 of :footcite:`vanVlietEtAl2018`.
Defaults to ``'matrix'``.
"""
# %%
# K
docdict['keep_his_anonymize_info'] = """
keep_his : bool
If ``True``, ``his_id`` of ``subject_info`` will **not** be overwritten.
Defaults to ``False``.
.. warning:: This could mean that ``info`` is not fully
anonymized. Use with caution.
"""
docdict['kwargs_fun'] = """
**kwargs : dict
Additional keyword arguments to pass to ``fun``.
"""
# %%
# L
docdict['l_freq'] = """
l_freq : float | None
For FIR filters, the lower pass-band edge; for IIR filters, the lower
cutoff frequency. If None, the data are only low-passed.
"""
docdict['l_freq_ecg_filter'] = """
l_freq : float
Low pass frequency to apply to the ECG channel while finding events.
h_freq : float
High pass frequency to apply to the ECG channel while finding events.
"""
docdict['l_trans_bandwidth'] = """
l_trans_bandwidth : float | str
Width of the transition band at the low cut-off frequency in Hz
(high pass or cutoff 1 in bandpass). Can be "auto"
(default) to use a multiple of ``l_freq``::
min(max(l_freq * 0.25, 2), l_freq)
Only used for ``method='fir'``.
"""
docdict['label_tc_el_returns'] = """
label_tc : array | list (or generator) of array, shape (n_labels[, n_orient], n_times)
Extracted time course for each label and source estimate.
""" # noqa: E501
docdict['labels_eltc'] = """
labels : Label | BiHemiLabel | list | tuple | str
If using a surface or mixed source space, this should be the
:class:`~mne.Label`'s for which to extract the time course.
If working with whole-brain volume source estimates, this must be one of:
- a string path to a FreeSurfer atlas for the subject (e.g., their
'aparc.a2009s+aseg.mgz') to extract time courses for all volumes in the
atlas
- a two-element list or tuple, the first element being a path to an atlas,
and the second being a list or dict of ``volume_labels`` to extract
(see :func:`mne.setup_volume_source_space` for details).
.. versionchanged:: 0.21.0
Support for volume source estimates.
"""
docdict['line_alpha_plot_psd'] = """
line_alpha : float | None
Alpha for the PSD line. Can be None (default) to use 1.0 when
``average=True`` and 0.1 when ``average=False``.
"""
_long_format_df_base = """
long_format : bool
If True, the DataFrame is returned in long format where each row is one
observation of the signal at a unique combination of time point{}.
{}Defaults to ``False``.
"""
ch_type = ('For convenience, a ``ch_type`` column is added to facilitate '
'subsetting the resulting DataFrame. ')
raw = (' and channel', ch_type)
epo = (', channel, epoch number, and condition', ch_type)
stc = (' and vertex', '')
docdict['long_format_df_epo'] = _long_format_df_base.format(*epo)
docdict['long_format_df_raw'] = _long_format_df_base.format(*raw)
docdict['long_format_df_stc'] = _long_format_df_base.format(*stc)
docdict['loose'] = """
loose : float | 'auto' | dict
Value that weights the source variances of the dipole components
that are parallel (tangential) to the cortical surface. Can be:
- float between 0 and 1 (inclusive)
If 0, then the solution is computed with fixed orientation.
If 1, it corresponds to free orientations.
- ``'auto'`` (default)
Uses 0.2 for surface source spaces (unless ``fixed`` is True) and
1.0 for other source spaces (volume or mixed).
- dict
Mapping from the key for a given source space type (surface, volume,
discrete) to the loose value. Useful mostly for mixed source spaces.
"""
# %%
# M
docdict['mag_scale_maxwell'] = """
mag_scale : float | str
The magnetometer scale-factor used to bring the magnetometers
to approximately the same order of magnitude as the gradiometers
(default 100.), as they have different units (T vs T/m).
Can be ``'auto'`` to use the reciprocal of the physical distance
between the gradiometer pickup loops (e.g., 0.0168 m yields
59.5 for VectorView).
"""
docdict['mapping_rename_channels_duplicates'] = """
mapping : dict | callable
A dictionary mapping the old channel to a new channel name
e.g. {'EEG061' : 'EEG161'}. Can also be a callable function
that takes and returns a string.
.. versionchanged:: 0.10.0
Support for a callable function.
allow_duplicates : bool
If True (default False), allow duplicates, which will automatically
be renamed with ``-N`` at the end.
.. versionadded:: 0.22.0
"""
_mask_base = """
mask : ndarray of bool, shape {shape} | None
Array indicating channel{shape_appendix} to highlight with a distinct
plotting style{example}. Array elements set to ``True`` will be plotted
with the parameters given in ``mask_params``. Defaults to ``None``,
equivalent to an array of all ``False`` elements.
"""
docdict['mask_evoked_topomap'] = _mask_base.format(
shape='(n_channels, n_times)', shape_appendix='-time combinations',
example=' (useful for, e.g., marking the channels and times at which a '
'statistical test of the data reaches significance)')
docdict['mask_params_topomap'] = """
mask_params : dict | None
Additional plotting parameters for plotting significant sensors.
Default (None) equals::
dict(marker='o', markerfacecolor='w', markeredgecolor='k',
linewidth=0, markersize=4)
"""
docdict['mask_patterns_topomap'] = _mask_base.format(
shape='(n_channels, n_patterns)', shape_appendix='-pattern combinations',
example='')
docdict['mask_topomap'] = _mask_base.format(
shape='(n_channels,)', shape_appendix='(s)', example='')
docdict['match_alias'] = """
match_alias : bool | dict
Whether to use a lookup table to match unrecognized channel location names
to their known aliases. If True, uses the mapping in
``mne.io.constants.CHANNEL_LOC_ALIASES``. If a :class:`dict` is passed, it
will be used instead, and should map from non-standard channel names to
names in the specified ``montage``. Default is ``False``.
.. versionadded:: 0.23
"""
docdict['match_case'] = """
match_case : bool
If True (default), channel name matching will be case sensitive.
.. versionadded:: 0.20
"""
docdict['max_step_clust'] = """
max_step : int
Maximum distance between samples along the second axis of ``X`` to be
considered adjacent (typically the second axis is the "time" dimension).
Only used when ``adjacency`` has shape (n_vertices, n_vertices), that is,
when adjacency is only specified for sensors (e.g., via
:func:`mne.channels.find_ch_adjacency`), and not via sensors **and**
further dimensions such as time points (e.g., via an additional call of
:func:`mne.stats.combine_adjacency`).
"""
docdict['measure'] = """
measure : 'zscore' | 'correlation'
Which method to use for finding outliers among the components:
- ``'zscore'`` (default) is the iterative z-scoring method. This method
computes the z-score of the component's scores and masks the components
with a z-score above threshold. This process is repeated until no
supra-threshold component remains.
- ``'correlation'`` is an absolute raw correlation threshold ranging from 0
to 1.
.. versionadded:: 0.21"""
docdict['meg'] = """
meg : str | list | bool | None
Can be "helmet", "sensors" or "ref" to show the MEG helmet, sensors or
reference sensors respectively, or a combination like
``('helmet', 'sensors')`` (same as None, default). True translates to
``('helmet', 'sensors', 'ref')``.
"""
docdict['metadata_epochs'] = """
metadata : instance of pandas.DataFrame | None
A :class:`pandas.DataFrame` specifying metadata about each epoch.
If given, ``len(metadata)`` must equal ``len(events)``. The DataFrame
may only contain values of type (str | int | float | bool).
If metadata is given, then pandas-style queries may be used to select
subsets of data, see :meth:`mne.Epochs.__getitem__`.
When a subset of the epochs is created in this (or any other
supported) manner, the metadata object is subsetted accordingly, and
the row indices will be modified to match ``epochs.selection``.
.. versionadded:: 0.16
"""
docdict['method_fir'] = """
method : str
'fir' will use overlap-add FIR filtering, 'iir' will use IIR
forward-backward filtering (via filtfilt).
"""
docdict['mode_eltc'] = """
mode : str
Extraction mode, see Notes.
"""
docdict['mode_pctf'] = """
mode : None | 'mean' | 'max' | 'svd'
Compute summary of PSFs/CTFs across all indices specified in 'idx'.
Can be:
* None : Output individual PSFs/CTFs for each specific vertex
(Default).
* 'mean' : Mean of PSFs/CTFs across vertices.
* 'max' : PSFs/CTFs with maximum norm across vertices. Returns the
n_comp largest PSFs/CTFs.
* 'svd' : SVD components across PSFs/CTFs across vertices. Returns the
n_comp first SVD components.
"""
docdict['montage'] = """
montage : None | str | DigMontage
A montage containing channel positions. If a string or
:class:`~mne.channels.DigMontage` is
specified, the existing channel information will be updated with the
channel positions from the montage. Valid strings are the names of the
built-in montages that ship with MNE-Python; you can list those via
:func:`mne.channels.get_builtin_montages`.
If ``None`` (default), the channel positions will be removed from the
:class:`~mne.Info`.
"""
docdict['montage_types'] = """EEG/sEEG/ECoG/DBS/fNIRS"""
docdict['moving'] = """
moving : instance of SpatialImage
The image to morph ("from" volume).
"""
docdict['mri_resolution_eltc'] = """
mri_resolution : bool
If True (default), the volume source space will be upsampled to the
original MRI resolution via trilinear interpolation before the atlas values
are extracted. This ensures that each atlas label will contain source
activations. When False, only the original source space points are used,
and some atlas labels thus may not contain any source space vertices.
.. versionadded:: 0.21.0
"""
# %%
# N
docdict['n_comp_pctf_n'] = """
n_comp : int
Number of PSF/CTF components to return for mode='max' or mode='svd'.
Default n_comp=1.
"""
docdict['n_jobs'] = """
n_jobs : int | None
The number of jobs to run in parallel. If ``-1``, it is set
to the number of CPU cores. Requires the :mod:`joblib` package.
``None`` (default) is a marker for 'unset' that will be interpreted
as ``n_jobs=1`` (sequential execution) unless the call is performed under
a :func:`joblib:joblib.parallel_backend` context manager that sets another
value for ``n_jobs``.
"""
docdict['n_jobs_cuda'] = """
n_jobs : int | str
Number of jobs to run in parallel. Can be 'cuda' if ``cupy``
is installed properly.
"""
docdict['n_jobs_fir'] = """
n_jobs : int | str
Number of jobs to run in parallel. Can be 'cuda' if ``cupy``
is installed properly and method='fir'.
"""
docdict['n_pca_components_apply'] = """
n_pca_components : int | float | None
The number of PCA components to be kept, either absolute (int)
or fraction of the explained variance (float). If None (default),
the ``ica.n_pca_components`` from initialization will be used in 0.22;
in 0.23 all components will be used.
"""
docdict['n_permutations_clust_all'] = """
n_permutations : int | 'all'
The number of permutations to compute. Can be 'all' to perform
an exact test.
"""
docdict['n_permutations_clust_int'] = """
n_permutations : int
The number of permutations to compute.
"""
docdict['nirx_notes'] = """
This function has only been tested with NIRScout and NIRSport devices,
and with the NIRStar software version 15 and above and Aurora software
2021 and above.
The NIRSport device can detect if the amplifier is saturated.
Starting from NIRStar 14.2, those saturated values are replaced by NaNs
in the standard .wlX files.
The raw unmodified measured values are stored in another file
called .nosatflags_wlX. As NaN values can cause unexpected behaviour with
mathematical functions, the default behaviour is to return the
saturated data.
"""
docdict['niter'] = """
niter : dict | tuple | None
For each phase of the volume registration, ``niter`` is the number of
iterations per successive stage of optimization. If a tuple is
provided, it will be used for all steps (except center of mass, which does
not iterate). It should have length 3 to
correspond to ``sigmas=[3.0, 1.0, 0.0]`` and ``factors=[4, 2, 1]`` in
the pipeline (see :func:`dipy.align.affine_registration
<dipy.align._public.affine_registration>` for details).
If a dictionary is provided, number of iterations can be set for each
step as a key. Steps not in the dictionary will use the default value.
The default (None) is equivalent to:
niter=dict(translation=(100, 100, 10),
rigid=(100, 100, 10),
affine=(100, 100, 10),
sdr=(5, 5, 3))
"""
docdict['norm_pctf'] = """
norm : None | 'max' | 'norm'
Whether and how to normalise the PSFs and CTFs. This will be applied
before computing summaries as specified in 'mode'.
Can be:
* None : Use un-normalized PSFs/CTFs (Default).
* 'max' : Normalize to maximum absolute value across all PSFs/CTFs.
* 'norm' : Normalize to maximum norm across all PSFs/CTFs.
"""
docdict['normalization'] = """normalization : 'full' | 'length'
Normalization strategy. If "full", the PSD will be normalized by the
sampling rate as well as the length of the signal (as in
:ref:`Nitime <nitime:users-guide>`). Default is ``'length'``."""
docdict['normalize_psd_topo'] = """
normalize : bool
If True, each band will be divided by the total power. Defaults to
False.
"""
docdict['notes_tmax_included_by_default'] = """
Unlike Python slices, MNE time intervals by default include **both**
their end points; ``crop(tmin, tmax)`` returns the interval
``tmin <= t <= tmax``. Pass ``include_tmax=False`` to specify the half-open
interval ``tmin <= t < tmax`` instead.
"""
docdict['npad'] = """
npad : int | str
Amount to pad the start and end of the data.
Can also be "auto" to use a padding that will result in
a power-of-two size (can be much faster).
"""
# %%
# O
docdict['offset_decim'] = """
offset : int
Apply an offset to where the decimation starts relative to the
sample corresponding to t=0. The offset is in samples at the
current sampling rate.
.. versionadded:: 0.12
"""
docdict['on_defects'] = """
on_defects : 'raise' | 'warn' | 'ignore'
What to do if the surface is found to have topological defects.
Can be ``'raise'`` (default) to raise an error, ``'warn'`` to emit a
warning, or ``'ignore'`` to ignore when one or more defects are found.
Note that many computations in MNE-Python assume the surfaces to be
topologically correct; topological defects may still cause other
computations (e.g., `mne.make_bem_model` and `mne.make_bem_solution`)
to fail irrespective of this parameter.
"""
docdict['on_header_missing'] = """
on_header_missing : str
Can be ``'raise'`` (default) to raise an error, ``'warn'`` to emit a
warning, or ``'ignore'`` to ignore when the FastSCAN header is missing.
.. versionadded:: 0.22
"""
_on_missing_base = """\
Can be ``'raise'`` (default) to raise an error, ``'warn'`` to emit a
warning, or ``'ignore'`` to ignore when"""
docdict['on_mismatch_info'] = f"""
on_mismatch : 'raise' | 'warn' | 'ignore'
{_on_missing_base} the device-to-head transformation differs between
instances.
.. versionadded:: 0.24
"""
docdict['on_missing_ch_names'] = f"""
on_missing : 'raise' | 'warn' | 'ignore'
{_on_missing_base} entries in ch_names are not present in the raw instance.
.. versionadded:: 0.23.0
"""
docdict['on_missing_chpi'] = f"""
on_missing : 'raise' | 'warn' | 'ignore'
{_on_missing_base} no cHPI information can be found. If ``'ignore'`` or
``'warn'``, all return values will be empty arrays or ``None``. If
``'raise'``, an exception will be raised.
"""
docdict['on_missing_epochs'] = """
on_missing : 'raise' | 'warn' | 'ignore'
What to do if one or several event ids are not found in the recording.
Valid values are 'raise' | 'warn' | 'ignore'.
Default is ``'raise'``. If ``'warn'``, it will proceed but
warn; if ``'ignore'``, it will proceed silently.
.. note::
If none of the event ids are found in the data, an error will be
automatically generated irrespective of this parameter.
"""
docdict['on_missing_events'] = f"""
on_missing : 'raise' | 'warn' | 'ignore'
{_on_missing_base} event numbers from ``event_id`` are missing from
:term:`events`. When numbers from :term:`events` are missing from
``event_id`` they will be ignored and a warning emitted; consider
using ``verbose='error'`` in this case.
.. versionadded:: 0.21
"""
docdict['on_missing_fwd'] = f"""
on_missing : 'raise' | 'warn' | 'ignore'
{_on_missing_base} ``stc`` has vertices that are not in ``fwd``.
"""
docdict['on_missing_montage'] = f"""
on_missing : 'raise' | 'warn' | 'ignore'
{_on_missing_base} channels have missing coordinates.
.. versionadded:: 0.20.1
"""
docdict['on_rank_mismatch'] = """
on_rank_mismatch : str
If an explicit MEG value is passed, what to do when it does not match
an empirically computed rank (only used for covariances).
Can be 'raise' to raise an error, 'warn' (default) to emit a warning, or
'ignore' to ignore.
.. versionadded:: 0.23
"""
docdict['on_split_missing'] = f"""
on_split_missing : str
{_on_missing_base} split file is missing.
.. versionadded:: 0.22
"""
docdict['origin_maxwell'] = """
origin : array-like, shape (3,) | str
Origin of internal and external multipolar moment space in meters.
The default is ``'auto'``, which means ``(0., 0., 0.)`` when
``coord_frame='meg'``, and a head-digitization-based
origin fit using :func:`~mne.bem.fit_sphere_to_headshape`
when ``coord_frame='head'``. If automatic fitting fails (e.g., due
to having too few digitization points),
consider separately calling the fitting function with different
options or specifying the origin manually.
"""
docdict['out_type_clust'] = """
out_type : 'mask' | 'indices'
Output format of clusters within a list.
If ``'mask'``, returns a list of boolean arrays,
each with the same shape as the input data (or slices if the shape is 1D
and adjacency is None), with ``True`` values indicating locations that are
part of a cluster. If ``'indices'``, returns a list of tuple of ndarray,
where each ndarray contains the indices of locations that together form the
given cluster along the given dimension. Note that for large datasets,
``'indices'`` may use far less memory than ``'mask'``.
Default is ``'indices'``.
"""
docdict['outlines_topomap'] = """
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will be
drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos' will
serve as image mask.
Alternatively, a matplotlib patch object can be passed for advanced
masking options, either directly or as a function that returns patches
(required for multi-axis plots). If None, nothing will be drawn.
Defaults to 'head'.
"""
docdict['overview_mode'] = """
overview_mode : str | None
Can be "channels", "empty", or "hidden" to set the overview bar mode
for the ``'qt'`` backend. If None (default), the config option
``MNE_BROWSER_OVERVIEW_MODE`` will be used, defaulting to "channels"
if it's not found.
"""
docdict['overwrite'] = """
overwrite : bool
If True (default False), overwrite the destination file if it
exists.
"""
# %%
# P
_pad_base = """
pad : str
The type of padding to use. Supports all :func:`numpy.pad` ``mode``
options. Can also be ``"reflect_limited"``, which pads with a
reflected version of each vector mirrored on the first and last values
of the vector, followed by zeros.
"""
docdict['pad'] = _pad_base
docdict['pad_fir'] = _pad_base + """
Only used for ``method='fir'``.
"""
docdict['pca_vars_pctf'] = """
pca_vars : array, shape (n_comp,) | list of array
The explained variances of the first n_comp SVD components across the
PSFs/CTFs for the specified vertices. Arrays for multiple labels are
returned as list. Only returned if mode='svd' and return_pca_vars=True.
"""
docdict['phase'] = """
phase : str
Phase of the filter, only used if ``method='fir'``.
Symmetric linear-phase FIR filters are constructed, and if ``phase='zero'``
(default), the delay of this filter is compensated for, making it
non-causal. If ``phase='zero-double'``,
then this filter is applied twice, once forward, and once backward
(also making it non-causal). If ``'minimum'``, then a minimum-phase filter
will be constructed and applied, which is causal but has weaker stop-band
suppression.
.. versionadded:: 0.13
"""
docdict['physical_range_export_params'] = """
physical_range : str | tuple
The physical range of the data. If 'auto' (default), then
it will infer the physical min and max from the data itself,
taking the minimum and maximum values per channel type.
If it is a 2-tuple of minimum and maximum limit, then those
physical ranges will be used. Only used for exporting EDF files.
"""
_pick_ori_novec = """
Options:
- ``None``
Pooling is performed by taking the norm of loose/free
orientations. In case of a fixed source space no norm is computed
leading to signed source activity.
- ``"normal"``
Only the normal to the cortical surface is kept. This is only
implemented when working with loose orientations.
"""
docdict['pick_ori'] = """
pick_ori : None | "normal" | "vector"
""" + _pick_ori_novec + """
- ``"vector"``
No pooling of the orientations is done, and the vector result
will be returned in the form of a :class:`mne.VectorSourceEstimate`
object.
"""
docdict['pick_ori_bf'] = """
pick_ori : None | str
For forward solutions with fixed orientation, None (default) must be
used and a scalar beamformer is computed. For free-orientation forward
solutions, a vector beamformer is computed and:
- ``None``
Orientations are pooled after computing a vector beamformer (Default).
- ``'normal'``
Filters are computed for the orientation normal to the
cortical surface.
- ``'max-power'``
Filters are computed for the orientation that maximizes power.
"""
docdict['pick_ori_novec'] = """
pick_ori : None | "normal"
""" + _pick_ori_novec
_picks_types = 'str | list | slice | None'
_picks_header = f'picks : {_picks_types}'
_picks_desc = 'Channels to include.'
_picks_int = ('Slices and lists of integers will be interpreted as channel '
'indices.')
_picks_str = """In lists, channel *type* strings
(e.g., ``['meg', 'eeg']``) will pick channels of those
types, channel *name* strings (e.g., ``['MEG0111', 'MEG2623']``)
will pick the given channels. Can also be the string values
"all" to pick all channels, or "data" to pick :term:`data channels`.
None (default) will pick"""
_reminder = ("Note that channels in ``info['bads']`` *will be included* if "
"their {}indices are explicitly provided.")
reminder = _reminder.format('names or ')
reminder_nostr = _reminder.format('')
noref = f'(excluding reference MEG channels). {reminder}'
picks_base = f"""{_picks_header}
{_picks_desc} {_picks_int} {_picks_str}"""
docdict['picks_all'] = _reflow_param_docstring(
f'{picks_base} all channels. {reminder}')
docdict['picks_all_data'] = _reflow_param_docstring(
f'{picks_base} all data channels. {reminder}')
docdict['picks_all_data_noref'] = _reflow_param_docstring(
f'{picks_base} all data channels {noref}')
docdict['picks_base'] = _reflow_param_docstring(picks_base)
docdict['picks_good_data'] = _reflow_param_docstring(
f'{picks_base} good data channels. {reminder}')
docdict['picks_good_data_noref'] = _reflow_param_docstring(
f'{picks_base} good data channels {noref}')
docdict['picks_header'] = _picks_header
docdict['picks_ica'] = """
picks : int | list of int | slice | None
Indices of the independent components (ICs) to visualize.
If an integer, represents the index of the IC to pick.
Multiple ICs can be selected using a list of int or a slice.
The indices are 0-indexed, so ``picks=1`` will
pick the second IC: ``ICA001``.
"""
docdict['picks_nostr'] = f"""picks : list | slice | None
{_picks_desc} {_picks_int}
None (default) will pick all channels. {reminder_nostr}"""
docdict['picks_plot_projs_joint_trace'] = f"""\
picks_trace : {_picks_types}
Channels to show alongside the projected time courses. Typically
these are the ground-truth channels for an artifact (e.g., ``'eog'`` or
``'ecg'``). {_picks_int} {_picks_str} no channels.
"""
docdict['picks_plot_psd_good_data'] = \
f'{picks_base} good data channels. {reminder}'[:-2] + """
Cannot be None if ``ax`` is supplied. If both ``picks`` and ``ax`` are None,
separate subplots will be created for each standard channel type
(``mag``, ``grad``, and ``eeg``).
"""
docdict['pipeline'] = """
pipeline : str | tuple
The volume registration steps to perform (a ``str`` for a single step,
or ``tuple`` for a set of sequential steps). The following steps can be
performed, and do so by matching mutual information between the images
(unless otherwise noted):
``'translation'``
Translation.
``'rigid'``
Rigid-body, i.e., rotation and translation.
``'affine'``
A full affine transformation, which includes translation, rotation,
scaling, and shear.
``'sdr'``
Symmetric diffeomorphic registration :footcite:`AvantsEtAl2008`, a
non-linear similarity-matching algorithm.
The following string shortcuts can also be used:
``'all'`` (default)
All steps above will be performed, in the order given, i.e.,
``('translation', 'rigid', 'affine', 'sdr')``.
``'rigids'``
The rigid steps (first two) will be performed, which registers
the volume without distorting its underlying structure, i.e.,
``('translation', 'rigid')``. This is useful for
example when registering images from the same subject, such as
CT and MR images.
``'affines'``
The affine steps (first three) will be performed, i.e., omitting
the SDR step.
"""
docdict['plot_psd_doc'] = """
Plot the power spectral density across channels.
Different channel types are drawn in sub-plots. When the data have been
processed with a bandpass, lowpass or highpass filter, dashed lines (╎)
indicate the boundaries of the filter. The line noise frequency is
also indicated with a dashed line (⋮).
"""
docdict['precompute'] = """
precompute : bool | str
Whether to load all data (not just the visible portion) into RAM and
apply preprocessing (e.g., projectors) to the full data array in a separate
processor thread, instead of window-by-window during scrolling. The default
None uses the ``MNE_BROWSER_PRECOMPUTE`` variable, which defaults to
``'auto'``. ``'auto'`` compares available RAM space to the expected size of
the precomputed data, and precomputes only if enough RAM is available.
This is only used with the Qt backend.
.. versionadded:: 0.24
.. versionchanged:: 1.0
Support for the MNE_BROWSER_PRECOMPUTE config variable.
"""
docdict['preload'] = """
preload : bool or str (default False)
Preload data into memory for data manipulation and faster indexing.
If True, the data will be preloaded into memory (fast, requires
large amount of memory). If preload is a string, preload is the
file name of a memory-mapped file which is used to store the data
on the hard drive (slower, requires less memory)."""
docdict['preload_concatenate'] = """
preload : bool, str, or None (default None)
Preload data into memory for data manipulation and faster indexing.
If True, the data will be preloaded into memory (fast, requires
large amount of memory). If preload is a string, preload is the
file name of a memory-mapped file which is used to store the data
on the hard drive (slower, requires less memory). If preload is
None, preload=True or False is inferred using the preload status
of the instances passed in.
"""
docdict['proj_epochs'] = """
proj : bool | 'delayed'
Apply SSP projection vectors. If proj is 'delayed' and reject is not
None the single epochs will be projected before the rejection
decision, but used in unprojected state if they are kept.
This way deciding which projection vectors are good can be postponed
to the evoked stage without resulting in lower epoch counts and
without producing results different from early SSP application
given comparable parameters. Note that in this case baselining,
detrending and temporal decimation will be postponed.
If proj is False no projections will be applied which is the
recommended value if SSPs are not used for cleaning the data.
"""
docdict['proj_plot'] = """
proj : bool | 'interactive' | 'reconstruct'
If True, SSP projections are applied before display. If 'interactive',
a check box for reversible selection of SSP projection vectors will
be shown. If 'reconstruct', projection vectors will be applied and then
M/EEG data will be reconstructed via field mapping to reduce the signal
bias caused by projection.
.. versionchanged:: 0.21
Support for 'reconstruct' was added.
"""
docdict['proj_topomap_kwargs'] = """
cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
Colormap to use. If tuple, the first value indicates the colormap to
use and the second value is a boolean defining interactivity. In
interactive mode (only works if ``colorbar=True``) the colors are
adjustable by clicking and dragging the colorbar with left and right
mouse button. Left mouse button moves the scale up and down and right
mouse button adjusts the range. Hitting space bar resets the range. Up
and down arrows can be used to change the colormap. If None (default),
'Reds' is used for all positive data, otherwise defaults to 'RdBu_r'.
If 'interactive', translates to (None, True).
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib plot
format string (e.g., 'r+' for red plusses). If True, a circle will be
used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
res : int
The resolution of the topomap image (n pixels along each side).
size : scalar
Side length of the topomaps in inches (only applies when plotting
multiple topomaps at a time).
show : bool
Show figure if True.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will be
drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos' will
serve as image mask.
Alternatively, a matplotlib patch object can be passed for advanced
masking options, either directly or as a function that returns patches
(required for multi-axis plots). If None, nothing will be drawn.
Defaults to 'head'.
contours : int | array of float
The number of contour lines to draw. If 0, no contours will be drawn.
When an integer, matplotlib ticker locator is used to find suitable
values for the contour thresholds (may sometimes be inaccurate, use
array for accuracy). If an array, the values represent the levels for
the contours. Defaults to 6.
""" + docdict['image_interp_topomap'] + """
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of projectors. If instance of Axes,
there must be only one projector. Defaults to None.
vlim : tuple of length 2 | 'joint'
Colormap limits to use. If :class:`tuple`, specifies the lower and
upper bounds of the colormap (in that order); providing ``None`` for
either of these will set the corresponding boundary at the min/max of
the data (separately for each projector). The keyword value ``'joint'``
will compute the colormap limits jointly across all provided
projectors of the same channel type, using the min/max of the projector
data. If vlim is ``'joint'``, ``info`` must not be ``None``. Defaults
to ``(None, None)``.
"""
docdict['projection_set_eeg_reference'] = """
projection : bool
If ``ref_channels='average'`` this argument specifies if the
average reference should be computed as a projection (True) or not
(False; default). If ``projection=True``, the average reference is
added as a projection and is not applied to the data (it can be
applied afterwards with the ``apply_proj`` method). If
``projection=False``, the average reference is directly applied to
the data. If ``ref_channels`` is not ``'average'``, ``projection``
must be set to ``False`` (the default in this case).
"""
docdict['projs_report'] = """
projs : bool | None
Whether to add SSP projector plots if projectors are present in
the data. If ``None``, use ``projs`` from `~mne.Report` creation.
"""
# %%
# R
docdict['random_state'] = """
random_state : None | int | instance of ~numpy.random.RandomState
A seed for the NumPy random number generator (RNG). If ``None`` (default),
the seed will be obtained from the operating system
(see :class:`~numpy.random.RandomState` for details), meaning it will most
likely produce different output every time this function or method is run.
To achieve reproducible results, pass a value here to explicitly initialize
the RNG with a defined state.
"""
_rank_base = """
rank : None | 'info' | 'full' | dict
This controls the rank computation that can be read from the
measurement info or estimated from the data. When a noise covariance
is used for whitening, this should reflect the rank of that covariance,
otherwise amplification of noise components can occur in whitening (e.g.,
often during source localization).
:data:`python:None`
The rank will be estimated from the data after proper scaling of
different channel types.
``'info'``
The rank is inferred from ``info``. If data have been processed
with Maxwell filtering, the Maxwell filtering header is used.
Otherwise, the channel counts themselves are used.
In both cases, the number of projectors is subtracted from
the (effective) number of channels in the data.
For example, if Maxwell filtering reduces the rank to 68, with
two projectors the returned value will be 66.
``'full'``
The rank is assumed to be full, i.e. equal to the
number of good channels. If a `~mne.Covariance` is passed, this can
make sense if it has been (possibly improperly) regularized without
taking into account the true data rank.
:class:`dict`
Calculate the rank only for a subset of channel types, and explicitly
specify the rank for the remaining channel types. This can be
extremely useful if you already **know** the rank of (part of) your
data, for instance in case you have calculated it earlier.
This parameter must be a dictionary whose **keys** correspond to
channel types in the data (e.g. ``'meg'``, ``'mag'``, ``'grad'``,
``'eeg'``), and whose **values** are integers representing the
respective ranks. For example, ``{'mag': 90, 'eeg': 45}`` will assume
a rank of ``90`` and ``45`` for magnetometer data and EEG data,
respectively.
The ranks for all channel types present in the data, but
**not** specified in the dictionary will be estimated empirically.
That is, if you passed a dataset containing magnetometer, gradiometer,
and EEG data together with the dictionary from the previous example,
only the gradiometer rank would be determined, while the specified
magnetometer and EEG ranks would be taken for granted.
"""
docdict['rank'] = _rank_base
docdict['rank_info'] = _rank_base + "\n The default is ``'info'``."
docdict['rank_none'] = _rank_base + "\n The default is ``None``."
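# Illustrative sketch (comment only): passing an explicit rank dictionary, as
# described above, e.g. to mne.compute_covariance. ``epochs`` is an assumed
# existing mne.Epochs instance; the rank values are arbitrary.
#
#     import mne
#     cov = mne.compute_covariance(epochs, rank=dict(mag=90, eeg=45))
#     # gradiometer rank (not given) is still estimated empirically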
docdict['raw_epochs'] = """
raw : Raw object
An instance of `~mne.io.Raw`.
"""
docdict['reduce_rank'] = """
reduce_rank : bool
If True, the rank of the denominator of the beamformer formula (i.e.,
during pseudo-inversion) will be reduced by one for each spatial location.
Setting ``reduce_rank=True`` is typically necessary if you use a single
sphere model with MEG data.
.. versionchanged:: 0.20
Support for reducing rank in all modes (previously only supported
``pick='max_power'`` with weight normalization).
"""
docdict['ref_channels'] = """
ref_channels : str | list of str
Name of the electrode(s) which served as the reference in the
recording. If a name is provided, a corresponding channel is added
and its data is set to 0. This is useful for later re-referencing.
"""
docdict['ref_channels_set_eeg_reference'] = """
ref_channels : list of str | str
Can be:
- The name(s) of the channel(s) used to construct the reference.
- ``'average'`` to apply an average reference (default)
- ``'REST'`` to use the Reference Electrode Standardization Technique
infinity reference :footcite:`Yao2001`.
- An empty list, in which case MNE will not attempt any re-referencing of
the data
"""
docdict['reg_affine'] = """
reg_affine : ndarray of float, shape (4, 4)
The affine that registers one volume to another.
"""
docdict['regularize_maxwell_reg'] = """
regularize : str | None
Basis regularization type, must be "in" or None.
"in" is the same algorithm as the "-regularize in" option in
MaxFilter™.
"""
_reject_by_annotation_base = """
reject_by_annotation : bool
Whether to omit bad segments from the data before fitting. If ``True``
(default), annotated segments whose description begins with ``'bad'`` are
omitted. If ``False``, no rejection based on annotations is performed.
"""
docdict['reject_by_annotation_all'] = _reject_by_annotation_base
docdict['reject_by_annotation_epochs'] = """
reject_by_annotation : bool
Whether to reject based on annotations. If ``True`` (default), epochs
overlapping with segments whose description begins with ``'bad'`` are
rejected. If ``False``, no rejection based on annotations is performed.
"""
docdict['reject_by_annotation_raw'] = _reject_by_annotation_base + """
Has no effect if ``inst`` is not a :class:`mne.io.Raw` object.
"""
_reject_common = """\
Reject epochs based on **maximum** peak-to-peak signal amplitude (PTP),
i.e. the absolute difference between the lowest and the highest signal
value. In each individual epoch, the PTP is calculated for every channel.
If the PTP of any one channel exceeds the rejection threshold, the
respective epoch will be dropped.
The dictionary keys correspond to the different channel types; valid
**keys** can be any channel type present in the object.
Example::
reject = dict(grad=4000e-13, # unit: T / m (gradiometers)
mag=4e-12, # unit: T (magnetometers)
eeg=40e-6, # unit: V (EEG channels)
eog=250e-6 # unit: V (EOG channels)
)
.. note:: Since rejection is based on a signal **difference**
calculated for each channel separately, applying baseline
correction does not affect the rejection procedure, as the
difference will be preserved.
"""
docdict['reject_drop_bad'] = f"""
reject : dict | str | None
{_reject_common}
If ``reject`` is ``None``, no rejection is performed. If ``'existing'``
(default), then the rejection parameters set at instantiation are used.
"""
docdict['reject_epochs'] = f"""
reject : dict | None
{_reject_common}
.. note:: To constrain the time period used for estimation of signal
quality, pass the ``reject_tmin`` and ``reject_tmax`` parameters.
If ``reject`` is ``None`` (default), no rejection is performed.
"""
docdict['replace_report'] = """
replace : bool
If ``True``, content already present that has the same ``title`` will be
replaced. Defaults to ``False``, which will cause duplicate entries in the
table of contents if an entry for ``title`` already exists.
"""
docdict['res_topomap'] = """
res : int
The resolution of the topomap image (n pixels along each side).
"""
docdict['return_pca_vars_pctf'] = """
return_pca_vars : bool
Whether or not to return the explained variances across the specified
vertices for individual SVD components. This is only valid if
mode='svd'.
Default return_pca_vars=False.
"""
docdict['roll'] = """
roll : float | None
The roll of the camera rendering the view in degrees.
"""
# %%
# S
docdict['saturated'] = """saturated : str
Replace saturated segments of data with NaNs, can be:
``"ignore"``
The measured data is returned, even if it contains measurements
while the amplifier was saturated.
``"nan"``
The returned data will contain NaNs during time segments
when the amplifier was saturated.
``"annotate"`` (default)
The returned data will contain annotations marking the
saturated segments.
This argument will only be used if there is no .nosatflags file
(only if a NIRSport device is used and saturation occurred).
.. versionadded:: 0.24
"""
docdict['scalings'] = """
scalings : 'auto' | dict | None
Scaling factors for the traces. If a dictionary where any
value is ``'auto'``, the scaling factor is set to match the 99.5th
percentile of the respective data. If ``'auto'``, all scalings (for all
channel types) are set to ``'auto'``. If any values are ``'auto'`` and the
data is not preloaded, a subset up to 100 MB will be loaded. If ``None``,
defaults to::
dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4,
emg=1e-3, ref_meg=1e-12, misc=1e-3, stim=1,
resp=1, chpi=1e-4, whitened=1e2)
.. note::
A particular scaling value ``s`` corresponds to half of the visualized
signal range around zero (i.e. from ``0`` to ``+s`` or from ``0`` to
``-s``). For example, the default scaling of ``20e-6`` (20µV) for EEG
signals means that the visualized range will be 40 µV (20 µV in the
positive direction and 20 µV in the negative direction).
"""
docdict['scalings_df'] = """
scalings : dict | None
Scaling factor applied to the channels picked. If ``None``, defaults to
``dict(eeg=1e6, mag=1e15, grad=1e13)`` — i.e., converts EEG to µV,
magnetometers to fT, and gradiometers to fT/cm.
"""
docdict['scalings_topomap'] = """
scalings : dict | float | None
The scalings of the channel types to be applied for plotting.
If None, defaults to ``dict(eeg=1e6, grad=1e13, mag=1e15)``.
"""
docdict['scoring'] = """
scoring : callable | str | None
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
Note that the "predict" method is automatically identified if scoring is
a string (e.g. ``scoring='roc_auc'`` calls ``predict_proba``), but is
**not** automatically set if ``scoring`` is a callable (e.g.
``scoring=sklearn.metrics.roc_auc_score``).
"""
docdict['sdr_morph'] = """
sdr_morph : instance of dipy.align.DiffeomorphicMap
The class that applies the symmetric diffeomorphic registration
(SDR) morph.
"""
docdict['section_report'] = """
section : str | None
The name of the section (or content block) to add the content to. This
feature is useful for grouping multiple related content elements
together under a single, collapsible section. Each content element will
retain its own title and functionality, but not appear separately in the
table of contents. Hence, using sections is a way to declutter the table
of contents and to ease navigation of the report.
.. versionadded:: 1.1
"""
docdict['seed'] = """
seed : None | int | instance of ~numpy.random.RandomState
A seed for the NumPy random number generator (RNG). If ``None`` (default),
the seed will be obtained from the operating system
(see :class:`~numpy.random.RandomState` for details), meaning it will most
likely produce different output every time this function or method is run.
To achieve reproducible results, pass a value here to explicitly initialize
the RNG with a defined state.
"""
docdict['seeg'] = """
seeg : bool
If True (default), show sEEG electrodes.
"""
docdict['sensors_topomap'] = """
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib plot
format string (e.g., 'r+' for red plusses). If True (default),
circles will be used.
"""
docdict['set_eeg_reference_see_also_notes'] = """
See Also
--------
mne.set_bipolar_reference : Convenience function for creating bipolar
references.
Notes
-----
Some common referencing schemes and the corresponding value for the
``ref_channels`` parameter:
- Average reference:
A new virtual reference electrode is created by averaging the current
EEG signal by setting ``ref_channels='average'``. Bad EEG channels are
automatically excluded if they are properly set in ``info['bads']``.
- A single electrode:
Set ``ref_channels`` to a list containing the name of the channel that
will act as the new reference, for example ``ref_channels=['Cz']``.
- The mean of multiple electrodes:
A new virtual reference electrode is created by computing the average
of the current EEG signal recorded from two or more selected channels.
Set ``ref_channels`` to a list of channel names, indicating which
channels to use. For example, to apply an average mastoid reference,
when using the 10-20 naming scheme, set ``ref_channels=['M1', 'M2']``.
- REST
The given EEG electrodes are referenced to a point at infinity using the
lead fields in ``forward``, which helps standardize the signals.
1. If a reference is requested that is not the average reference, this
function removes any pre-existing average reference projections.
2. During source localization, the EEG signal should have an average
reference.
3. In order to apply a reference, the data must be preloaded. This is not
necessary if ``ref_channels='average'`` and ``projection=True``.
4. For an average or REST reference, bad EEG channels are automatically
excluded if they are properly set in ``info['bads']``.
.. versionadded:: 0.9.0
References
----------
.. footbibliography::
"""
docdict['show'] = """
show : bool
Show the figure if ``True``.
"""
docdict['show_names_topomap'] = """
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g., to
delete the prefix 'MEG ' from all channel names, pass the function
``lambda x: x.replace('MEG ', '')``. If ``mask`` is not None, only
significant sensors will be shown.
"""
docdict['show_scalebars'] = """
show_scalebars : bool
Whether to show scale bars when the plot is initialized. Can be toggled
after initialization by pressing :kbd:`s` while the plot window is focused.
Default is ``True``.
"""
docdict['show_scrollbars'] = """
show_scrollbars : bool
Whether to show scrollbars when the plot is initialized. Can be toggled
after initialization by pressing :kbd:`z` ("zen mode") while the plot
window is focused. Default is ``True``.
.. versionadded:: 0.19.0
"""
docdict['show_traces'] = """
show_traces : bool | str | float
If True, enable interactive picking of a point on the surface of the
brain and plot its time course.
This feature is only available with the PyVista 3d backend, and requires
``time_viewer=True``. Defaults to 'auto', which will use True if and
only if ``time_viewer=True``, the backend is PyVista, and there is more
than one time point. If float (between zero and one), it specifies what
proportion of the total window should be devoted to traces (True is
equivalent to 0.25, i.e., it will occupy the bottom 1/4 of the figure).
.. versionadded:: 0.20.0
"""
docdict['size_topomap'] = """
size : float
Side length per topomap in inches.
"""
docdict['skip_by_annotation_maxwell'] = """
skip_by_annotation : str | list of str
If a string (or list of str), any annotation segment that begins
with the given string will not be included in filtering, and
segments on either side of the given excluded annotated segment
will be filtered separately (i.e., as independent signals).
The default ``('edge', 'bad_acq_skip')`` will separately filter
any segments that were concatenated by :func:`mne.concatenate_raws`
or :meth:`mne.io.Raw.append`, or separated during acquisition.
To disable, provide an empty list.
"""
docdict['smooth'] = """
smooth : float in [0, 1)
The smoothing factor to be applied. Default 0 is no smoothing.
"""
docdict['spatial_colors_plot_psd'] = """
spatial_colors : bool
Whether to use spatial colors. Only used when ``average=False``.
"""
_sphere_header = (
'sphere : float | array-like | instance of ConductorModel | None')
_sphere_desc = (
'The sphere parameters to use for the head outline. Can be array-like of '
'shape (4,) to give the X/Y/Z origin and radius in meters, or a single '
'float to give just the radius (origin assumed 0, 0, 0). Can also be an '
'instance of a spherical :class:`~mne.bem.ConductorModel` to use the '
'origin and radius from that object.'
)
_sphere_topo = _reflow_param_docstring(
f"""{_sphere_desc} ``None`` (the default) is equivalent to
(0, 0, 0, {HEAD_SIZE_DEFAULT}).
Currently the head radius does not affect plotting.""",
has_first_line=False)
_sphere_topo_auto = _reflow_param_docstring(
f"""{_sphere_desc} If ``'auto'`` the sphere is fit to digitization points.
If ``'eeglab'`` the head circle is defined by EEG electrodes ``'Fpz'``,
``'Oz'``, ``'T7'``, and ``'T8'`` (if ``'Fpz'`` is not present, it will
be approximated from the coordinates of ``'Oz'``). ``None`` (the default)
is equivalent to ``'auto'`` when enough extra digitization points are
available, and (0, 0, 0, {HEAD_SIZE_DEFAULT}) otherwise. Currently the head
radius does not affect plotting.""", has_first_line=False)
docdict['sphere_topomap'] = f"""
{_sphere_header}
{_sphere_topo}
.. versionadded:: 0.20
"""
docdict['sphere_topomap_auto'] = f"""\
{_sphere_header} | 'auto' | 'eeglab'
{_sphere_topo_auto}
.. versionadded:: 0.20
.. versionchanged:: 1.1 Added ``'eeglab'`` option.
"""
docdict['split_naming'] = """
split_naming : 'neuromag' | 'bids'
When splitting files, append a filename partition with the appropriate
naming schema: for ``'neuromag'``, a split file ``fname.fif`` will be named
``fname.fif``, ``fname-1.fif``, ``fname-2.fif`` etc.; while for ``'bids'``,
it will be named ``fname_split-01.fif``, ``fname_split-02.fif``, etc.
"""
docdict['src_eltc'] = """
src : instance of SourceSpaces
The source spaces for the source time courses.
"""
docdict['src_volume_options'] = """
src : instance of SourceSpaces | None
The source space corresponding to the source estimate. Only necessary
if the STC is a volume or mixed source estimate.
volume_options : float | dict | None
Options for volumetric source estimate plotting, with key/value pairs:
- ``'resolution'`` : float | None
Resolution (in mm) of volume rendering. Smaller (e.g., 1.) looks
better at the cost of speed. None (default) uses the volume source
space resolution, which is often something like 7 or 5 mm,
without resampling.
- ``'blending'`` : str
Can be "mip" (default) for :term:`maximum intensity projection` or
"composite" for composite blending using alpha values.
- ``'alpha'`` : float | None
Alpha for the volumetric rendering. Defaults are 0.4 for vector source
estimates and 1.0 for scalar source estimates.
- ``'surface_alpha'`` : float | None
Alpha for the surface enclosing the volume(s). None (default) will use
half the volume alpha. Set to zero to avoid plotting the surface.
- ``'silhouette_alpha'`` : float | None
Alpha for a silhouette along the outside of the volume. None (default)
will use ``0.25 * surface_alpha``.
- ``'silhouette_linewidth'`` : float
The line width to use for the silhouette. Default is 2.
A float input (default 1.) or None will be used for the ``'resolution'``
entry.
"""
docdict['st_fixed_maxwell_only'] = """
st_fixed : bool
If True (default), do tSSS using the median head position during the
``st_duration`` window. This is the default behavior of MaxFilter
and has been most extensively tested.
.. versionadded:: 0.12
st_only : bool
If True, only tSSS (temporal) projection of MEG data will be
performed on the output data. The non-tSSS parameters (e.g.,
``int_order``, ``calibration``, ``head_pos``, etc.) will still be
used to form the SSS bases used to calculate temporal projectors,
but the output MEG data will *only* have temporal projections
performed. Noise reduction from SSS basis multiplication,
cross-talk cancellation, movement compensation, and so forth
will not be applied to the data. This is useful, for example, when
evoked movement compensation will be performed with
:func:`~mne.epochs.average_movements`.
.. versionadded:: 0.12
"""
docdict['standardize_names'] = """
standardize_names : bool
If True, standardize MEG and EEG channel names to be
``"MEG ###"`` and ``"EEG ###"``. If False (default), native
channel names in the file will be used when possible.
"""
_stat_fun_clust_base = """
stat_fun : callable | None
Function called to calculate the test statistic. Must accept 1D-array as
input and return a 1D array. If ``None`` (the default), uses
`mne.stats.{}`.
"""
docdict['stat_fun_clust_f'] = _stat_fun_clust_base.format('f_oneway')
docdict['stat_fun_clust_t'] = _stat_fun_clust_base.format('ttest_1samp_no_p')
docdict['static'] = """
static : instance of SpatialImage
The image to align with ("to" volume).
"""
docdict['stc_plot_kwargs_report'] = """
stc_plot_kwargs : dict
Dictionary of keyword arguments to pass to
:class:`mne.SourceEstimate.plot`. Only used when plotting in 3D
mode.
"""
docdict['stcs_pctf'] = """
stcs : instance of SourceEstimate | list of instances of SourceEstimate
PSFs or CTFs as STC objects.
All PSFs/CTFs will be returned as successive samples in STC objects,
in the order they are specified in idx. STCs for different labels will
be returned as a list.
"""
docdict['std_err_by_event_type_returns'] = """
std_err : instance of Evoked | list of Evoked
The standard error over epochs.
When ``by_event_type=True`` was specified, a list is returned containing a
separate :class:`~mne.Evoked` object for each event type. The list has the
same order as the event types as specified in the ``event_id``
dictionary.
"""
docdict['step_down_p_clust'] = """
step_down_p : float
To perform a step-down-in-jumps test, pass a p-value for clusters to
exclude from each successive iteration. Default is zero, perform no
step-down test (since no clusters will be smaller than this value).
Setting this to a reasonable value, e.g. 0.05, can increase sensitivity
but costs computation time.
"""
docdict['subject'] = """
subject : str
The FreeSurfer subject name.
"""
docdict['subject_label'] = """
subject : str | None
Subject which this label belongs to. Should only be specified if it is not
specified in the label.
"""
docdict['subject_none'] = """
subject : str | None
The FreeSurfer subject name.
"""
docdict['subject_optional'] = """
subject : str
The FreeSurfer subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
"""
docdict['subjects_dir'] = """
subjects_dir : path-like | None
The path to the directory containing the FreeSurfer subjects
reconstructions. If ``None``, defaults to the ``SUBJECTS_DIR`` environment
variable.
"""
docdict['surface'] = """surface : str
The surface along which to do the computations, defaults to ``'white'``
(the gray-white matter boundary).
"""
# %%
# T
docdict['t_power_clust'] = """
t_power : float
Power to raise the statistical values (usually t-values) by before
summing (sign will be retained). Note that ``t_power=0`` will give a
count of locations in each cluster, while ``t_power=1`` will weight each location
by its statistical score.
"""
docdict['t_window_chpi_t'] = """
t_window : float
Time window to use to estimate the amplitudes, default is
0.2 (200 ms).
"""
docdict['tags_report'] = """
tags : array-like of str | str
Tags to add for later interactive filtering. Must not contain spaces.
"""
docdict['tail_clust'] = """
tail : int
If tail is 1, the statistic is thresholded above threshold.
If tail is -1, the statistic is thresholded below threshold.
If tail is 0, the statistic is thresholded on both sides of
the distribution.
"""
_theme = """\
theme : str | path-like | None
Can be "auto", "light", or "dark" or a path-like to a
custom stylesheet. For Dark-Mode and automatic Dark-Mode-Detection,
:mod:`qdarkstyle` and
`darkdetect <https://github.com/albertosottile/darkdetect>`__,
respectively, are required.\
If None (default), the config option {config_option} will be used,
defaulting to "auto" if it's not found.\
"""
docdict['theme_3d'] = """
{theme}
""".format(theme=_theme.format(config_option='MNE_3D_OPTION_THEME'))
docdict['theme_pg'] = """
{theme}
Only supported by the ``'qt'`` backend.
""".format(theme=_theme.format(config_option='MNE_BROWSER_THEME'))
docdict['thresh'] = """
thresh : None or float
Not supported yet.
If not None, values below thresh will not be visible.
"""
_threshold_clust_base = """
threshold : float | dict | None
The so-called "cluster forming threshold" in the form of a test statistic
(note: this is not an alpha level / "p-value").
If numeric, vertices with data values more extreme than ``threshold`` will
be used to form clusters. If ``None``, {} will be chosen
automatically that corresponds to a p-value of 0.05 for the given number of
observations (only valid when using {}). If ``threshold`` is a
:class:`dict` (with keys ``'start'`` and ``'step'``) then threshold-free
cluster enhancement (TFCE) will be used (see the
:ref:`TFCE example <tfce_example>` and :footcite:`SmithNichols2009`).
See Notes for an example on how to compute a threshold based on
a particular p-value for one-tailed or two-tailed tests.
"""
f_test = ('an F-threshold', 'an F-statistic')
docdict['threshold_clust_f'] = _threshold_clust_base.format(*f_test)
docdict['threshold_clust_f_notes'] = """
For computing a ``threshold`` based on a p-value, use the conversion
from :meth:`scipy.stats.rv_continuous.ppf`::
pval = 0.001 # arbitrary
dfn = n_conditions - 1 # degrees of freedom numerator
dfd = n_observations - n_conditions # degrees of freedom denominator
thresh = scipy.stats.f.ppf(1 - pval, dfn=dfn, dfd=dfd) # F distribution
"""
t_test = ('a t-threshold', 'a t-statistic')
docdict['threshold_clust_t'] = _threshold_clust_base.format(*t_test)
docdict['threshold_clust_t_notes'] = """
For computing a ``threshold`` based on a p-value, use the conversion
from :meth:`scipy.stats.rv_continuous.ppf`::
pval = 0.001 # arbitrary
df = n_observations - 1 # degrees of freedom for the test
thresh = scipy.stats.t.ppf(1 - pval / 2, df) # two-tailed, t distribution
For a one-tailed test (``tail=1``), don't divide the p-value by 2.
For testing the lower tail (``tail=-1``), don't subtract ``pval`` from 1.
"""
docdict['time_format'] = """
time_format : 'float' | 'clock'
Style of time labels on the horizontal axis. If ``'float'``, labels will be
number of seconds from the start of the recording. If ``'clock'``,
labels will show "clock time" (hours/minutes/seconds) inferred from
``raw.info['meas_date']``. Default is ``'float'``.
.. versionadded:: 0.24
"""
_time_format_df_base = """
time_format : str | None
Desired time format. If ``None``, no conversion is applied, and time values
remain as float values in seconds. If ``'ms'``, time values will be rounded
to the nearest millisecond and converted to integers. If ``'timedelta'``,
time values will be converted to :class:`pandas.Timedelta` values. {}
Default is ``None``.
"""
docdict['time_format_df'] = _time_format_df_base.format('')
_raw_tf = ("If ``'datetime'``, time values will be converted to "
":class:`pandas.Timestamp` values, relative to "
"``raw.info['meas_date']`` and offset by ``raw.first_samp``. ")
docdict['time_format_df_raw'] = _time_format_df_base.format(_raw_tf)
docdict['time_label'] = """
time_label : str | callable | None
Format of the time label (a format string, a function that maps
floating point time values to strings, or None for no label). The
default is ``'auto'``, which will use ``time=%0.2f ms`` if there
is more than one time point.
"""
docdict['time_viewer_brain_screenshot'] = """
time_viewer : bool
If True, include time viewer traces. Only used if
``time_viewer=True`` and ``separate_canvas=False``.
"""
docdict['title_none'] = """
title : str | None
The title of the generated figure. If ``None`` (default), no title is
displayed.
"""
docdict['tmax_raw'] = """
tmax : float
End time of the raw data to use in seconds (cannot exceed data duration).
"""
docdict['tmin'] = """
tmin : scalar
Time point of the first sample in data.
"""
docdict['tmin_raw'] = """
tmin : float
Start time of the raw data to use in seconds (must be >= 0).
"""
docdict['tol_kind_rank'] = """
tol_kind : str
Can be: "absolute" (default) or "relative". Only used if ``tol`` is a
float, because when ``tol`` is a string the mode is implicitly relative.
After applying the chosen scale factors / normalization to the data,
the singular values are computed, and the rank is then taken as:
- ``'absolute'``
The number of singular values ``s`` greater than ``tol``.
This mode can fail if your data do not adhere to typical
data scalings.
- ``'relative'``
The number of singular values ``s`` greater than ``tol * s.max()``.
This mode can fail if you have one or more large components in the
data (e.g., artifacts).
.. versionadded:: 0.21.0
"""
docdict['tol_rank'] = """
tol : float | 'auto'
Tolerance for singular values to consider non-zero in
calculating the rank. The singular values are calculated
in this method such that independent data are expected to
have singular value around one. Can be 'auto' to use the
same thresholding as :func:`scipy.linalg.orth`.
"""
docdict['topomap_kwargs'] = """
topomap_kwargs : dict | None
Keyword arguments to pass to the topomap-generating functions.
"""
_trans_base = """\
If str, the path to the head<->MRI transform ``*-trans.fif`` file produced
during coregistration. Can also be ``'fsaverage'`` to use the built-in
fsaverage transformation."""
docdict['trans'] = f"""
trans : path-like | dict | instance of Transform | None
{_trans_base}
If trans is None, an identity matrix is assumed.
.. versionchanged:: 0.19
Support for 'fsaverage' argument.
"""
docdict['trans_not_none'] = """
trans : str | dict | instance of Transform
%s
""" % (_trans_base,)
docdict['transparent'] = """
transparent : bool | None
If True: use a linear transparency between fmin and fmid
and make values below fmin fully transparent (symmetrically for
divergent colormaps). None will choose automatically based on colormap
type.
"""
docdict['tstart_ecg'] = """
tstart : float
Start ECG detection after ``tstart`` seconds. Useful when the beginning
of the run is noisy.
"""
docdict['tstep'] = """
tstep : scalar
Time step between successive samples in data.
"""
# %%
# U
docdict['units'] = """
units : str | dict | None
Specify the unit(s) that the data should be returned in. If
``None`` (default), the data is returned in the
channel-type-specific default units, which are SI units (see
:ref:`units` and :term:`data channels`). If a string, must be a
sub-multiple of SI units that will be used to scale the data from
all channels of the type associated with that unit. This only works
if the data contains one channel type that has a unit (unitless
channel types are left unchanged). For example if there are only
EEG and STIM channels, ``units='uV'`` will scale EEG channels to
micro-Volts while STIM channels will be unchanged. Finally, if a
dictionary is provided, keys must be channel types, and values must
be units to scale the data of that channel type to. For example
``dict(grad='fT/cm', mag='fT')`` will scale the corresponding types
accordingly, but all other channel types will remain in their
channel-type-specific default unit.
"""
docdict['units_topomap'] = """
units : dict | str | None
    The unit of the channel type used for the colorbar label. If
    ``scale`` is ``None``, the unit is determined automatically.
"""
docdict['use_cps'] = """
use_cps : bool
Whether to use cortical patch statistics to define normal orientations for
surfaces (default True).
"""
docdict['use_cps_restricted'] = """
use_cps : bool
Whether to use cortical patch statistics to define normal orientations for
surfaces (default True).
Only used when the inverse is free orientation (``loose=1.``),
not in surface orientation, and ``pick_ori='normal'``.
"""
docdict['use_opengl'] = """
use_opengl : bool | None
Whether to use OpenGL when rendering the plot (requires ``pyopengl``).
May increase performance, but effect is dependent on system CPU and
graphics hardware. Only works if using the Qt backend. Default is
None, which will use False unless the user configuration variable
``MNE_BROWSER_USE_OPENGL`` is set to ``'true'``,
see :func:`mne.set_config`.
.. versionadded:: 0.24
"""
# %%
# V
docdict['verbose'] = """
verbose : bool | str | int | None
Control verbosity of the logging output. If ``None``, use the default
verbosity level. See the :ref:`logging documentation <tut-logging>` and
:func:`mne.verbose` for details. Should only be passed as a keyword
argument."""
docdict['vertices_volume'] = """
vertices : list of array of int
The indices of the dipoles in the source space. Should be a single
array of shape (n_dipoles,) unless there are subvolumes.
"""
docdict['view'] = """
view : str | None
The name of the view to show (e.g. "lateral"). Other arguments
take precedence and modify the camera starting from the ``view``.
See :meth:`Brain.show_view <mne.viz.Brain.show_view>` for valid
string shortcut options.
"""
docdict['view_layout'] = """
view_layout : str
Can be "vertical" (default) or "horizontal". When using "horizontal" mode,
the PyVista backend must be used and hemi cannot be "split".
"""
docdict['views'] = """
views : str | list
View to use. Using multiple views (list) is not supported for mpl
backend. See :meth:`Brain.show_view <mne.viz.Brain.show_view>` for
valid string options.
"""
docdict['vlim_psd_topo_joint'] = """
vlim : tuple of length 2 | 'joint'
Colormap limits to use. If a :class:`tuple` of floats, specifies the
lower and upper bounds of the colormap (in that order); providing
``None`` for either entry will set the corresponding boundary at the
min/max of the data (separately for each topomap). Elements of the
:class:`tuple` may also be callable functions which take in a
:class:`NumPy array <numpy.ndarray>` and return a scalar.
If ``vlim='joint'``, will compute the colormap limits jointly across
all topomaps of the same channel type, using the min/max of the data.
Defaults to ``(None, None)``.
.. versionadded:: 0.21
"""
docdict['vmin_vmax_topomap'] = """
vmin, vmax : float | callable | None
Lower and upper bounds of the colormap, in the same units as the data.
If ``vmin`` and ``vmax`` are both ``None``, they are set at ± the
maximum absolute value of the data (yielding a colormap with midpoint
at 0). If only one of ``vmin``, ``vmax`` is ``None``, will use
``min(data)`` or ``max(data)``, respectively. If callable, should
accept a :class:`NumPy array <numpy.ndarray>` of data and return a
float.
"""
# %%
# W
docdict['weight_norm'] = """
weight_norm : str | None
Can be:
- ``None``
The unit-gain LCMV beamformer :footcite:`SekiharaNagarajan2008` will be
computed.
- ``'unit-noise-gain'``
The unit-noise gain minimum variance beamformer will be computed
(Borgiotti-Kaplan beamformer) :footcite:`SekiharaNagarajan2008`,
which is not rotation invariant when ``pick_ori='vector'``.
This should be combined with
:meth:`stc.project('pca') <mne.VectorSourceEstimate.project>` to follow
the definition in :footcite:`SekiharaNagarajan2008`.
- ``'nai'``
The Neural Activity Index :footcite:`VanVeenEtAl1997` will be computed,
which simply scales all values from ``'unit-noise-gain'`` by a fixed
value.
- ``'unit-noise-gain-invariant'``
Compute a rotation-invariant normalization using the matrix square
root. This differs from ``'unit-noise-gain'`` only when
``pick_ori='vector'``, creating a solution that:
1. Is rotation invariant (``'unit-noise-gain'`` is not);
2. Satisfies the first requirement from
:footcite:`SekiharaNagarajan2008` that ``w @ w.conj().T == I``,
whereas ``'unit-noise-gain'`` has non-zero off-diagonals; but
3. Does not satisfy the second requirement that ``w @ G.T = θI``,
which arguably does not make sense for a rotation-invariant
solution.
"""
docdict['window_psd'] = """
window : str | float | tuple
Windowing function to use. See :func:`scipy.signal.get_window`.
"""
docdict['window_resample'] = """
window : str | tuple
Frequency-domain window to use in resampling.
See :func:`scipy.signal.resample`.
"""
# %%
# X
docdict['xscale_plot_psd'] = """
xscale : str
Can be 'linear' (default) or 'log'.
"""
# %%
# Y
# %%
# Z
docdict_indented = {}
def fill_doc(f):
"""Fill a docstring with docdict entries.
Parameters
----------
f : callable
The function to fill the docstring of. Will be modified in place.
Returns
-------
f : callable
The function, potentially with an updated ``__doc__``.
"""
docstring = f.__doc__
if not docstring:
return f
lines = docstring.splitlines()
# Find the minimum indent of the main docstring, after first line
if len(lines) < 2:
icount = 0
else:
icount = _indentcount_lines(lines[1:])
# Insert this indent to dictionary docstrings
try:
indented = docdict_indented[icount]
except KeyError:
indent = ' ' * icount
docdict_indented[icount] = indented = {}
for name, dstr in docdict.items():
lines = dstr.splitlines()
try:
newlines = [lines[0]]
for line in lines[1:]:
newlines.append(indent + line)
indented[name] = '\n'.join(newlines)
except IndexError:
indented[name] = dstr
try:
f.__doc__ = docstring % indented
except (TypeError, ValueError, KeyError) as exp:
funcname = f.__name__
funcname = docstring.split('\n')[0] if funcname is None else funcname
raise RuntimeError('Error documenting %s:\n%s'
% (funcname, str(exp)))
return f
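# A minimal usage sketch (not part of the original module): ``_demo_fill_doc`` is a
# hypothetical function showing how ``fill_doc`` expands ``%(...)s`` placeholders
# such as ``%(verbose)s`` from ``docdict`` into the decorated function's docstring.
@fill_doc
def _demo_fill_doc(verbose=None):
    """Do nothing, but document the verbose argument (sketch only).

    Parameters
    ----------
    %(verbose)s
    """
    return None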
##############################################################################
# Utilities for docstring manipulation.
def copy_doc(source):
"""Copy the docstring from another function (decorator).
    The docstring of the source function is prepended to the docstring of the
function wrapped by this decorator.
This is useful when inheriting from a class and overloading a method. This
decorator can be used to copy the docstring of the original method.
Parameters
----------
source : function
Function to copy the docstring from
Returns
-------
wrapper : function
The decorated function
Examples
--------
>>> class A:
... def m1():
... '''Docstring for m1'''
... pass
>>> class B (A):
... @copy_doc(A.m1)
... def m1():
... ''' this gets appended'''
... pass
>>> print(B.m1.__doc__)
Docstring for m1 this gets appended
"""
def wrapper(func):
if source.__doc__ is None or len(source.__doc__) == 0:
raise ValueError('Cannot copy docstring: docstring was empty.')
doc = source.__doc__
if func.__doc__ is not None:
doc += func.__doc__
func.__doc__ = doc
return func
return wrapper
def copy_function_doc_to_method_doc(source):
"""Use the docstring from a function as docstring for a method.
    The docstring of the source function is prepended to the docstring of the
function wrapped by this decorator. Additionally, the first parameter
specified in the docstring of the source function is removed in the new
docstring.
This decorator is useful when implementing a method that just calls a
    function. This pattern is prevalent in, for example, the plotting functions
of MNE.
Parameters
----------
source : function
Function to copy the docstring from.
Returns
-------
wrapper : function
The decorated method.
Notes
-----
The parsing performed is very basic and will break easily on docstrings
that are not formatted exactly according to the ``numpydoc`` standard.
Always inspect the resulting docstring when using this decorator.
Examples
--------
>>> def plot_function(object, a, b):
... '''Docstring for plotting function.
...
... Parameters
... ----------
... object : instance of object
... The object to plot
... a : int
... Some parameter
... b : int
... Some parameter
... '''
... pass
...
>>> class A:
... @copy_function_doc_to_method_doc(plot_function)
... def plot(self, a, b):
... '''
... Notes
... -----
... .. versionadded:: 0.13.0
... '''
... plot_function(self, a, b)
>>> print(A.plot.__doc__)
Docstring for plotting function.
<BLANKLINE>
Parameters
----------
a : int
Some parameter
b : int
Some parameter
<BLANKLINE>
Notes
-----
.. versionadded:: 0.13.0
<BLANKLINE>
"""
def wrapper(func):
doc = source.__doc__.split('\n')
if len(doc) == 1:
doc = doc[0]
if func.__doc__ is not None:
doc += func.__doc__
func.__doc__ = doc
return func
# Find parameter block
for line, text in enumerate(doc[:-2]):
if (text.strip() == 'Parameters' and
doc[line + 1].strip() == '----------'):
parameter_block = line
break
else:
# No parameter block found
raise ValueError('Cannot copy function docstring: no parameter '
'block found. To simply copy the docstring, use '
'the @copy_doc decorator instead.')
# Find first parameter
for line, text in enumerate(doc[parameter_block:], parameter_block):
if ':' in text:
first_parameter = line
parameter_indentation = len(text) - len(text.lstrip(' '))
break
else:
raise ValueError('Cannot copy function docstring: no parameters '
'found. To simply copy the docstring, use the '
'@copy_doc decorator instead.')
# Find end of first parameter
for line, text in enumerate(doc[first_parameter + 1:],
first_parameter + 1):
# Ignore empty lines
if len(text.strip()) == 0:
continue
line_indentation = len(text) - len(text.lstrip(' '))
if line_indentation <= parameter_indentation:
                # Reached the end of the first parameter
                first_parameter_end = line
                # If only one parameter is defined, remove the Parameters
                # heading as well
if ':' not in text:
first_parameter = parameter_block
break
else:
# End of docstring reached
first_parameter_end = line
first_parameter = parameter_block
# Copy the docstring, but remove the first parameter
doc = ('\n'.join(doc[:first_parameter]) + '\n' +
'\n'.join(doc[first_parameter_end:]))
if func.__doc__ is not None:
doc += func.__doc__
func.__doc__ = doc
return func
return wrapper
def copy_base_doc_to_subclass_doc(subclass):
"""Use the docstring from a parent class methods in derived class.
The docstring of a parent class method is prepended to the
docstring of the method of the class wrapped by this decorator.
Parameters
----------
subclass : wrapped class
Class to copy the docstring to.
Returns
-------
subclass : Derived class
The decorated class with copied docstrings.
"""
ancestors = subclass.mro()[1:-1]
for source in ancestors:
methodList = [method for method in dir(source)
if callable(getattr(source, method))]
for method_name in methodList:
# discard private methods
if method_name[0] == '_':
continue
base_method = getattr(source, method_name)
sub_method = getattr(subclass, method_name)
if base_method is not None and sub_method is not None:
doc = base_method.__doc__
if sub_method.__doc__ is not None:
doc += '\n' + sub_method.__doc__
sub_method.__doc__ = doc
return subclass
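# A minimal usage sketch (illustrative only; ``_DemoBase`` and ``_DemoSub`` are
# hypothetical classes): the decorator prepends ``_DemoBase.describe``'s docstring
# to the docstring of the overriding ``_DemoSub.describe``.
class _DemoBase:
    def describe(self):
        """Describe the object (base class docstring)."""
        return 'base'


@copy_base_doc_to_subclass_doc
class _DemoSub(_DemoBase):
    def describe(self):
        """Subclass-specific notes."""
        return 'sub'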
def linkcode_resolve(domain, info):
"""Determine the URL corresponding to a Python object.
Parameters
----------
domain : str
Only useful when 'py'.
info : dict
With keys "module" and "fullname".
Returns
-------
url : str
The code URL.
Notes
-----
This has been adapted to deal with our "verbose" decorator.
Adapted from SciPy (doc/source/conf.py).
"""
import mne
if domain != 'py':
return None
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except Exception:
return None
# deal with our decorators properly
while hasattr(obj, '__wrapped__'):
obj = obj.__wrapped__
try:
fn = inspect.getsourcefile(obj)
except Exception:
fn = None
if not fn:
try:
fn = inspect.getsourcefile(sys.modules[obj.__module__])
except Exception:
fn = None
if not fn:
return None
fn = op.relpath(fn, start=op.dirname(mne.__file__))
fn = '/'.join(op.normpath(fn).split(os.sep)) # in case on Windows
try:
source, lineno = inspect.getsourcelines(obj)
except Exception:
lineno = None
if lineno:
linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1)
else:
linespec = ""
if 'dev' in mne.__version__:
kind = 'main'
else:
kind = 'maint/%s' % ('.'.join(mne.__version__.split('.')[:2]))
return "http://github.com/mne-tools/mne-python/blob/%s/mne/%s%s" % (
kind, fn, linespec)
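# A minimal sketch of how Sphinx's ``linkcode`` extension would call the hook above.
# The target object (``mne.open_docs``) is just an assumed example; this helper is
# not part of the original module and is never called at import time.
def _demo_linkcode_url():
    """Return the source URL for an example MNE object (sketch only)."""
    return linkcode_resolve('py', dict(module='mne', fullname='open_docs'))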
def open_docs(kind=None, version=None):
"""Launch a new web browser tab with the MNE documentation.
Parameters
----------
kind : str | None
Can be "api" (default), "tutorials", or "examples".
The default can be changed by setting the configuration value
MNE_DOCS_KIND.
version : str | None
Can be "stable" (default) or "dev".
The default can be changed by setting the configuration value
MNE_DOCS_VERSION.
"""
from .check import _check_option
from .config import get_config
if kind is None:
kind = get_config('MNE_DOCS_KIND', 'api')
help_dict = dict(api='python_reference.html', tutorials='tutorials.html',
examples='auto_examples/index.html')
_check_option('kind', kind, sorted(help_dict.keys()))
kind = help_dict[kind]
if version is None:
version = get_config('MNE_DOCS_VERSION', 'stable')
_check_option('version', version, ['stable', 'dev'])
webbrowser.open_new_tab('https://mne.tools/%s/%s' % (version, kind))
# Following deprecated class copied from scikit-learn
# force show of DeprecationWarning even on python 2.7
warnings.filterwarnings('always', category=DeprecationWarning, module='mne')
class deprecated:
"""Mark a function, class, or method as deprecated (decorator).
Originally adapted from sklearn and
    http://wiki.python.org/moin/PythonDecoratorLibrary, then modified to make
    arguments populate properly following our verbose decorator, which is built
    on the ``decorator`` package.
Parameters
----------
extra : str
Extra information beyond just saying the class/function/method
is deprecated.
"""
def __init__(self, extra=''): # noqa: D102
self.extra = extra
def __call__(self, obj): # noqa: D105
"""Call.
Parameters
----------
obj : object
Object to call.
Returns
-------
obj : object
The modified object.
"""
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = f"Class {cls.__name__} is deprecated"
cls.__init__ = self._make_fun(cls.__init__, msg)
return cls
def _decorate_fun(self, fun):
"""Decorate function fun."""
msg = f"Function {fun.__name__} is deprecated"
return self._make_fun(fun, msg)
def _make_fun(self, function, msg):
if self.extra:
msg += "; %s" % self.extra
body = f"""\
def %(name)s(%(signature)s):\n
import warnings
warnings.warn({repr(msg)}, category=DeprecationWarning)
return _function_(%(shortsignature)s)"""
evaldict = dict(_function_=function)
fm = FunctionMaker(
function, None, None, None, None, function.__module__)
attrs = dict(__wrapped__=function, __qualname__=function.__qualname__,
__globals__=function.__globals__)
dep = fm.make(body, evaldict, addsource=True, **attrs)
dep.__doc__ = self._update_doc(dep.__doc__)
dep._deprecated_original = function
return dep
def _update_doc(self, olddoc):
newdoc = ".. warning:: DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
newdoc += '.'
if olddoc:
# Get the spacing right to avoid sphinx warnings
n_space = 4
for li, line in enumerate(olddoc.split('\n')):
if li > 0 and len(line.strip()):
n_space = len(line) - len(line.lstrip())
break
newdoc = "%s\n\n%s%s" % (newdoc, ' ' * n_space, olddoc)
return newdoc
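# A minimal usage sketch (illustrative only): ``_old_demo_helper`` is a hypothetical
# function marked as deprecated; calling it emits a DeprecationWarning and its
# docstring gains a ".. warning:: DEPRECATED" admonition.
@deprecated(extra='use a hypothetical new_demo_helper instead')
def _old_demo_helper():
    """Return nothing (sketch only)."""
    return None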
def deprecated_alias(dep_name, func, removed_in=None):
"""Inject a deprecated alias into the namespace."""
if removed_in is None:
from .._version import __version__
removed_in = __version__.split('.')[:2]
removed_in[1] = str(int(removed_in[1]) + 1)
removed_in = '.'.join(removed_in)
# Inject a deprecated version into the namespace
inspect.currentframe().f_back.f_globals[dep_name] = deprecated(
f'{dep_name} has been deprecated in favor of {func.__name__} and will '
f'be removed in {removed_in}.'
)(deepcopy(func))
###############################################################################
# The following tools were adapted (mostly trimmed) from SciPy's doccer.py
def _docformat(docstring, docdict=None, funcname=None):
"""Fill a function docstring from variables in dictionary.
Adapt the indent of the inserted docs
Parameters
----------
docstring : string
docstring from function, possibly with dict formatting strings
docdict : dict, optional
dictionary with keys that match the dict formatting strings
and values that are docstring fragments to be inserted. The
indentation of the inserted docstrings is set to match the
minimum indentation of the ``docstring`` by adding this
indentation to all lines of the inserted string, except the
first
Returns
-------
outstring : string
string with requested ``docdict`` strings inserted
"""
if not docstring:
return docstring
if docdict is None:
docdict = {}
if not docdict:
return docstring
lines = docstring.expandtabs().splitlines()
# Find the minimum indent of the main docstring, after first line
if len(lines) < 2:
icount = 0
else:
icount = _indentcount_lines(lines[1:])
indent = ' ' * icount
# Insert this indent to dictionary docstrings
indented = {}
for name, dstr in docdict.items():
lines = dstr.expandtabs().splitlines()
try:
newlines = [lines[0]]
for line in lines[1:]:
newlines.append(indent + line)
indented[name] = '\n'.join(newlines)
except IndexError:
indented[name] = dstr
funcname = docstring.split('\n')[0] if funcname is None else funcname
try:
return docstring % indented
except (TypeError, ValueError, KeyError) as exp:
raise RuntimeError('Error documenting %s:\n%s'
% (funcname, str(exp)))
def _indentcount_lines(lines):
"""Compute minimum indent for all lines in line list."""
indentno = sys.maxsize
for line in lines:
stripped = line.lstrip()
if stripped:
indentno = min(indentno, len(line) - len(stripped))
if indentno == sys.maxsize:
return 0
return indentno
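# A minimal sketch (illustrative only) of the SciPy-derived helpers above:
# ``_docformat`` substitutes ``docdict`` entries into ``%(...)s`` placeholders while
# matching the indentation of the target docstring.
def _demo_docformat():
    """Return a demo docstring filled by ``_docformat`` (sketch only)."""
    demo = """Demo docstring.

    Parameters
    ----------
    %(verbose)s
    """
    return _docformat(demo, docdict, funcname='demo')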
| bsd-3-clause |
matheuszaglia/opensearchgeo | opensearch.py | 1 | 7247 | from flask import Flask, request, make_response, render_template, abort, jsonify, send_file
import inpe_data
import os
import io
import logging
app = Flask(__name__)
app.config['PROPAGATE_EXCEPTIONS'] = True
app.logger_name = "opensearch"
handler = logging.FileHandler('errors.log')
handler.setFormatter(logging.Formatter(
'[%(asctime)s] %(levelname)s in %(module)s: %(message)s'
))
app.logger.addHandler(handler)
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True
app.jinja_env.keep_trailing_newline = True
@app.route('/granule.<string:output>', methods=['GET'])
def os_granule(output):
data = []
total_results = 0
start_index = request.args.get('startIndex', 1)
count = request.args.get('count', 10)
if start_index == "":
start_index = 0
elif int(start_index) == 0:
abort(400, 'Invalid startIndex')
else:
start_index = int(start_index) - 1
if count == "":
count = 10
elif int(count) < 0:
abort(400, 'Invalid count')
else:
count = int(count)
try:
data = inpe_data.get_bbox(request.args.get('bbox', None),
request.args.get('uid', None),
request.args.get('path', None),
request.args.get('row', None),
request.args.get('start', None),
request.args.get('end', None),
request.args.get('radiometricProcessing', None),
request.args.get('type', None),
request.args.get('band', None),
request.args.get('dataset', None),
request.args.get('cloud', None),
start_index, count)
except inpe_data.InvalidBoundingBoxError:
abort(400, 'Invalid bounding box')
except IOError:
abort(503)
if output == 'json':
resp = jsonify(data)
resp.headers.add('Access-Control-Allow-Origin', '*')
return resp
resp = make_response(render_template('granule.{}'.format(output),
                                          url=request.url.replace('&', '&amp;'),
data=data, start_index=start_index, count=count,
url_root=os.environ.get('BASE_URL')))
if output == 'atom':
resp.content_type = 'application/atom+xml' + output
resp.headers.add('Access-Control-Allow-Origin', '*')
return resp
@app.route('/collections.<string:output>')
def os_dataset(output):
abort(503) # disabled at the moment
total_results = 0
data = None
start_index = request.args.get('startIndex', 1)
count = request.args.get('count', 10)
if start_index == "":
start_index = 0
elif int(start_index) == 0:
abort(400, 'Invalid startIndex')
else:
start_index = int(start_index) - 1
if count == "":
count = 10
elif int(count) < 0:
abort(400, 'Invalid count')
else:
count = int(count)
try:
result = inpe_data.get_datasets(request.args.get('bbox', None),
request.args.get('searchTerms', None),
request.args.get('uid', None),
request.args.get('start', None),
request.args.get('end', None),
start_index, count)
data = result
except IOError:
abort(503)
resp = make_response(render_template('collections.' + output,
                                          url=request.url.replace('&', '&amp;'),
data=data, total_results=len(result),
start_index=start_index, count=count,
url_root=request.url_root,
updated=inpe_data.get_updated()
))
if output == 'atom':
output = 'atom+xml'
resp.content_type = 'application/' + output
resp.headers.add('Access-Control-Allow-Origin', '*')
return resp
@app.route('/')
@app.route('/osdd')
@app.route('/osdd/granule')
def os_osdd_granule():
resp = make_response(render_template('osdd_granule.xml',
url=os.environ.get('BASE_URL'),
datasets=inpe_data.get_datasets(),
bands=inpe_data.get_bands(),
rps=inpe_data.get_radiometricProcessing(),
types=inpe_data.get_types()))
resp.content_type = 'application/xml'
resp.headers.add('Access-Control-Allow-Origin', '*')
return resp
@app.route('/osdd/collection')
def os_osdd_collection():
resp = make_response(render_template('osdd_collection.xml', url=request.url_root))
resp.content_type = 'application/xml'
resp.headers.add('Access-Control-Allow-Origin', '*')
return resp
@app.route('/browseimage/<string:sceneid>')
def browse_image(sceneid):
try:
image = inpe_data.get_browse_image(sceneid)
except IndexError:
abort(400, 'There is no browse image with the provided Scene ID.')
except Exception as e:
abort(503, str(e))
return send_file(io.BytesIO(image), mimetype='image/jpeg')
@app.route('/metadata/<string:sceneid>')
def scene(sceneid):
try:
data, result_len = inpe_data.get_bbox(uid=sceneid)
data[0]['browseURL'] = request.url_root + data[0]['browseURL']
except Exception as e:
abort(503, str(e))
return jsonify(data)
@app.errorhandler(400)
def handle_bad_request(e):
resp = jsonify({'code': 400, 'message': 'Bad Request - {}'.format(e.description)})
resp.status_code = 400
resp.headers.add('Access-Control-Allow-Origin', '*')
return resp
@app.errorhandler(404)
def handle_page_not_found(e):
resp = jsonify({'code': 404, 'message': 'Page not found'})
resp.status_code = 404
resp.headers.add('Access-Control-Allow-Origin', '*')
return resp
@app.errorhandler(500)
def handle_api_error(e):
resp = jsonify({'code': 500, 'message': 'Internal Server Error'})
resp.status_code = 500
resp.headers.add('Access-Control-Allow-Origin', '*')
return resp
@app.errorhandler(502)
def handle_bad_gateway_error(e):
resp = jsonify({'code': 502, 'message': 'Bad Gateway'})
resp.status_code = 502
resp.headers.add('Access-Control-Allow-Origin', '*')
return resp
@app.errorhandler(503)
def handle_service_unavailable_error(e):
resp = jsonify({'code': 503, 'message': 'Service Unavailable'})
resp.status_code = 503
resp.headers.add('Access-Control-Allow-Origin', '*')
return resp
@app.errorhandler(Exception)
def handle_exception(e):
app.logger.exception(e)
resp = jsonify({'code': 500, 'message': 'Internal Server Error'})
resp.status_code = 500
resp.headers.add('Access-Control-Allow-Origin', '*')
return resp
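# A minimal usage sketch (not part of the original application): exercise the
# OpenSearch endpoints with Flask's test client. The bounding box and count below
# are arbitrary example values, and a working ``inpe_data`` backend plus the Jinja
# templates are assumed to be available.
if __name__ == '__main__':
    with app.test_client() as client:
        response = client.get('/granule.json?bbox=-54,-12,-52,-10&count=5')
        print(response.status_code, response.content_type)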
| gpl-3.0 |
herilalaina/scikit-learn | benchmarks/bench_lof.py | 28 | 3492 | """
============================
LocalOutlierFactor benchmark
============================
A test of LocalOutlierFactor on classical anomaly detection datasets.
Note that LocalOutlierFactor is not meant to predict on a test set and its
performance is assessed in an outlier detection context:
1. The model is trained on the whole dataset which is assumed to contain
outliers.
2. The ROC curve is computed on the same dataset using the knowledge of the
labels.
In this context there is no need to shuffle the dataset because the model
is trained and tested on the whole dataset. The randomness of this benchmark
is only caused by the random selection of anomalies in the SA dataset.
"""
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import LocalOutlierFactor
from sklearn.metrics import roc_curve, auc
from sklearn.datasets import fetch_kddcup99, fetch_covtype, fetch_mldata
from sklearn.preprocessing import LabelBinarizer
print(__doc__)
random_state = 2 # to control the random selection of anomalies in SA
# datasets available: ['http', 'smtp', 'SA', 'SF', 'shuttle', 'forestcover']
datasets = ['http', 'smtp', 'SA', 'SF', 'shuttle', 'forestcover']
plt.figure()
for dataset_name in datasets:
# loading and vectorization
print('loading data')
if dataset_name in ['http', 'smtp', 'SA', 'SF']:
dataset = fetch_kddcup99(subset=dataset_name, percent10=True,
random_state=random_state)
X = dataset.data
y = dataset.target
if dataset_name == 'shuttle':
dataset = fetch_mldata('shuttle')
X = dataset.data
y = dataset.target
# we remove data with label 4
# normal data are then those of class 1
s = (y != 4)
X = X[s, :]
y = y[s]
y = (y != 1).astype(int)
if dataset_name == 'forestcover':
dataset = fetch_covtype()
X = dataset.data
y = dataset.target
# normal data are those with attribute 2
# abnormal those with attribute 4
s = (y == 2) + (y == 4)
X = X[s, :]
y = y[s]
y = (y != 2).astype(int)
print('vectorizing data')
if dataset_name == 'SF':
lb = LabelBinarizer()
x1 = lb.fit_transform(X[:, 1].astype(str))
X = np.c_[X[:, :1], x1, X[:, 2:]]
y = (y != b'normal.').astype(int)
if dataset_name == 'SA':
lb = LabelBinarizer()
x1 = lb.fit_transform(X[:, 1].astype(str))
x2 = lb.fit_transform(X[:, 2].astype(str))
x3 = lb.fit_transform(X[:, 3].astype(str))
X = np.c_[X[:, :1], x1, x2, x3, X[:, 4:]]
y = (y != b'normal.').astype(int)
if dataset_name == 'http' or dataset_name == 'smtp':
y = (y != b'normal.').astype(int)
X = X.astype(float)
print('LocalOutlierFactor processing...')
model = LocalOutlierFactor(n_neighbors=20)
tstart = time()
model.fit(X)
fit_time = time() - tstart
scoring = -model.negative_outlier_factor_ # the lower, the more normal
fpr, tpr, thresholds = roc_curve(y, scoring)
AUC = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=1,
label=('ROC for %s (area = %0.3f, train-time: %0.2fs)'
% (dataset_name, AUC, fit_time)))
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
pravsripad/mne-python | mne/io/artemis123/tests/test_artemis123.py | 10 | 4658 |
# Author: Luke Bloy <bloyl@chop.edu>
#
# License: BSD-3-Clause
import os.path as op
import numpy as np
from numpy.testing import assert_allclose, assert_equal
import pytest
from mne.io import read_raw_artemis123
from mne.io.tests.test_raw import _test_raw_reader
from mne.datasets import testing
from mne.io.artemis123.utils import _generate_mne_locs_file, _load_mne_locs
from mne import pick_types
from mne.transforms import rot_to_quat, _angle_between_quats
from mne.io.constants import FIFF
artemis123_dir = op.join(testing.data_path(download=False), 'ARTEMIS123')
short_HPI_dip_fname = op.join(artemis123_dir,
'Artemis_Data_2017-04-04-15h-44m-' +
'22s_Motion_Translation-z.bin')
dig_fname = op.join(artemis123_dir, 'Phantom_040417_dig.pos')
short_hpi_1kz_fname = op.join(artemis123_dir, 'Artemis_Data_2017-04-14-10h' +
'-38m-59s_Phantom_1k_HPI_1s.bin')
# XXX this tol is way too high, but it's not clear which is correct
# (old or new)
def _assert_trans(actual, desired, dist_tol=0.017, angle_tol=5.):
__tracebackhide__ = True
trans_est = actual[0:3, 3]
quat_est = rot_to_quat(actual[0:3, 0:3])
trans = desired[0:3, 3]
quat = rot_to_quat(desired[0:3, 0:3])
angle = np.rad2deg(_angle_between_quats(quat_est, quat))
dist = np.linalg.norm(trans - trans_est)
assert dist <= dist_tol, \
'%0.3f > %0.3f mm translation' % (1000 * dist, 1000 * dist_tol)
assert angle <= angle_tol, \
'%0.3f > %0.3f° rotation' % (angle, angle_tol)
@pytest.mark.timeout(60) # ~25 sec on Travis Linux OpenBLAS
@testing.requires_testing_data
def test_artemis_reader():
"""Test reading raw Artemis123 files."""
_test_raw_reader(read_raw_artemis123, input_fname=short_hpi_1kz_fname,
pos_fname=dig_fname, verbose='error')
@pytest.mark.timeout(60)
@testing.requires_testing_data
def test_dev_head_t():
"""Test dev_head_t computation for Artemis123."""
    # test a randomly selected point
raw = read_raw_artemis123(short_hpi_1kz_fname, preload=True,
add_head_trans=False)
meg_picks = pick_types(raw.info, meg=True, eeg=False)
# checked against matlab reader.
assert_allclose(raw[meg_picks[12]][0][0][123], 1.08239606023e-11)
dev_head_t_1 = np.array([[9.713e-01, 2.340e-01, -4.164e-02, 1.302e-04],
[-2.371e-01, 9.664e-01, -9.890e-02, 1.977e-03],
[1.710e-02, 1.059e-01, 9.942e-01, -8.159e-03],
[0.0, 0.0, 0.0, 1.0]])
dev_head_t_2 = np.array([[9.890e-01, 1.475e-01, -8.090e-03, 4.997e-04],
[-1.476e-01, 9.846e-01, -9.389e-02, 1.962e-03],
[-5.888e-03, 9.406e-02, 9.955e-01, -1.610e-02],
[0.0, 0.0, 0.0, 1.0]])
expected_dev_hpi_rr = np.array([[-0.01579644, 0.06527367, 0.00152648],
[0.06666813, 0.0148956, 0.00545488],
[-0.06699212, -0.01732376, 0.0112027]])
# test with head loc no digitization
raw = read_raw_artemis123(short_HPI_dip_fname, add_head_trans=True)
_assert_trans(raw.info['dev_head_t']['trans'], dev_head_t_1)
assert_equal(raw.info['sfreq'], 5000.0)
# test with head loc and digitization
with pytest.warns(RuntimeWarning, match='Large difference'):
raw = read_raw_artemis123(short_HPI_dip_fname, add_head_trans=True,
pos_fname=dig_fname)
_assert_trans(raw.info['dev_head_t']['trans'], dev_head_t_1)
# test cHPI localization..
dev_hpi_rr = np.array([p['r'] for p in raw.info['dig']
if p['coord_frame'] == FIFF.FIFFV_COORD_DEVICE])
# points should be within 0.1 mm (1e-4m) and within 1%
assert_allclose(dev_hpi_rr, expected_dev_hpi_rr, atol=1e-4, rtol=0.01)
# test 1kz hpi head loc (different freq)
raw = read_raw_artemis123(short_hpi_1kz_fname, add_head_trans=True)
_assert_trans(raw.info['dev_head_t']['trans'], dev_head_t_2)
assert_equal(raw.info['sfreq'], 1000.0)
def test_utils(tmp_path):
"""Test artemis123 utils."""
# make a tempfile
tmp_dir = str(tmp_path)
tmp_fname = op.join(tmp_dir, 'test_gen_mne_locs.csv')
_generate_mne_locs_file(tmp_fname)
installed_locs = _load_mne_locs()
generated_locs = _load_mne_locs(tmp_fname)
assert_equal(set(installed_locs.keys()), set(generated_locs.keys()))
for key in installed_locs.keys():
assert_allclose(installed_locs[key], generated_locs[key], atol=1e-7)
| bsd-3-clause |
schets/scikit-learn | examples/ensemble/plot_forest_iris.py | 332 | 6271 | """
====================================================================
Plot the decision surfaces of ensembles of trees on the iris dataset
====================================================================
Plot the decision surfaces of forests of randomized trees trained on pairs of
features of the iris dataset.
This plot compares the decision surfaces learned by a decision tree classifier
(first column), by a random forest classifier (second column), by an extra-
trees classifier (third column) and by an AdaBoost classifier (fourth column).
In the first row, the classifiers are built using the sepal width and the sepal
length features only, on the second row using the petal length and sepal length
only, and on the third row using the petal width and the petal length only.
In descending order of quality, when trained (outside of this example) on all
4 features using 30 estimators and scored using 10 fold cross validation, we see::
ExtraTreesClassifier() # 0.95 score
RandomForestClassifier() # 0.94 score
AdaBoost(DecisionTree(max_depth=3)) # 0.94 score
DecisionTree(max_depth=None) # 0.94 score
Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but
the average score does not improve).
See the console's output for further details about each model.
In this example you might try to:
1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and
``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the
``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier``
2) vary ``n_estimators``
It is worth noting that RandomForests and ExtraTrees can be fitted in parallel
on many cores as each tree is built independently of the others. AdaBoost's
samples are built sequentially and so do not use multiple cores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
AdaBoostClassifier)
from sklearn.externals.six.moves import xrange
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
n_estimators = 30
plot_colors = "ryb"
cmap = plt.cm.RdYlBu
plot_step = 0.02 # fine step width for decision surface contours
plot_step_coarser = 0.5 # step widths for coarse classifier guesses
RANDOM_SEED = 13 # fix the seed on each iteration
# Load data
iris = load_iris()
plot_idx = 1
models = [DecisionTreeClassifier(max_depth=None),
RandomForestClassifier(n_estimators=n_estimators),
ExtraTreesClassifier(n_estimators=n_estimators),
AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
n_estimators=n_estimators)]
for pair in ([0, 1], [0, 2], [2, 3]):
for model in models:
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(RANDOM_SEED)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = clone(model)
clf = model.fit(X, y)
scores = clf.score(X, y)
# Create a title for each column and the console by using str() and
# slicing away useless parts of the string
model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")]
model_details = model_title
if hasattr(model, "estimators_"):
model_details += " with {} estimators".format(len(model.estimators_))
        print(model_details + " with features", pair, "has a score of", scores)
plt.subplot(3, 4, plot_idx)
if plot_idx <= len(models):
# Add a title at the top of each column
plt.title(model_title)
# Now plot the decision boundary using a fine mesh as input to a
# filled contour plot
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
# Plot either a single DecisionTreeClassifier or alpha blend the
# decision surfaces of the ensemble of classifiers
if isinstance(model, DecisionTreeClassifier):
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=cmap)
else:
# Choose alpha blend level with respect to the number of estimators
# that are in use (noting that AdaBoost can use fewer estimators
# than its maximum if it achieves a good enough fit early on)
estimator_alpha = 1.0 / len(model.estimators_)
for tree in model.estimators_:
Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)
# Build a coarser grid to plot a set of ensemble classifications
# to show how these are different to what we see in the decision
        # surfaces. These points are regularly spaced and do not have a black outline
xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser),
np.arange(y_min, y_max, plot_step_coarser))
Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape)
cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none")
# Plot the training points, these are clustered together and have a
# black outline
for i, c in zip(xrange(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i],
cmap=cmap)
plot_idx += 1 # move on to the next plot in sequence
plt.suptitle("Classifiers on feature subsets of the Iris dataset")
plt.axis("tight")
plt.show()
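# A minimal sketch (not part of the original example): reproduce the 10-fold
# cross-validation scores quoted in the module docstring, using all four iris
# features. The import location of cross_val_score depends on the installed
# scikit-learn version, which is an assumption here.
try:
    from sklearn.model_selection import cross_val_score
except ImportError:  # older scikit-learn releases
    from sklearn.cross_validation import cross_val_score

for model in models:
    cv_scores = cross_val_score(clone(model), iris.data, iris.target, cv=10)
    print("%s: %.3f +/- %.3f" % (type(model).__name__,
                                 cv_scores.mean(), cv_scores.std()))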
| bsd-3-clause |
schets/scikit-learn | sklearn/ensemble/tests/test_voting_classifier.py | 37 | 7136 | """Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.grid_search import GridSearchCV
from sklearn import datasets
from sklearn import cross_validation
from sklearn.datasets import make_multilabel_classification
from sklearn.svm import SVC
from sklearn.multiclass import OneVsRestClassifier
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
X, y = iris.data[:, 1:3], iris.target
def test_majority_label_iris():
"""Check classification by majority label on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
scores = cross_validation.cross_val_score(eclf,
X,
y,
cv=5,
scoring='accuracy')
assert_almost_equal(scores.mean(), 0.95, decimal=2)
def test_tie_situation():
"""Check voting classifier selects smaller class label in tie situation."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2)],
voting='hard')
assert_equal(clf1.fit(X, y).predict(X)[73], 2)
assert_equal(clf2.fit(X, y).predict(X)[73], 1)
assert_equal(eclf.fit(X, y).predict(X)[73], 1)
def test_weights_iris():
"""Check classification by average probabilities on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 2, 10])
scores = cross_validation.cross_val_score(eclf,
X,
y,
cv=5,
scoring='accuracy')
assert_almost_equal(scores.mean(), 0.93, decimal=2)
def test_predict_on_toy_problem():
"""Manually check predicted class labels for toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5],
[-1.2, -1.4],
[-3.4, -2.2],
[1.1, 1.2],
[2.1, 1.4],
[3.1, 2.3]])
y = np.array([1, 1, 1, 2, 2, 2])
assert_equal(all(clf1.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
assert_equal(all(clf2.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
assert_equal(all(clf3.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard',
weights=[1, 1, 1])
assert_equal(all(eclf.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 1])
assert_equal(all(eclf.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
def test_predict_proba_on_toy_problem():
"""Calculate predicted probabilities on toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
clf1_res = np.array([[0.59790391, 0.40209609],
[0.57622162, 0.42377838],
[0.50728456, 0.49271544],
[0.40241774, 0.59758226]])
clf2_res = np.array([[0.8, 0.2],
[0.8, 0.2],
[0.2, 0.8],
[0.3, 0.7]])
clf3_res = np.array([[0.9985082, 0.0014918],
[0.99845843, 0.00154157],
[0., 1.],
[0., 1.]])
t00 = (2*clf1_res[0][0] + clf2_res[0][0] + clf3_res[0][0]) / 4
t11 = (2*clf1_res[1][1] + clf2_res[1][1] + clf3_res[1][1]) / 4
t21 = (2*clf1_res[2][1] + clf2_res[2][1] + clf3_res[2][1]) / 4
t31 = (2*clf1_res[3][1] + clf2_res[3][1] + clf3_res[3][1]) / 4
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[2, 1, 1])
eclf_res = eclf.fit(X, y).predict_proba(X)
assert_almost_equal(t00, eclf_res[0][0], decimal=1)
assert_almost_equal(t11, eclf_res[1][1], decimal=1)
assert_almost_equal(t21, eclf_res[2][1], decimal=1)
assert_almost_equal(t31, eclf_res[3][1], decimal=1)
try:
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
eclf.fit(X, y).predict_proba(X)
except AttributeError:
pass
else:
raise AssertionError('AttributeError for voting == "hard"'
' and with predict_proba not raised')
def test_multilabel():
"""Check if error is raised for multilabel classification."""
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=123)
clf = OneVsRestClassifier(SVC(kernel='linear'))
eclf = VotingClassifier(estimators=[('ovr', clf)], voting='hard')
try:
eclf.fit(X, y)
except NotImplementedError:
return
def test_gridsearch():
"""Check GridSearch support."""
clf1 = LogisticRegression(random_state=1)
clf2 = RandomForestClassifier(random_state=1)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft')
params = {'lr__C': [1.0, 100.0],
'rf__n_estimators': [20, 200]}
grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5)
grid.fit(iris.data, iris.target)
expect = [0.953, 0.960, 0.960, 0.953]
scores = [mean_score for params, mean_score, scores in grid.grid_scores_]
for e, s in zip(expect, scores):
assert_almost_equal(e, s, decimal=3)
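# A minimal additional check (a sketch, not part of the original test suite): soft
# voting should equal the weighted average of the estimators' class probabilities.
def test_weighted_average_probas():
    """Check soft voting equals the weighted mean of predict_proba outputs."""
    clf1 = LogisticRegression(random_state=123)
    clf2 = GaussianNB()
    X = np.array([[-1.1, -1.5], [-1.2, -1.4], [1.1, 1.2], [2.1, 1.4]])
    y = np.array([1, 1, 2, 2])
    eclf = VotingClassifier(estimators=[('lr', clf1), ('gnb', clf2)],
                            voting='soft',
                            weights=[2, 1])
    eclf.fit(X, y)
    expected = (2 * clf1.fit(X, y).predict_proba(X) +
                1 * clf2.fit(X, y).predict_proba(X)) / 3
    assert_almost_equal(eclf.predict_proba(X), expected, decimal=6)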
| bsd-3-clause |
pravsripad/mne-python | mne/gui/tests/test_ieeg_locate_gui.py | 2 | 7792 | # -*- coding: utf-8 -*-
# Authors: Alex Rockhill <aprockhill@mailbox.org>
#
# License: BSD-3-clause
import os.path as op
import numpy as np
from numpy.testing import assert_allclose
import pytest
import mne
from mne.datasets import testing
from mne.transforms import apply_trans
from mne.utils import requires_nibabel, requires_version, use_log_level
from mne.viz.utils import _fake_click
data_path = testing.data_path(download=False)
subject = 'sample'
subjects_dir = op.join(data_path, 'subjects')
sample_dir = op.join(data_path, 'MEG', subject)
raw_path = op.join(sample_dir, 'sample_audvis_trunc_raw.fif')
fname_trans = op.join(sample_dir, 'sample_audvis_trunc-trans.fif')
@requires_nibabel()
@pytest.fixture
def _fake_CT_coords(skull_size=5, contact_size=2):
"""Make somewhat realistic CT data with contacts."""
import nibabel as nib
brain = nib.load(
op.join(subjects_dir, subject, 'mri', 'brain.mgz'))
verts = mne.read_surface(
op.join(subjects_dir, subject, 'bem', 'outer_skull.surf'))[0]
verts = apply_trans(np.linalg.inv(brain.header.get_vox2ras_tkr()), verts)
x, y, z = np.array(brain.shape).astype(int) // 2
coords = [(x, y - 14, z), (x - 10, y - 15, z),
(x - 20, y - 16, z + 1), (x - 30, y - 16, z + 1)]
center = np.array(brain.shape) / 2
# make image
np.random.seed(99)
ct_data = np.random.random(brain.shape).astype(np.float32) * 100
# make skull
for vert in verts:
x, y, z = np.round(vert).astype(int)
ct_data[slice(x - skull_size, x + skull_size + 1),
slice(y - skull_size, y + skull_size + 1),
slice(z - skull_size, z + skull_size + 1)] = 1000
# add electrode with contacts
for (x, y, z) in coords:
# make sure not in skull
assert np.linalg.norm(center - np.array((x, y, z))) < 50
ct_data[slice(x - contact_size, x + contact_size + 1),
slice(y - contact_size, y + contact_size + 1),
slice(z - contact_size, z + contact_size + 1)] = \
1000 - np.linalg.norm(np.array(np.meshgrid(
*[range(-contact_size, contact_size + 1)] * 3)), axis=0)
ct = nib.MGHImage(ct_data, brain.affine)
coords = apply_trans(ct.header.get_vox2ras_tkr(), np.array(coords))
return ct, coords
@pytest.fixture
def _locate_ieeg(renderer_interactive_pyvistaqt):
# Use a fixture to create these classes so we can ensure that they
# are closed at the end of the test
guis = list()
def fun(*args, **kwargs):
guis.append(mne.gui.locate_ieeg(*args, **kwargs))
return guis[-1]
yield fun
for gui in guis:
try:
gui.close()
except Exception:
pass
def test_ieeg_elec_locate_gui_io(_locate_ieeg):
"""Test the input/output of the intracranial location GUI."""
import nibabel as nib
info = mne.create_info([], 1000)
aligned_ct = nib.MGHImage(np.zeros((256, 256, 256), dtype=np.float32),
np.eye(4))
trans = mne.transforms.Transform('head', 'mri')
with pytest.raises(ValueError,
match='No channels found in `info` to locate'):
_locate_ieeg(info, trans, aligned_ct, subject, subjects_dir)
@requires_version('sphinx_gallery')
@testing.requires_testing_data
def test_locate_scraper(_locate_ieeg, _fake_CT_coords, tmp_path):
"""Test sphinx-gallery scraping of the GUI."""
raw = mne.io.read_raw_fif(raw_path)
raw.pick_types(eeg=True)
ch_dict = {'EEG 001': 'LAMY 1', 'EEG 002': 'LAMY 2',
'EEG 003': 'LSTN 1', 'EEG 004': 'LSTN 2'}
raw.pick_channels(list(ch_dict.keys()))
raw.rename_channels(ch_dict)
raw.set_montage(None)
aligned_ct, _ = _fake_CT_coords
trans = mne.read_trans(fname_trans)
with pytest.warns(RuntimeWarning, match='`pial` surface not found'):
gui = _locate_ieeg(raw.info, trans, aligned_ct,
subject=subject, subjects_dir=subjects_dir)
(tmp_path / '_images').mkdir()
image_path = str(tmp_path / '_images' / 'temp.png')
gallery_conf = dict(builder_name='html', src_dir=str(tmp_path))
block_vars = dict(
example_globals=dict(gui=gui),
image_path_iterator=iter([image_path]))
assert not op.isfile(image_path)
assert not getattr(gui, '_scraped', False)
mne.gui._GUIScraper()(None, block_vars, gallery_conf)
assert op.isfile(image_path)
assert gui._scraped
@testing.requires_testing_data
def test_ieeg_elec_locate_gui_display(_locate_ieeg, _fake_CT_coords):
"""Test that the intracranial location GUI displays properly."""
raw = mne.io.read_raw_fif(raw_path, preload=True)
raw.pick_types(eeg=True)
ch_dict = {'EEG 001': 'LAMY 1', 'EEG 002': 'LAMY 2',
'EEG 003': 'LSTN 1', 'EEG 004': 'LSTN 2'}
raw.pick_channels(list(ch_dict.keys()))
raw.rename_channels(ch_dict)
raw.set_eeg_reference('average')
raw.set_channel_types({name: 'seeg' for name in raw.ch_names})
raw.set_montage(None)
aligned_ct, coords = _fake_CT_coords
trans = mne.read_trans(fname_trans)
with pytest.warns(RuntimeWarning, match='`pial` surface not found'):
gui = _locate_ieeg(raw.info, trans, aligned_ct,
subject=subject, subjects_dir=subjects_dir,
verbose=True)
with pytest.raises(ValueError, match='read-only'):
gui._ras[:] = coords[0] # start in the right position
gui._set_ras(coords[0])
gui._mark_ch()
assert not gui._lines and not gui._lines_2D # no lines for one contact
for ci, coord in enumerate(coords[1:], 1):
coord_vox = apply_trans(gui._ras_vox_t, coord)
with use_log_level('debug'):
_fake_click(gui._figs[2], gui._figs[2].axes[0],
coord_vox[:-1], xform='data', kind='release')
assert_allclose(coord[:2], gui._ras[:2], atol=0.1,
err_msg=f'coords[{ci}][:2]')
assert_allclose(coord[2], gui._ras[2], atol=2,
err_msg=f'coords[{ci}][2]')
gui._mark_ch()
# ensure a 3D line was made for each group
assert len(gui._lines) == 2
# test snap to center
gui._ch_index = 0
gui._set_ras(coords[0]) # move to first position
gui._mark_ch()
assert_allclose(coords[0], gui._chs['LAMY 1'], atol=0.2)
gui._snap_button.click()
assert gui._snap_button.text() == 'Off'
# now make sure no snap happens
gui._ch_index = 0
gui._set_ras(coords[1] + 1)
gui._mark_ch()
assert_allclose(coords[1] + 1, gui._chs['LAMY 1'], atol=0.01)
# check that it turns back on
gui._snap_button.click()
assert gui._snap_button.text() == 'On'
# test remove
gui._ch_index = 1
gui._update_ch_selection()
gui._remove_ch()
assert np.isnan(gui._chs['LAMY 2']).all()
# check that raw object saved
assert not np.isnan(raw.info['chs'][0]['loc'][:3]).any() # LAMY 1
assert np.isnan(raw.info['chs'][1]['loc'][:3]).all() # LAMY 2 (removed)
# move sliders
gui._alpha_slider.setValue(75)
assert gui._ch_alpha == 0.75
gui._radius_slider.setValue(5)
assert gui._radius == 5
ct_sum_before = np.nansum(gui._images['ct'][0].get_array().data)
gui._ct_min_slider.setValue(500)
assert np.nansum(gui._images['ct'][0].get_array().data) < ct_sum_before
# test buttons
gui._toggle_show_brain()
assert 'mri' in gui._images
assert 'local_max' not in gui._images
gui._toggle_show_max()
assert 'local_max' in gui._images
assert 'mip' not in gui._images
gui._toggle_show_mip()
assert 'mip' in gui._images
assert 'mip_chs' in gui._images
assert len(gui._lines_2D) == 1 # LAMY only has one contact
| bsd-3-clause |
schets/scikit-learn | examples/svm/plot_oneclass.py | 248 | 2302 | """
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Novelty Detection")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.Blues_r)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='red')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='orange')
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
["learned frontier", "training observations",
"new regular observations", "new abnormal observations"],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11))
plt.xlabel(
"error train: %d/200 ; errors novel regular: %d/40 ; "
"errors novel abnormal: %d/40"
% (n_error_train, n_error_test, n_error_outliers))
plt.show()
| bsd-3-clause |
diyclassics/cltk | src/cltk/alphabet/fro.py | 4 | 2481 | """The normalizer aims to maximally reduce the variation between the orthography of texts written in the Anglo-Norman dialect
to bring it in line with “orthographe commune”. It is heavily inspired by Pope (1956).
Spelling variation is not consistent enough to ensure the highest accuracy; the normalizer in its current format should
therefore be used as a last resort.
The normalizer, word tokenizer, stemmer, lemmatizer, and list of stopwords for OF/MF were developed as part of Google Summer of Code 2017.
A full write-up of this work can be found at : https://gist.github.com/nat1881/6f134617805e2efbe5d275770e26d350
**References :** Pope, M.K. 1956. From Latin to Modern French with Especial Consideration of Anglo-Norman. Manchester: MUP.
Anglo-French spelling variants normalized to "orthographe commune", from M. K. Pope (1956)
- word-final d - e.g. vertud vs vertu
- use of <u> over <ou>
- <eaus> for <eus>, <ceaus> for <ceus>
- triphtongs:
- <iu> for <ieu>
- <u> for <eu>
- <ie> for <iee>
- <ue> for <uee>
- <ure> for <eure>
- "epenthetic vowels" - e.g. averai for avrai
- <eo> for <o>
- <iw>, <ew> for <ieux>
- final <a> for <e>
"""
import re
from typing import List
FRO_PATTERNS = [
("eaus$", "eus"),
("ceaus$", "ceus"),
("iu", "ieu"),
("((?<!^)|(?<!(e)))u(?!$)", "eu"),
("ie$", "iee"),
("ue$", "uee"),
("ure$", "eure"),
("eo$", "o"),
("iw$", "ieux"),
("ew$", "ieux"),
("a$", "e"),
("^en", "an"),
("d$", ""),
]
def build_match_and_apply_functions(pattern, replace):
"""Assemble regex patterns."""
def matches_rule(word):
return re.search(pattern, word)
def apply_rule(word):
return re.sub(pattern, replace, word)
return matches_rule, apply_rule
def normalize_fr(tokens: List[str]) -> List[str]:
"""Normalize Old and Middle French tokens.
    TODO: Make this work again with a tokenizer.
"""
# from cltk.tokenizers.word import WordTokenizer
# string = string.lower()
# word_tokenizer = WordTokenizer("fro")
# tokens = word_tokenizer.tokenize(string)
rules = [
build_match_and_apply_functions(pattern, replace)
for (pattern, replace) in FRO_PATTERNS
]
normalized_text = []
for token in tokens:
for matches_rule, apply_rule in rules:
if matches_rule(token):
normalized = apply_rule(token)
normalized_text.append(normalized)
return normalized_text
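# A minimal usage sketch, assuming the input has already been split into word
# tokens (e.g. with a word tokenizer for "fro"); the sample spellings are taken
# from the pattern notes in the module docstring. Note that ``normalize_fr``
# appends one candidate form per matching rule, so a token that matches several
# patterns yields several entries and a token that matches none is skipped.
if __name__ == "__main__":
    sample_tokens = ["vertud", "ceaus"]
    print(normalize_fr(sample_tokens))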
| mit |
rflamary/POT | test/test_dr.py | 1 | 1328 | """Tests for module dr on Dimensionality Reduction """
# Author: Remi Flamary <remi.flamary@unice.fr>
#
# License: MIT License
import numpy as np
import ot
import pytest
try: # test if autograd and pymanopt are installed
import ot.dr
nogo = False
except ImportError:
nogo = True
@pytest.mark.skipif(nogo, reason="Missing modules (autograd or pymanopt)")
def test_fda():
n_samples = 90 # nb samples in source and target datasets
np.random.seed(0)
# generate gaussian dataset
xs, ys = ot.datasets.make_data_classif('gaussrot', n_samples)
n_features_noise = 8
xs = np.hstack((xs, np.random.randn(n_samples, n_features_noise)))
p = 1
Pfda, projfda = ot.dr.fda(xs, ys, p)
projfda(xs)
np.testing.assert_allclose(np.sum(Pfda**2, 0), np.ones(p))
@pytest.mark.skipif(nogo, reason="Missing modules (autograd or pymanopt)")
def test_wda():
n_samples = 100 # nb samples in source and target datasets
np.random.seed(0)
# generate gaussian dataset
xs, ys = ot.datasets.make_data_classif('gaussrot', n_samples)
n_features_noise = 8
xs = np.hstack((xs, np.random.randn(n_samples, n_features_noise)))
p = 2
Pwda, projwda = ot.dr.wda(xs, ys, p, maxiter=10)
projwda(xs)
np.testing.assert_allclose(np.sum(Pwda**2, 0), np.ones(p))
| mit |
marctc/django | tests/generic_views/test_list.py | 306 | 12129 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, override_settings
from django.utils.encoding import force_str
from django.views.generic.base import View
from .models import Artist, Author, Book, Page
@override_settings(ROOT_URLCONF='generic_views.urls')
class ListViewTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.artist1 = Artist.objects.create(name='Rene Magritte')
cls.author1 = Author.objects.create(name='Roberto Bolaño', slug='roberto-bolano')
cls.author2 = Author.objects.create(name='Scott Rosenberg', slug='scott-rosenberg')
cls.book1 = Book.objects.create(name='2066', slug='2066', pages=800, pubdate=datetime.date(2008, 10, 1))
cls.book1.authors.add(cls.author1)
cls.book2 = Book.objects.create(
name='Dreaming in Code', slug='dreaming-in-code', pages=300, pubdate=datetime.date(2006, 5, 1)
)
cls.page1 = Page.objects.create(
content='I was once bitten by a moose.', template='generic_views/page_template.html'
)
def test_items(self):
res = self.client.get('/list/dict/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/list.html')
self.assertEqual(res.context['object_list'][0]['first'], 'John')
def test_queryset(self):
res = self.client.get('/list/authors/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
self.assertIsInstance(res.context['view'], View)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertIsNone(res.context['paginator'])
self.assertIsNone(res.context['page_obj'])
self.assertFalse(res.context['is_paginated'])
def test_paginated_queryset(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
self.assertEqual(len(res.context['object_list']), 30)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertTrue(res.context['is_paginated'])
self.assertEqual(res.context['page_obj'].number, 1)
self.assertEqual(res.context['paginator'].num_pages, 4)
self.assertEqual(res.context['author_list'][0].name, 'Author 00')
self.assertEqual(list(res.context['author_list'])[-1].name, 'Author 29')
def test_paginated_queryset_shortdata(self):
# Test that short datasets ALSO result in a paginated view.
res = self.client.get('/list/authors/paginated/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertEqual(res.context['page_obj'].number, 1)
self.assertEqual(res.context['paginator'].num_pages, 1)
self.assertFalse(res.context['is_paginated'])
def test_paginated_get_page_by_query_string(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/', {'page': '2'})
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
self.assertEqual(len(res.context['object_list']), 30)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertEqual(res.context['author_list'][0].name, 'Author 30')
self.assertEqual(res.context['page_obj'].number, 2)
def test_paginated_get_last_page_by_query_string(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/', {'page': 'last'})
self.assertEqual(res.status_code, 200)
self.assertEqual(len(res.context['object_list']), 10)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertEqual(res.context['author_list'][0].name, 'Author 90')
self.assertEqual(res.context['page_obj'].number, 4)
def test_paginated_get_page_by_urlvar(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/3/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
self.assertEqual(len(res.context['object_list']), 30)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertEqual(res.context['author_list'][0].name, 'Author 60')
self.assertEqual(res.context['page_obj'].number, 3)
def test_paginated_page_out_of_range(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/42/')
self.assertEqual(res.status_code, 404)
def test_paginated_invalid_page(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/?page=frog')
self.assertEqual(res.status_code, 404)
def test_paginated_custom_paginator_class(self):
self._make_authors(7)
res = self.client.get('/list/authors/paginated/custom_class/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['paginator'].num_pages, 1)
# Custom pagination allows for 2 orphans on a page size of 5
self.assertEqual(len(res.context['object_list']), 7)
def test_paginated_custom_page_kwarg(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/custom_page_kwarg/', {'pagina': '2'})
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
self.assertEqual(len(res.context['object_list']), 30)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertEqual(res.context['author_list'][0].name, 'Author 30')
self.assertEqual(res.context['page_obj'].number, 2)
def test_paginated_custom_paginator_constructor(self):
self._make_authors(7)
res = self.client.get('/list/authors/paginated/custom_constructor/')
self.assertEqual(res.status_code, 200)
# Custom pagination allows for 2 orphans on a page size of 5
self.assertEqual(len(res.context['object_list']), 7)
def test_paginated_orphaned_queryset(self):
self._make_authors(92)
res = self.client.get('/list/authors/paginated-orphaned/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['page_obj'].number, 1)
res = self.client.get(
'/list/authors/paginated-orphaned/', {'page': 'last'})
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['page_obj'].number, 3)
res = self.client.get(
'/list/authors/paginated-orphaned/', {'page': '3'})
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['page_obj'].number, 3)
res = self.client.get(
'/list/authors/paginated-orphaned/', {'page': '4'})
self.assertEqual(res.status_code, 404)
def test_paginated_non_queryset(self):
res = self.client.get('/list/dict/paginated/')
self.assertEqual(res.status_code, 200)
self.assertEqual(len(res.context['object_list']), 1)
def test_verbose_name(self):
res = self.client.get('/list/artists/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/list.html')
self.assertEqual(list(res.context['object_list']), list(Artist.objects.all()))
self.assertIs(res.context['artist_list'], res.context['object_list'])
self.assertIsNone(res.context['paginator'])
self.assertIsNone(res.context['page_obj'])
self.assertFalse(res.context['is_paginated'])
def test_allow_empty_false(self):
res = self.client.get('/list/authors/notempty/')
self.assertEqual(res.status_code, 200)
Author.objects.all().delete()
res = self.client.get('/list/authors/notempty/')
self.assertEqual(res.status_code, 404)
def test_template_name(self):
res = self.client.get('/list/authors/template_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertTemplateUsed(res, 'generic_views/list.html')
def test_template_name_suffix(self):
res = self.client.get('/list/authors/template_name_suffix/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertTemplateUsed(res, 'generic_views/author_objects.html')
def test_context_object_name(self):
res = self.client.get('/list/authors/context_object_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
self.assertNotIn('authors', res.context)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertTemplateUsed(res, 'generic_views/author_list.html')
def test_duplicate_context_object_name(self):
res = self.client.get('/list/authors/dupe_context_object_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
self.assertNotIn('authors', res.context)
self.assertNotIn('author_list', res.context)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
def test_missing_items(self):
self.assertRaises(ImproperlyConfigured, self.client.get, '/list/authors/invalid/')
def test_paginated_list_view_does_not_load_entire_table(self):
# Regression test for #17535
self._make_authors(3)
# 1 query for authors
with self.assertNumQueries(1):
self.client.get('/list/authors/notempty/')
# same as above + 1 query to test if authors exist + 1 query for pagination
with self.assertNumQueries(3):
self.client.get('/list/authors/notempty/paginated/')
def test_explicitly_ordered_list_view(self):
Book.objects.create(name="Zebras for Dummies", pages=800, pubdate=datetime.date(2006, 9, 1))
res = self.client.get('/list/books/sorted/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object_list'][0].name, '2066')
self.assertEqual(res.context['object_list'][1].name, 'Dreaming in Code')
self.assertEqual(res.context['object_list'][2].name, 'Zebras for Dummies')
res = self.client.get('/list/books/sortedbypagesandnamedec/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object_list'][0].name, 'Dreaming in Code')
self.assertEqual(res.context['object_list'][1].name, 'Zebras for Dummies')
self.assertEqual(res.context['object_list'][2].name, '2066')
@override_settings(DEBUG=True)
def test_paginated_list_view_returns_useful_message_on_invalid_page(self):
# test for #19240
# tests that source exception's message is included in page
self._make_authors(1)
res = self.client.get('/list/authors/paginated/2/')
self.assertEqual(res.status_code, 404)
self.assertEqual(force_str(res.context.get('reason')),
"Invalid page (2): That page contains no results")
def _make_authors(self, n):
Author.objects.all().delete()
for i in range(n):
Author.objects.create(name='Author %02i' % i, slug='a%s' % i)
| bsd-3-clause |
herilalaina/scikit-learn | examples/cluster/plot_digits_agglomeration.py | 373 | 1694 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Feature agglomeration
=========================================================
These images show how similar features are merged together using
feature agglomeration.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph
digits = datasets.load_digits()
images = digits.images
X = np.reshape(images, (len(images), -1))
connectivity = grid_to_graph(*images[0].shape)
agglo = cluster.FeatureAgglomeration(connectivity=connectivity,
n_clusters=32)
agglo.fit(X)
X_reduced = agglo.transform(X)
X_restored = agglo.inverse_transform(X_reduced)
images_restored = np.reshape(X_restored, images.shape)
plt.figure(1, figsize=(4, 3.5))
plt.clf()
plt.subplots_adjust(left=.01, right=.99, bottom=.01, top=.91)
for i in range(4):
plt.subplot(3, 4, i + 1)
plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation='nearest')
plt.xticks(())
plt.yticks(())
if i == 1:
plt.title('Original data')
plt.subplot(3, 4, 4 + i + 1)
plt.imshow(images_restored[i], cmap=plt.cm.gray, vmax=16,
interpolation='nearest')
if i == 1:
plt.title('Agglomerated data')
plt.xticks(())
plt.yticks(())
plt.subplot(3, 4, 10)
plt.imshow(np.reshape(agglo.labels_, images[0].shape),
interpolation='nearest', cmap=plt.cm.spectral)
plt.xticks(())
plt.yticks(())
plt.title('Labels')
plt.show()
| bsd-3-clause |
schets/scikit-learn | sklearn/feature_selection/tests/test_rfe.py | 7 | 11398 | """
Testing Recursive feature elimination
"""
import warnings
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal, assert_true
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris, make_friedman1, make_regression
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC, SVR
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils import check_random_state
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
class MockClassifier(object):
"""
    Dummy classifier to test recursive feature elimination
"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
self.coef_ = np.ones(X.shape[1], dtype=np.float64)
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=True):
return {'foo_param': self.foo_param}
def set_params(self, **params):
return self
def test_rfe_set_params():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
y_pred = rfe.fit(X, y).predict(X)
clf = SVC()
with warnings.catch_warnings(record=True):
# estimator_params is deprecated
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'})
y_pred2 = rfe.fit(X, y).predict(X)
assert_array_equal(y_pred, y_pred2)
def test_rfe_features_importance():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = RandomForestClassifier(n_estimators=20,
random_state=generator, max_depth=2)
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
assert_equal(len(rfe.ranking_), X.shape[1])
clf_svc = SVC(kernel="linear")
rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1)
rfe_svc.fit(X, y)
# Check if the supports are equal
assert_array_equal(rfe.get_support(), rfe_svc.get_support())
def test_rfe_deprecation_estimator_params():
deprecation_message = ("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.")
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
assert_warns_message(DeprecationWarning, deprecation_message,
RFE(estimator=SVC(), n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
assert_warns_message(DeprecationWarning, deprecation_message,
RFECV(estimator=SVC(), step=1, cv=5,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
def test_rfe():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
X_sparse = sparse.csr_matrix(X)
y = iris.target
# dense model
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
# sparse model
clf_sparse = SVC(kernel="linear")
rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
rfe_sparse.fit(X_sparse, y)
X_r_sparse = rfe_sparse.transform(X_sparse)
assert_equal(X_r.shape, iris.data.shape)
assert_array_almost_equal(X_r[:10], iris.data[:10])
assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfe_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
# dense model
clf = MockClassifier()
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
assert_equal(X_r.shape, iris.data.shape)
def test_rfecv():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
# All the noisy variable were filtered out
assert_array_equal(X_r, iris.data)
# same in sparse
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
# Test using a customized loss function
scoring = make_scorer(zero_one_loss, greater_is_better=False)
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scoring)
ignore_warnings(rfecv.fit)(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test using a scorer
scorer = get_scorer('accuracy')
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scorer)
rfecv.fit(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test fix on grid_scores
def test_scorer(estimator, X, y):
return 1.0
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=test_scorer)
rfecv.fit(X, y)
assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))
# Same as the first two tests, but with step=2
rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
rfecv.fit(X, y)
assert_equal(len(rfecv.grid_scores_), 6)
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
def test_rfecv_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=MockClassifier(), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
def test_rfe_min_step():
n_features = 10
X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
n_samples, n_features = X.shape
estimator = SVR(kernel="linear")
# Test when floor(step * n_features) <= 0
selector = RFE(estimator, step=0.01)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is between (0,1) and floor(step * n_features) > 0
selector = RFE(estimator, step=0.20)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is an integer
selector = RFE(estimator, step=5)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
def test_number_of_subsets_of_features():
# In RFE, 'number_of_subsets_of_features'
# = the number of iterations in '_fit'
# = max(ranking_)
# = 1 + (n_features + step - n_features_to_select - 1) // step
# After optimization #4534, this number
# = 1 + np.ceil((n_features - n_features_to_select) / float(step))
# This test case is to test their equivalence, refer to #4534 and #3824
def formula1(n_features, n_features_to_select, step):
return 1 + ((n_features + step - n_features_to_select - 1) // step)
def formula2(n_features, n_features_to_select, step):
return 1 + np.ceil((n_features - n_features_to_select) / float(step))
# RFE
# Case 1, n_features - n_features_to_select is divisible by step
# Case 2, n_features - n_features_to_select is not divisible by step
n_features_list = [11, 11]
n_features_to_select_list = [3, 3]
step_list = [2, 3]
for n_features, n_features_to_select, step in zip(
n_features_list, n_features_to_select_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfe = RFE(estimator=SVC(kernel="linear"),
n_features_to_select=n_features_to_select, step=step)
rfe.fit(X, y)
# this number also equals to the maximum of ranking_
assert_equal(np.max(rfe.ranking_),
formula1(n_features, n_features_to_select, step))
assert_equal(np.max(rfe.ranking_),
formula2(n_features, n_features_to_select, step))
# In RFECV, 'fit' calls 'RFE._fit'
# 'number_of_subsets_of_features' of RFE
# = the size of 'grid_scores' of RFECV
# = the number of iterations of the for loop before optimization #4534
# RFECV, n_features_to_select = 1
# Case 1, n_features - 1 is divisible by step
# Case 2, n_features - 1 is not divisible by step
n_features_to_select = 1
n_features_list = [11, 10]
step_list = [2, 2]
for n_features, step in zip(n_features_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfecv = RFECV(estimator=SVC(kernel="linear"), step=step, cv=5)
rfecv.fit(X, y)
assert_equal(rfecv.grid_scores_.shape[0],
formula1(n_features, n_features_to_select, step))
assert_equal(rfecv.grid_scores_.shape[0],
formula2(n_features, n_features_to_select, step))
| bsd-3-clause |
marmarko/ml101 | tensorflow/examples/skflow/iris.py | 25 | 1649 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib import learn
def main(unused_argv):
# Load dataset.
iris = learn.datasets.load_dataset('iris')
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = learn.infer_real_valued_columns_from_input(x_train)
classifier = learn.DNNClassifier(
feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)
# Fit and predict.
classifier.fit(x_train, y_train, steps=200)
predictions = list(classifier.predict(x_test, as_iterable=True))
score = metrics.accuracy_score(y_test, predictions)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| bsd-2-clause |
herilalaina/scikit-learn | sklearn/datasets/species_distributions.py | 20 | 8840 | """
=============================
Species distribution dataset
=============================
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
   Colombia, Ecuador, and Venezuela.
References
----------
`"Maximum entropy modeling of species geographic distributions"
<http://rob.schapire.net/papers/ecolmod.pdf>`_ S. J. Phillips,
R. P. Anderson, R. E. Schapire - Ecological Modelling, 190:231-259, 2006.
Notes
-----
For an example of using this dataset, see
:ref:`examples/applications/plot_species_distribution_modeling.py
<sphx_glr_auto_examples_applications_plot_species_distribution_modeling.py>`.
"""
# Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD 3 clause
from io import BytesIO
from os import makedirs, remove
from os.path import exists
import sys
import logging
import numpy as np
from .base import get_data_home
from .base import _fetch_remote
from .base import RemoteFileMetadata
from ..utils import Bunch
from sklearn.datasets.base import _pkl_filepath
from sklearn.externals import joblib
PY3_OR_LATER = sys.version_info[0] >= 3
# The original data can be found at:
# http://biodiversityinformatics.amnh.org/open_source/maxent/samples.zip
SAMPLES = RemoteFileMetadata(
filename='samples.zip',
url='https://ndownloader.figshare.com/files/5976075',
checksum=('abb07ad284ac50d9e6d20f1c4211e0fd'
'3c098f7f85955e89d321ee8efe37ac28'))
# The original data can be found at:
# http://biodiversityinformatics.amnh.org/open_source/maxent/coverages.zip
COVERAGES = RemoteFileMetadata(
filename='coverages.zip',
url='https://ndownloader.figshare.com/files/5976078',
checksum=('4d862674d72e79d6cee77e63b98651ec'
'7926043ba7d39dcb31329cf3f6073807'))
DATA_ARCHIVE_NAME = "species_coverage.pkz"
logger = logging.getLogger(__name__)
def _load_coverage(F, header_length=6, dtype=np.int16):
"""Load a coverage file from an open file object.
This will return a numpy array of the given dtype
"""
header = [F.readline() for i in range(header_length)]
make_tuple = lambda t: (t.split()[0], float(t.split()[1]))
header = dict([make_tuple(line) for line in header])
M = np.loadtxt(F, dtype=dtype)
nodata = int(header[b'NODATA_value'])
if nodata != -9999:
        M[M == nodata] = -9999
return M
def _load_csv(F):
"""Load csv file.
Parameters
----------
F : file object
CSV file open in byte mode.
Returns
-------
rec : np.ndarray
record array representing the data
"""
if PY3_OR_LATER:
# Numpy recarray wants Python 3 str but not bytes...
names = F.readline().decode('ascii').strip().split(',')
else:
# Numpy recarray wants Python 2 str but not unicode
names = F.readline().strip().split(',')
rec = np.loadtxt(F, skiprows=0, delimiter=',', dtype='a22,f4,f4')
rec.dtype.names = names
return rec
def construct_grids(batch):
"""Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
"""
# x,y coordinates for corner cells
xmin = batch.x_left_lower_corner + batch.grid_size
xmax = xmin + (batch.Nx * batch.grid_size)
ymin = batch.y_left_lower_corner + batch.grid_size
ymax = ymin + (batch.Ny * batch.grid_size)
# x coordinates of the grid cells
xgrid = np.arange(xmin, xmax, batch.grid_size)
# y coordinates of the grid cells
ygrid = np.arange(ymin, ymax, batch.grid_size)
return (xgrid, ygrid)
def fetch_species_distributions(data_home=None,
download_if_missing=True):
"""Loader for species distribution dataset from Phillips et. al. (2006)
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing : optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
--------
The data is returned as a Bunch object with the following attributes:
coverages : array, shape = [14, 1592, 1212]
These represent the 14 features measured at each point of the map grid.
The latitude/longitude values for the grid are discussed below.
Missing data is represented by the value -9999.
train : record array, shape = (1623,)
The training points for the data. Each point has three fields:
- train['species'] is the species name
- train['dd long'] is the longitude, in degrees
- train['dd lat'] is the latitude, in degrees
test : record array, shape = (619,)
The test points for the data. Same format as the training data.
Nx, Ny : integers
The number of longitudes (x) and latitudes (y) in the grid
x_left_lower_corner, y_left_lower_corner : floats
The (x,y) position of the lower-left corner, in degrees
grid_size : float
The spacing between points of the grid, in degrees
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://rob.schapire.net/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes
-----
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
      Colombia, Ecuador, and Venezuela.
- For an example of using this dataset with scikit-learn, see
:ref:`examples/applications/plot_species_distribution_modeling.py
<sphx_glr_auto_examples_applications_plot_species_distribution_modeling.py>`.
"""
data_home = get_data_home(data_home)
if not exists(data_home):
makedirs(data_home)
# Define parameters for the data files. These should not be changed
# unless the data model changes. They will be saved in the npz file
# with the downloaded data.
extra_params = dict(x_left_lower_corner=-94.8,
Nx=1212,
y_left_lower_corner=-56.05,
Ny=1592,
grid_size=0.05)
dtype = np.int16
archive_path = _pkl_filepath(data_home, DATA_ARCHIVE_NAME)
if not exists(archive_path):
if not download_if_missing:
raise IOError("Data not found and `download_if_missing` is False")
logger.info('Downloading species data from %s to %s' % (
SAMPLES.url, data_home))
samples_path = _fetch_remote(SAMPLES, dirname=data_home)
with np.load(samples_path) as X: # samples.zip is a valid npz
for f in X.files:
fhandle = BytesIO(X[f])
if 'train' in f:
train = _load_csv(fhandle)
if 'test' in f:
test = _load_csv(fhandle)
remove(samples_path)
logger.info('Downloading coverage data from %s to %s' % (
COVERAGES.url, data_home))
coverages_path = _fetch_remote(COVERAGES, dirname=data_home)
with np.load(coverages_path) as X: # coverages.zip is a valid npz
coverages = []
for f in X.files:
fhandle = BytesIO(X[f])
logger.debug(' - converting {}'.format(f))
coverages.append(_load_coverage(fhandle))
coverages = np.asarray(coverages, dtype=dtype)
remove(coverages_path)
bunch = Bunch(coverages=coverages,
test=test,
train=train,
**extra_params)
joblib.dump(bunch, archive_path, compress=9)
else:
bunch = joblib.load(archive_path)
return bunch
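# A minimal usage sketch, assuming network access on the first call (the samples
# and coverages archives are fetched into the scikit-learn data home and cached).
if __name__ == "__main__":
    data = fetch_species_distributions()
    xgrid, ygrid = construct_grids(data)
    print(data.coverages.shape, data.train.shape, data.test.shape,
          xgrid.shape, ygrid.shape)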
| bsd-3-clause |
schets/scikit-learn | sklearn/tests/test_isotonic.py | 16 | 11166 | import numpy as np
import pickle
from sklearn.isotonic import (check_increasing, isotonic_regression,
IsotonicRegression)
from sklearn.utils.testing import (assert_raises, assert_array_equal,
assert_true, assert_false, assert_equal,
assert_array_almost_equal,
assert_warns_message, assert_no_warnings)
from sklearn.utils import shuffle
def test_permutation_invariance():
    # check that fit is permutation invariant.
    # regression test for missing sorting of sample weights
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
x_s, y_s, sample_weight_s = shuffle(x, y, sample_weight, random_state=0)
y_transformed = ir.fit_transform(x, y, sample_weight=sample_weight)
y_transformed_s = ir.fit(x_s, y_s, sample_weight=sample_weight_s).transform(x)
assert_array_equal(y_transformed, y_transformed_s)
def test_check_increasing_up():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1.5, 2.77, 8.99, 8.99, 50]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_up_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_down():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1.5, -2.77, -8.99, -8.99, -50]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_increasing_down_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, -2, -3, -4, -5]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_ci_warn():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, 2, -3, 4, -5]
# Check that we got increasing=False and CI interval warning
is_increasing = assert_warns_message(UserWarning, "interval",
check_increasing,
x, y)
assert_false(is_increasing)
def test_isotonic_regression():
y = np.array([3, 7, 5, 9, 8, 7, 10])
y_ = np.array([3, 6, 6, 8, 8, 8, 10])
assert_array_equal(y_, isotonic_regression(y))
x = np.arange(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(ir.transform(x), ir.predict(x))
# check that it is immune to permutation
perm = np.random.permutation(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
assert_array_equal(ir.fit_transform(x[perm], y[perm]),
ir.fit_transform(x, y)[perm])
assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm])
# check we don't crash when all x are equal:
ir = IsotonicRegression()
assert_array_equal(ir.fit_transform(np.ones(len(x)), y), np.mean(y))
def test_isotonic_regression_ties_min():
# Setup examples with ties on minimum
x = [0, 1, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5, 6]
y_true = [0, 1.5, 1.5, 3, 4, 5, 6]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_max():
# Setup examples with ties on maximum
x = [1, 2, 3, 4, 5, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1, 2, 3, 4, 5.5, 5.5]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_secondary_():
"""
Test isotonic regression fit, transform and fit_transform
against the "secondary" ties method and "pituitary" data from R
"isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair,
Isotone Optimization in R: Pool-Adjacent-Violators Algorithm
(PAVA) and Active Set Methods
Set values based on pituitary example and
the following R command detailed in the paper above:
> library("isotone")
> data("pituitary")
> res1 <- gpava(pituitary$age, pituitary$size, ties="secondary")
> res1$x
`isotone` version: 1.0-2, 2014-09-07
R version: R version 3.1.1 (2014-07-10)
"""
x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14]
y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25]
y_true = [22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222,
22.22222, 22.22222, 22.22222, 24.25, 24.25]
# Check fit, transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_almost_equal(ir.transform(x), y_true, 4)
assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4)
def test_isotonic_regression_reversed():
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
y_ = IsotonicRegression(increasing=False).fit_transform(
np.arange(len(y)), y)
assert_array_equal(np.ones(y_[:-1].shape), ((y_[:-1] - y_[1:]) >= 0))
def test_isotonic_regression_auto_decreasing():
# Set y and x for decreasing
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship decreases
is_increasing = y_[0] < y_[-1]
assert_false(is_increasing)
def test_isotonic_regression_auto_increasing():
# Set y and x for decreasing
y = np.array([5, 6.1, 6, 7, 10, 9, 10])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship increases
is_increasing = y_[0] < y_[-1]
assert_true(is_increasing)
def test_assert_raises_exceptions():
ir = IsotonicRegression()
rng = np.random.RandomState(42)
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7, 3], [0.1, 0.6])
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7])
assert_raises(ValueError, ir.fit, rng.randn(3, 10), [0, 1, 2])
assert_raises(ValueError, ir.transform, rng.randn(3, 10))
def test_isotonic_sample_weight_parameter_default_value():
# check if default value of sample_weight parameter is one
ir = IsotonicRegression()
# random test data
rng = np.random.RandomState(42)
n = 100
x = np.arange(n)
y = rng.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
# check if value is correctly used
weights = np.ones(n)
y_set_value = ir.fit_transform(x, y, sample_weight=weights)
y_default_value = ir.fit_transform(x, y)
assert_array_equal(y_set_value, y_default_value)
def test_isotonic_min_max_boundaries():
# check if min value is used correctly
ir = IsotonicRegression(y_min=2, y_max=4)
n = 6
x = np.arange(n)
y = np.arange(n)
y_test = [2, 2, 2, 3, 4, 4]
y_result = np.round(ir.fit_transform(x, y))
assert_array_equal(y_result, y_test)
def test_isotonic_sample_weight():
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
expected_y = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24]
received_y = ir.fit_transform(x, y, sample_weight=sample_weight)
assert_array_equal(expected_y, received_y)
def test_isotonic_regression_oob_raise():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
ir.fit(x, y)
# Check that an exception is thrown
assert_raises(ValueError, ir.predict, [min(x) - 10, max(x) + 10])
def test_isotonic_regression_oob_clip():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
# Predict from training and test x and check that min/max match.
y1 = ir.predict([min(x) - 10, max(x) + 10])
y2 = ir.predict(x)
assert_equal(max(y1), max(y2))
assert_equal(min(y1), min(y2))
def test_isotonic_regression_oob_nan():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="nan")
ir.fit(x, y)
# Predict from training and test x and check that we have two NaNs.
y1 = ir.predict([min(x) - 10, max(x) + 10])
assert_equal(sum(np.isnan(y1)), 2)
def test_isotonic_regression_oob_bad():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="xyz")
# Make sure that we throw an error for bad out_of_bounds value
assert_raises(ValueError, ir.fit, x, y)
def test_isotonic_regression_oob_bad_after():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
# Make sure that we throw an error for bad out_of_bounds value in transform
ir.fit(x, y)
ir.out_of_bounds = "xyz"
assert_raises(ValueError, ir.transform, x)
def test_isotonic_regression_pickle():
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
ir_ser = pickle.dumps(ir, pickle.HIGHEST_PROTOCOL)
ir2 = pickle.loads(ir_ser)
np.testing.assert_array_equal(ir.predict(x), ir2.predict(x))
def test_isotonic_duplicate_min_entry():
x = [0, 0, 1]
y = [0, 0, 1]
ir = IsotonicRegression(increasing=True, out_of_bounds="clip")
ir.fit(x, y)
all_predictions_finite = np.all(np.isfinite(ir.predict(x)))
assert_true(all_predictions_finite)
def test_isotonic_zero_weight_loop():
# Test from @ogrisel's issue:
# https://github.com/scikit-learn/scikit-learn/issues/4297
# Get deterministic RNG with seed
rng = np.random.RandomState(42)
# Create regression and samples
regression = IsotonicRegression()
n_samples = 50
x = np.linspace(-3, 3, n_samples)
y = x + rng.uniform(size=n_samples)
# Get some random weights and zero out
w = rng.uniform(size=n_samples)
w[5:8] = 0
regression.fit(x, y, sample_weight=w)
# This will hang in failure case.
regression.fit(x, y, sample_weight=w)
if __name__ == "__main__":
import nose
nose.run(argv=['', __file__])
| bsd-3-clause |
euirim/maple | maple.py | 1 | 3981 | #!/usr/bin/env python
"""Maple: automatically summarizes given text using
a modified version of the TextRank algorithm."""
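# Example invocations, following the usage string printed by main() below
# ("article.txt" and the unit count are illustrative placeholders):
#   ./maple.py article.txt 5 -s   # <filename> <max_units> <units: -s or -p>
#   ./maple.py -t 1               # run the simple visual test suite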
import sys
import codecs
import pickle
import string
import nltk
from nltk.corpus import wordnet
from nltk.tag import pos_tag
from nltk.tokenize.punkt import PunktSentenceTokenizer
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import TfidfVectorizer
from tests.tests_visual import test_summarizer, tests_simple, tests_diverse
from tests.tests_field import generate_test_files
from tests import tests_alpha
def field_test():
generate_test_files("~/Documents/summ_test_files/selected")
def alpha_test():
tests_alpha.generate_test_files("output")
def test(simple=True):
if simple:
print("********* MAPLE'S SIMPLE TESTS *********\n")
tests_simple()
else:
print("********* MAPLE'S DIVERSE TESTS *********\n")
tests_diverse()
print("********* TESTS COMPLETED *********")
def train(filename, stem=True):
"""
Given file to use as unsupervised data, train tfidfvectorizer and punkt
sentence tokenizer and output to pickle in data directory.
"""
text = codecs.open(filename, "rb", "utf8").read()
abbreviations = [
"u.s.a", "fig", "gov", "sen", "jus", "jdg", "rep", "pres",
"mr", "mrs", "ms", "h.r", "s.", "h.b", "s.b", "u.k", "u.n",
"u.s.s.r", "u.s",
]
print("TRAINING SENTENCE TOKENIZER...")
pst = PunktSentenceTokenizer()
pst.train(text.replace("\n\n", " "))
# add extra abbreviations
pst._params.abbrev_types.update(abbreviations)
print("TRAINED ABBREVIATIONS: \n{}".format(pst._params.abbrev_types))
# stemming
if stem:
wnl = WordNetLemmatizer()
print("WORD TOKENIZING TEXT")
tokens = nltk.word_tokenize(text)
# pos tagging
print("POS TAGGING TEXT...")
tagged_tokens = pos_tag(tokens)
print("STEMMING TRAINING TEXT...")
for i, tok in enumerate(tagged_tokens):
position = None
if tok[1] == "NN" or tok[1] == "NNS" or tok[1] == "NNPS":
position = wordnet.NOUN
elif "JJ" in tok[1]:
position = wordnet.ADJ
elif "VB" in tok[1]:
position = wordnet.VERB
elif "RB" in tok[1]:
position = wordnet.ADV
if position:
tokens[i] = wnl.lemmatize(tok[0], position)
if i % 1000000 == 0:
print("TOKEN: {}".format(i))
text = "".join([("" if tok in string.punctuation else " ")+tok
for tok in tokens])
text = text.strip()
print("TRAINING VECTORIZER...")
tfv = TfidfVectorizer()
tfv.fit(pst.tokenize(text))
# export trained tokenizer + vectorizer
print("EXPORTING TRAINED TOKENIZER + VECTORIZER...")
if stem:
punkt_out_filename = "data/punkt_stem.pk"
tfidf_out_filename = "data/tfidf_stem.pk"
else:
punkt_out_filename = "data/punkt.pk"
tfidf_out_filename = "data/tfidf.pk"
with open(punkt_out_filename, "wb") as pst_out:
pickle.dump(pst, pst_out)
with open(tfidf_out_filename, "wb") as tfv_out:
pickle.dump(tfv, tfv_out)
print("EXPORTING COMPLETED")
return
def main(argv):
if argv[0] == "-t":
try:
test(bool(int(argv[1])))
return 0
except:
print("Enter True or False as second parameter for testing.\n")
return 1
elif (len(argv) > 3) or (len(argv) < 3) or (argv[0] == "-h"):
print("./maple.py (optional -test true or false) <filename>"
" <max_units> <units (-p or -s)>")
return 1
if argv[2] == "-p":
paragraphs = True
else:
paragraphs = False
    filename = argv[0]
    test_summarizer(filename)
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| apache-2.0 |
schets/scikit-learn | sklearn/svm/base.py | 12 | 33517 | from __future__ import print_function
import numpy as np
import scipy.sparse as sp
import warnings
from abc import ABCMeta, abstractmethod
from . import libsvm, liblinear
from . import libsvm_sparse
from ..base import BaseEstimator, ClassifierMixin
from ..preprocessing import LabelEncoder
from ..utils import check_array, check_random_state, column_or_1d
from ..utils import ConvergenceWarning, compute_class_weight, deprecated
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..externals import six
LIBSVM_IMPL = ['c_svc', 'nu_svc', 'one_class', 'epsilon_svr', 'nu_svr']
def _one_vs_one_coef(dual_coef, n_support, support_vectors):
"""Generate primal coefficients from dual coefficients
for the one-vs-one multi class LibSVM in the case
of a linear kernel."""
# get 1vs1 weights for all n*(n-1) classifiers.
# this is somewhat messy.
# shape of dual_coef_ is nSV * (n_classes -1)
# see docs for details
n_class = dual_coef.shape[0] + 1
# XXX we could do preallocation of coef but
# would have to take care in the sparse case
coef = []
sv_locs = np.cumsum(np.hstack([[0], n_support]))
for class1 in range(n_class):
# SVs for class1:
sv1 = support_vectors[sv_locs[class1]:sv_locs[class1 + 1], :]
for class2 in range(class1 + 1, n_class):
# SVs for class1:
sv2 = support_vectors[sv_locs[class2]:sv_locs[class2 + 1], :]
# dual coef for class1 SVs:
alpha1 = dual_coef[class2 - 1, sv_locs[class1]:sv_locs[class1 + 1]]
# dual coef for class2 SVs:
alpha2 = dual_coef[class1, sv_locs[class2]:sv_locs[class2 + 1]]
# build weight for class1 vs class2
coef.append(safe_sparse_dot(alpha1, sv1)
+ safe_sparse_dot(alpha2, sv2))
return coef
class BaseLibSVM(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for estimators that use libsvm as backing library
This implements support vector machine classification and regression.
Parameter documentation is in the derived `SVC` class.
"""
# The order of these must match the integer values in LibSVM.
# XXX These are actually the same in the dense case. Need to factor
# this out.
_sparse_kernels = ["linear", "poly", "rbf", "sigmoid", "precomputed"]
@abstractmethod
def __init__(self, impl, kernel, degree, gamma, coef0,
tol, C, nu, epsilon, shrinking, probability, cache_size,
class_weight, verbose, max_iter, random_state):
if impl not in LIBSVM_IMPL: # pragma: no cover
raise ValueError("impl should be one of %s, %s was given" % (
LIBSVM_IMPL, impl))
self._impl = impl
self.kernel = kernel
self.degree = degree
self.gamma = gamma
self.coef0 = coef0
self.tol = tol
self.C = C
self.nu = nu
self.epsilon = epsilon
self.shrinking = shrinking
self.probability = probability
self.cache_size = cache_size
self.class_weight = class_weight
self.verbose = verbose
self.max_iter = max_iter
self.random_state = random_state
@property
def _pairwise(self):
# Used by cross_val_score.
kernel = self.kernel
return kernel == "precomputed" or callable(kernel)
def fit(self, X, y, sample_weight=None):
"""Fit the SVM model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
For kernel="precomputed", the expected shape of X is
(n_samples, n_samples).
y : array-like, shape (n_samples,)
Target values (class labels in classification, real numbers in
regression)
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
------
If X and y are not C-ordered and contiguous arrays of np.float64 and
X is not a scipy.sparse.csr_matrix, X and/or y may be copied.
If X is a dense array, then the other methods will not support sparse
matrices as input.
"""
rnd = check_random_state(self.random_state)
sparse = sp.isspmatrix(X)
if sparse and self.kernel == "precomputed":
raise TypeError("Sparse precomputed kernels are not supported.")
self._sparse = sparse and not callable(self.kernel)
X = check_array(X, accept_sparse='csr', dtype=np.float64, order='C')
y = self._validate_targets(y)
sample_weight = np.asarray([]
if sample_weight is None
else sample_weight, dtype=np.float64)
solver_type = LIBSVM_IMPL.index(self._impl)
# input validation
if solver_type != 2 and X.shape[0] != y.shape[0]:
raise ValueError("X and y have incompatible shapes.\n" +
"X has %s samples, but y has %s." %
(X.shape[0], y.shape[0]))
if self.kernel == "precomputed" and X.shape[0] != X.shape[1]:
raise ValueError("X.shape[0] should be equal to X.shape[1]")
if sample_weight.shape[0] > 0 and sample_weight.shape[0] != X.shape[0]:
raise ValueError("sample_weight and X have incompatible shapes: "
"%r vs %r\n"
"Note: Sparse matrices cannot be indexed w/"
"boolean masks (use `indices=True` in CV)."
% (sample_weight.shape, X.shape))
if (self.kernel in ['poly', 'rbf']) and (self.gamma == 0):
# if custom gamma is not provided ...
self._gamma = 1.0 / X.shape[1]
else:
self._gamma = self.gamma
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
fit = self._sparse_fit if self._sparse else self._dense_fit
if self.verbose: # pragma: no cover
print('[LibSVM]', end='')
seed = rnd.randint(np.iinfo('i').max)
fit(X, y, sample_weight, solver_type, kernel, random_seed=seed)
# see comment on the other call to np.iinfo in this file
self.shape_fit_ = X.shape
# In binary case, we need to flip the sign of coef, intercept and
# decision function. Use self._intercept_ and self._dual_coef_ internally.
self._intercept_ = self.intercept_.copy()
self._dual_coef_ = self.dual_coef_
if self._impl in ['c_svc', 'nu_svc'] and len(self.classes_) == 2:
self.intercept_ *= -1
self.dual_coef_ = -self.dual_coef_
return self
def _validate_targets(self, y):
"""Validation of y and class_weight.
Default implementation for SVR and one-class; overridden in BaseSVC.
"""
# XXX this is ugly.
# Regression models should not have a class_weight_ attribute.
self.class_weight_ = np.empty(0)
return np.asarray(y, dtype=np.float64, order='C')
def _warn_from_fit_status(self):
assert self.fit_status_ in (0, 1)
if self.fit_status_ == 1:
warnings.warn('Solver terminated early (max_iter=%i).'
' Consider pre-processing your data with'
' StandardScaler or MinMaxScaler.'
% self.max_iter, ConvergenceWarning)
def _dense_fit(self, X, y, sample_weight, solver_type, kernel,
random_seed):
if callable(self.kernel):
# you must store a reference to X to compute the kernel in predict
# TODO: add keyword copy to copy on demand
self.__Xfit = X
X = self._compute_kernel(X)
if X.shape[0] != X.shape[1]:
raise ValueError("X.shape[0] should be equal to X.shape[1]")
libsvm.set_verbosity_wrap(self.verbose)
# we don't pass **self.get_params() to allow subclasses to
# add other parameters to __init__
self.support_, self.support_vectors_, self.n_support_, \
self.dual_coef_, self.intercept_, self.probA_, \
self.probB_, self.fit_status_ = libsvm.fit(
X, y,
svm_type=solver_type, sample_weight=sample_weight,
class_weight=self.class_weight_, kernel=kernel, C=self.C,
nu=self.nu, probability=self.probability, degree=self.degree,
shrinking=self.shrinking, tol=self.tol,
cache_size=self.cache_size, coef0=self.coef0,
gamma=self._gamma, epsilon=self.epsilon,
max_iter=self.max_iter, random_seed=random_seed)
self._warn_from_fit_status()
def _sparse_fit(self, X, y, sample_weight, solver_type, kernel,
random_seed):
X.data = np.asarray(X.data, dtype=np.float64, order='C')
X.sort_indices()
kernel_type = self._sparse_kernels.index(kernel)
libsvm_sparse.set_verbosity_wrap(self.verbose)
self.support_, self.support_vectors_, dual_coef_data, \
self.intercept_, self.n_support_, \
self.probA_, self.probB_, self.fit_status_ = \
libsvm_sparse.libsvm_sparse_train(
X.shape[1], X.data, X.indices, X.indptr, y, solver_type,
kernel_type, self.degree, self._gamma, self.coef0, self.tol,
self.C, self.class_weight_,
sample_weight, self.nu, self.cache_size, self.epsilon,
int(self.shrinking), int(self.probability), self.max_iter,
random_seed)
self._warn_from_fit_status()
if hasattr(self, "classes_"):
n_class = len(self.classes_) - 1
else: # regression
n_class = 1
n_SV = self.support_vectors_.shape[0]
dual_coef_indices = np.tile(np.arange(n_SV), n_class)
dual_coef_indptr = np.arange(0, dual_coef_indices.size + 1,
dual_coef_indices.size / n_class)
self.dual_coef_ = sp.csr_matrix(
(dual_coef_data, dual_coef_indices, dual_coef_indptr),
(n_class, n_SV))
def predict(self, X):
"""Perform regression on samples in X.
        For a one-class model, +1 or -1 is returned.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
(n_samples_test, n_samples_train).
Returns
-------
y_pred : array, shape (n_samples,)
"""
X = self._validate_for_predict(X)
predict = self._sparse_predict if self._sparse else self._dense_predict
return predict(X)
def _dense_predict(self, X):
n_samples, n_features = X.shape
X = self._compute_kernel(X)
if X.ndim == 1:
X = check_array(X, order='C')
kernel = self.kernel
if callable(self.kernel):
kernel = 'precomputed'
if X.shape[1] != self.shape_fit_[0]:
raise ValueError("X.shape[1] = %d should be equal to %d, "
"the number of samples at training time" %
(X.shape[1], self.shape_fit_[0]))
svm_type = LIBSVM_IMPL.index(self._impl)
return libsvm.predict(
X, self.support_, self.support_vectors_, self.n_support_,
self._dual_coef_, self._intercept_,
self.probA_, self.probB_, svm_type=svm_type, kernel=kernel,
degree=self.degree, coef0=self.coef0, gamma=self._gamma,
cache_size=self.cache_size)
def _sparse_predict(self, X):
# Precondition: X is a csr_matrix of dtype np.float64.
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
kernel_type = self._sparse_kernels.index(kernel)
C = 0.0 # C is not useful here
return libsvm_sparse.libsvm_sparse_predict(
X.data, X.indices, X.indptr,
self.support_vectors_.data,
self.support_vectors_.indices,
self.support_vectors_.indptr,
self._dual_coef_.data, self._intercept_,
LIBSVM_IMPL.index(self._impl), kernel_type,
self.degree, self._gamma, self.coef0, self.tol,
C, self.class_weight_,
self.nu, self.epsilon, self.shrinking,
self.probability, self.n_support_,
self.probA_, self.probB_)
def _compute_kernel(self, X):
"""Return the data transformed by a callable kernel"""
if callable(self.kernel):
# in the case of precomputed kernel given as a function, we
# have to compute explicitly the kernel matrix
kernel = self.kernel(X, self.__Xfit)
if sp.issparse(kernel):
kernel = kernel.toarray()
X = np.asarray(kernel, dtype=np.float64, order='C')
return X
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
[n_samples_test, n_samples_train].
Returns
-------
X : array-like, shape (n_samples, n_class * (n_class-1) / 2)
Returns the decision function of the sample for each class
in the model.
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X : array-like, shape (n_samples, n_class * (n_class-1) / 2)
Returns the decision function of the sample for each class
in the model.
"""
# NOTE: _validate_for_predict contains check for is_fitted
# hence must be placed before any other attributes are used.
X = self._validate_for_predict(X)
X = self._compute_kernel(X)
if self._sparse:
dec_func = self._sparse_decision_function(X)
else:
dec_func = self._dense_decision_function(X)
# In binary case, we need to flip the sign of coef, intercept and
# decision function.
if self._impl in ['c_svc', 'nu_svc'] and len(self.classes_) == 2:
return -dec_func.ravel()
return dec_func
def _dense_decision_function(self, X):
X = check_array(X, dtype=np.float64, order="C")
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
return libsvm.decision_function(
X, self.support_, self.support_vectors_, self.n_support_,
self._dual_coef_, self._intercept_,
self.probA_, self.probB_,
svm_type=LIBSVM_IMPL.index(self._impl),
kernel=kernel, degree=self.degree, cache_size=self.cache_size,
coef0=self.coef0, gamma=self._gamma)
def _sparse_decision_function(self, X):
X.data = np.asarray(X.data, dtype=np.float64, order='C')
kernel = self.kernel
if hasattr(kernel, '__call__'):
kernel = 'precomputed'
kernel_type = self._sparse_kernels.index(kernel)
return libsvm_sparse.libsvm_sparse_decision_function(
X.data, X.indices, X.indptr,
self.support_vectors_.data,
self.support_vectors_.indices,
self.support_vectors_.indptr,
self._dual_coef_.data, self._intercept_,
LIBSVM_IMPL.index(self._impl), kernel_type,
self.degree, self._gamma, self.coef0, self.tol,
self.C, self.class_weight_,
self.nu, self.epsilon, self.shrinking,
self.probability, self.n_support_,
self.probA_, self.probB_)
def _validate_for_predict(self, X):
check_is_fitted(self, 'support_')
X = check_array(X, accept_sparse='csr', dtype=np.float64, order="C")
if self._sparse and not sp.isspmatrix(X):
X = sp.csr_matrix(X)
if self._sparse:
X.sort_indices()
if sp.issparse(X) and not self._sparse and not callable(self.kernel):
raise ValueError(
"cannot use sparse input in %r trained on dense data"
% type(self).__name__)
n_samples, n_features = X.shape
if self.kernel == "precomputed":
if X.shape[1] != self.shape_fit_[0]:
raise ValueError("X.shape[1] = %d should be equal to %d, "
"the number of samples at training time" %
(X.shape[1], self.shape_fit_[0]))
elif n_features != self.shape_fit_[1]:
raise ValueError("X.shape[1] = %d should be equal to %d, "
"the number of features at training time" %
(n_features, self.shape_fit_[1]))
return X
@property
def coef_(self):
if self.kernel != 'linear':
raise ValueError('coef_ is only available when using a '
'linear kernel')
coef = self._get_coef()
# coef_ being a read-only property, it's better to mark the value as
# immutable to avoid hiding potential bugs for the unsuspecting user.
if sp.issparse(coef):
            # sparse matrices do not have global flags
coef.data.flags.writeable = False
else:
# regular dense array
coef.flags.writeable = False
return coef
def _get_coef(self):
return safe_sparse_dot(self._dual_coef_, self.support_vectors_)
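        # Editorial note (sketch, not from the original source): for a linear
        # kernel the line above recovers the primal weight vector, i.e. in the
        # dense case it is equivalent to
        #     coef = np.dot(self._dual_coef_, self.support_vectors_)
        # which corresponds to w = sum_i dual_coef_i * support_vector_i.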
class BaseSVC(BaseLibSVM, ClassifierMixin):
"""ABC for LibSVM-based classifiers."""
def _validate_targets(self, y):
y_ = column_or_1d(y, warn=True)
cls, y = np.unique(y_, return_inverse=True)
self.class_weight_ = compute_class_weight(self.class_weight, cls, y_)
if len(cls) < 2:
raise ValueError(
"The number of classes has to be greater than one; got %d"
% len(cls))
self.classes_ = cls
return np.asarray(y, dtype=np.float64, order='C')
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X : array-like, shape (n_samples, n_class * (n_class-1) / 2)
Returns the decision function of the sample for each class
in the model.
"""
return self._decision_function(X)
def predict(self, X):
"""Perform classification on samples in X.
        For a one-class model, +1 or -1 is returned.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
[n_samples_test, n_samples_train]
Returns
-------
y_pred : array, shape (n_samples,)
Class labels for samples in X.
"""
y = super(BaseSVC, self).predict(X)
return self.classes_.take(np.asarray(y, dtype=np.intp))
# Hacky way of getting predict_proba to raise an AttributeError when
# probability=False using properties. Do not use this in new code; when
# probabilities are not available depending on a setting, introduce two
# estimators.
def _check_proba(self):
if not self.probability:
raise AttributeError("predict_proba is not available when"
" probability=%r" % self.probability)
if self._impl not in ('c_svc', 'nu_svc'):
raise AttributeError("predict_proba only implemented for SVC"
" and NuSVC")
@property
def predict_proba(self):
"""Compute probabilities of possible outcomes for samples in X.
        The model needs to have probability information computed at training
        time: fit with attribute `probability` set to True.
Parameters
----------
X : array-like, shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
[n_samples_test, n_samples_train]
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the probability of the sample for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
Notes
-----
The probability model is created using cross validation, so
the results can be slightly different than those obtained by
predict. Also, it will produce meaningless results on very small
datasets.
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
X = self._validate_for_predict(X)
pred_proba = (self._sparse_predict_proba
if self._sparse else self._dense_predict_proba)
return pred_proba(X)
@property
def predict_log_proba(self):
"""Compute log probabilities of possible outcomes for samples in X.
        The model needs to have probability information computed at training
        time: fit with attribute `probability` set to True.
Parameters
----------
X : array-like, shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
[n_samples_test, n_samples_train]
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probabilities of the sample for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
Notes
-----
The probability model is created using cross validation, so
the results can be slightly different than those obtained by
predict. Also, it will produce meaningless results on very small
datasets.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
def _dense_predict_proba(self, X):
X = self._compute_kernel(X)
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
svm_type = LIBSVM_IMPL.index(self._impl)
pprob = libsvm.predict_proba(
X, self.support_, self.support_vectors_, self.n_support_,
self._dual_coef_, self._intercept_,
self.probA_, self.probB_,
svm_type=svm_type, kernel=kernel, degree=self.degree,
cache_size=self.cache_size, coef0=self.coef0, gamma=self._gamma)
return pprob
def _sparse_predict_proba(self, X):
X.data = np.asarray(X.data, dtype=np.float64, order='C')
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
kernel_type = self._sparse_kernels.index(kernel)
return libsvm_sparse.libsvm_sparse_predict_proba(
X.data, X.indices, X.indptr,
self.support_vectors_.data,
self.support_vectors_.indices,
self.support_vectors_.indptr,
self._dual_coef_.data, self._intercept_,
LIBSVM_IMPL.index(self._impl), kernel_type,
self.degree, self._gamma, self.coef0, self.tol,
self.C, self.class_weight_,
self.nu, self.epsilon, self.shrinking,
self.probability, self.n_support_,
self.probA_, self.probB_)
def _get_coef(self):
if self.dual_coef_.shape[0] == 1:
# binary classifier
coef = safe_sparse_dot(self.dual_coef_, self.support_vectors_)
else:
# 1vs1 classifier
coef = _one_vs_one_coef(self.dual_coef_, self.n_support_,
self.support_vectors_)
if sp.issparse(coef[0]):
coef = sp.vstack(coef).tocsr()
else:
coef = np.vstack(coef)
return coef
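        # Editorial note (sketch, not from the original source): in the
        # one-vs-one branch above, an SVC trained on n_classes classes holds
        # n_classes * (n_classes - 1) / 2 pairwise classifiers, so ``coef`` is
        # stacked with that many rows (e.g. 3 classes -> 3 rows,
        # 4 classes -> 6 rows), one per class pair.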
def _get_liblinear_solver_type(multi_class, penalty, loss, dual):
"""Find the liblinear magic number for the solver.
This number depends on the values of the following attributes:
- multi_class
- penalty
- loss
- dual
The same number is also internally used by LibLinear to determine
which solver to use.
"""
    # nested dicts containing level 1: available loss functions,
    # level2: available penalties for the given loss function,
    # level3: whether the dual solver is available for the specified
    # combination of loss function and penalty
_solver_type_dict = {
'logistic_regression': {
'l1': {False: 6},
'l2': {False: 0, True: 7}},
'hinge': {
'l2': {True: 3}},
'squared_hinge': {
'l1': {False: 5},
'l2': {False: 2, True: 1}},
'epsilon_insensitive': {
'l2': {True: 13}},
'squared_epsilon_insensitive': {
'l2': {False: 11, True: 12}},
'crammer_singer': 4
}
if multi_class == 'crammer_singer':
return _solver_type_dict[multi_class]
elif multi_class != 'ovr':
raise ValueError("`multi_class` must be one of `ovr`, "
"`crammer_singer`, got %r" % multi_class)
# FIXME loss.lower() --> loss in 0.18
_solver_pen = _solver_type_dict.get(loss.lower(), None)
if _solver_pen is None:
error_string = ("loss='%s' is not supported" % loss)
else:
        # FIXME penalty.lower() --> penalty in 0.18
_solver_dual = _solver_pen.get(penalty.lower(), None)
if _solver_dual is None:
error_string = ("The combination of penalty='%s'"
"and loss='%s' is not supported"
% (penalty, loss))
else:
solver_num = _solver_dual.get(dual, None)
if solver_num is None:
error_string = ("loss='%s' and penalty='%s'"
"are not supported when dual=%s"
% (penalty, loss, dual))
else:
return solver_num
raise ValueError('Unsupported set of arguments: %s, '
'Parameters: penalty=%r, loss=%r, dual=%r'
% (error_string, penalty, loss, dual))
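# Editorial illustration (sketch, not part of the original module): reading the
# mapping above, a default LinearSVC-style configuration resolves as
#     _get_liblinear_solver_type('ovr', 'l2', 'squared_hinge', dual=True)   # -> 1
# while an L1-penalized primal problem resolves as
#     _get_liblinear_solver_type('ovr', 'l1', 'squared_hinge', dual=False)  # -> 5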
def _fit_liblinear(X, y, C, fit_intercept, intercept_scaling, class_weight,
penalty, dual, verbose, max_iter, tol,
random_state=None, multi_class='ovr',
loss='logistic_regression', epsilon=0.1):
"""Used by Logistic Regression (and CV) and LinearSVC.
Preprocessing is done in this function before supplying it to liblinear.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X
C : float
        Inverse of the regularization strength; the lower the value of C,
        the stronger the penalization.
fit_intercept : bool
        Whether or not to fit an intercept, that is, to add an intercept
        term to the decision function.
intercept_scaling : float
        LibLinear internally penalizes the intercept, treating it like any
        other term in the feature vector. To reduce the effect of this
        regularization, one should increase intercept_scaling such that the
        feature vector becomes [x, intercept_scaling].
class_weight : {dict, 'auto'}, optional
        Weight assigned to each class. If class_weight is 'auto', the class
        weights are set inversely proportional to the class frequencies in
        the target vector.
penalty : str, {'l1', 'l2'}
The norm of the penalty used in regularization.
dual : bool
        Dual or primal formulation.
verbose : int
Set verbose to any positive number for verbosity.
max_iter : int
        Maximum number of iterations.
tol : float
Stopping condition.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
multi_class : str, {'ovr', 'crammer_singer'}
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
        While `crammer_singer` is interesting from a theoretical perspective
        as it is consistent, it is seldom used in practice, rarely leads to
        better accuracy, and is more expensive to compute.
If `crammer_singer` is chosen, the options loss, penalty and dual will
be ignored.
loss : str, {'logistic_regression', 'hinge', 'squared_hinge',
        'epsilon_insensitive', 'squared_epsilon_insensitive'}
The loss function used to fit the model.
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set epsilon=0.
Returns
-------
coef_ : ndarray, shape (n_features, n_features + 1)
        The coefficient vector obtained by minimizing the objective function.
intercept_ : float
The intercept term added to the vector.
n_iter_ : int
Maximum number of iterations run across all classes.
"""
# FIXME Remove case insensitivity in 0.18 ---------------------
loss_l, penalty_l = loss.lower(), penalty.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the uppercase notation will be removed in %s")
if (not loss.islower()) and loss_l not in ('l1', 'l2'):
warnings.warn(msg % (loss, loss_l, "0.18"),
DeprecationWarning)
if not penalty.islower():
warnings.warn(msg.replace("loss", "penalty")
% (penalty, penalty_l, "0.18"),
DeprecationWarning)
# -------------------------------------------------------------
# FIXME loss_l --> loss in 0.18
if loss_l not in ['epsilon_insensitive', 'squared_epsilon_insensitive']:
enc = LabelEncoder()
y_ind = enc.fit_transform(y)
classes_ = enc.classes_
if len(classes_) < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
class_weight_ = compute_class_weight(class_weight, classes_, y)
else:
class_weight_ = np.empty(0, dtype=np.float)
y_ind = y
liblinear.set_verbosity_wrap(verbose)
rnd = check_random_state(random_state)
if verbose:
print('[LibLinear]', end='')
# LinearSVC breaks when intercept_scaling is <= 0
bias = -1.0
if fit_intercept:
if intercept_scaling <= 0:
raise ValueError("Intercept scaling is %r but needs to be greater than 0."
" To disable fitting an intercept,"
" set fit_intercept=False." % intercept_scaling)
else:
bias = intercept_scaling
libsvm.set_verbosity_wrap(verbose)
libsvm_sparse.set_verbosity_wrap(verbose)
liblinear.set_verbosity_wrap(verbose)
# LibLinear wants targets as doubles, even for classification
y_ind = np.asarray(y_ind, dtype=np.float64).ravel()
solver_type = _get_liblinear_solver_type(multi_class, penalty, loss, dual)
raw_coef_, n_iter_ = liblinear.train_wrap(
X, y_ind, sp.isspmatrix(X), solver_type, tol, bias, C,
class_weight_, max_iter, rnd.randint(np.iinfo('i').max),
epsilon)
# Regarding rnd.randint(..) in the above signature:
# seed for srand in range [0..INT_MAX); due to limitations in Numpy
# on 32-bit platforms, we can't get to the UINT_MAX limit that
# srand supports
n_iter_ = max(n_iter_)
if n_iter_ >= max_iter and verbose > 0:
warnings.warn("Liblinear failed to converge, increase "
"the number of iterations.", ConvergenceWarning)
if fit_intercept:
coef_ = raw_coef_[:, :-1]
intercept_ = intercept_scaling * raw_coef_[:, -1]
else:
coef_ = raw_coef_
intercept_ = 0.
return coef_, intercept_, n_iter_
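# Editorial usage sketch (hypothetical call, not part of the original module):
# the private helper above is typically driven with keyword arguments matching
# the signature defined here, e.g.
#     coef_, intercept_, n_iter_ = _fit_liblinear(
#         X, y, C=1.0, fit_intercept=True, intercept_scaling=1.0,
#         class_weight=None, penalty='l2', dual=True, verbose=0,
#         max_iter=1000, tol=1e-4, loss='squared_hinge')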
| bsd-3-clause |
herilalaina/scikit-learn | examples/multioutput/plot_classifier_chain_yeast.py | 29 | 4547 | """
============================
Classifier Chain
============================
Example of using classifier chain on a multilabel dataset.
For this example we will use the `yeast
<http://mldata.org/repository/data/viewslug/yeast>`_ dataset which contains
2417 datapoints each with 103 features and 14 possible labels. Each
data point has at least one label. As a baseline we first train a logistic
regression classifier for each of the 14 labels. To evaluate the performance of
these classifiers we predict on a held-out test set and calculate the
:ref:`jaccard similarity score <jaccard_similarity_score>`.
Next we create 10 classifier chains. Each classifier chain contains a
logistic regression model for each of the 14 labels. The models in each
chain are ordered randomly. In addition to the 103 features in the dataset,
each model gets the predictions of the preceding models in the chain as
features (note that by default at training time each model gets the true
labels as features). These additional features allow each chain to exploit
correlations among the classes. The Jaccard similarity score for each chain
tends to be greater than that of the set of independent logistic models.
Because the models in each chain are arranged randomly there is significant
variation in performance among the chains. Presumably there is an optimal
ordering of the classes in a chain that will yield the best performance.
However we do not know that ordering a priori. Instead we can construct a
voting ensemble of classifier chains by averaging the binary predictions of
the chains and apply a threshold of 0.5. The Jaccard similarity score of the
ensemble is greater than that of the independent models and tends to exceed
the score of each chain in the ensemble (although this is not guaranteed
with randomly ordered chains).
"""
print(__doc__)
# Author: Adam Kleczewski
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.multioutput import ClassifierChain
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsRestClassifier
from sklearn.metrics import jaccard_similarity_score
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import fetch_mldata
# Load a multi-label dataset
yeast = fetch_mldata('yeast')
X = yeast['data']
Y = yeast['target'].transpose().toarray()
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=.2,
random_state=0)
# Fit an independent logistic regression model for each class using the
# OneVsRestClassifier wrapper.
ovr = OneVsRestClassifier(LogisticRegression())
ovr.fit(X_train, Y_train)
Y_pred_ovr = ovr.predict(X_test)
ovr_jaccard_score = jaccard_similarity_score(Y_test, Y_pred_ovr)
# Fit an ensemble of logistic regression classifier chains and take the
# average prediction of all the chains.
chains = [ClassifierChain(LogisticRegression(), order='random', random_state=i)
for i in range(10)]
for chain in chains:
chain.fit(X_train, Y_train)
Y_pred_chains = np.array([chain.predict(X_test) for chain in
chains])
chain_jaccard_scores = [jaccard_similarity_score(Y_test, Y_pred_chain >= .5)
for Y_pred_chain in Y_pred_chains]
Y_pred_ensemble = Y_pred_chains.mean(axis=0)
ensemble_jaccard_score = jaccard_similarity_score(Y_test,
Y_pred_ensemble >= .5)
model_scores = [ovr_jaccard_score] + chain_jaccard_scores
model_scores.append(ensemble_jaccard_score)
model_names = ('Independent',
'Chain 1',
'Chain 2',
'Chain 3',
'Chain 4',
'Chain 5',
'Chain 6',
'Chain 7',
'Chain 8',
'Chain 9',
'Chain 10',
'Ensemble')
x_pos = np.arange(len(model_names))
# Plot the Jaccard similarity scores for the independent model, each of the
# chains, and the ensemble (note that the vertical axis on this plot does
# not begin at 0).
fig, ax = plt.subplots(figsize=(7, 4))
ax.grid(True)
ax.set_title('Classifier Chain Ensemble Performance Comparison')
ax.set_xticks(x_pos)
ax.set_xticklabels(model_names, rotation='vertical')
ax.set_ylabel('Jaccard Similarity Score')
ax.set_ylim([min(model_scores) * .9, max(model_scores) * 1.1])
colors = ['r'] + ['b'] * len(chain_jaccard_scores) + ['g']
ax.bar(x_pos, model_scores, alpha=0.5, color=colors)
plt.tight_layout()
plt.show()
| bsd-3-clause |
JoaquimPatriarca/senpy-for-gis | gasp/torst/gdal.py | 1 | 4037 | """
Feature Class to Raster Dataset
"""
def shp_to_raster(shp, cellsize, nodata, outRaster, epsg=None):
"""
Feature Class to Raster
"""
from osgeo import gdal
from osgeo import ogr
from gasp.gdal import get_driver_name
if not epsg:
from gasp.gdal.proj import get_shp_sref
srs = get_shp_sref(shp).ExportToWkt()
else:
from gasp.gdal.proj import epsg_to_wkt
srs = epsg_to_wkt(epsg)
# Get Extent
dtShp = ogr.GetDriverByName(
get_driver_name(shp)).Open(shp, 0)
lyr = dtShp.GetLayer()
x_min, x_max, y_min, y_max = lyr.GetExtent()
# Create output
x_res = int((x_max - x_min) / cellsize)
y_res = int((y_max - y_min) / cellsize)
dtRst = gdal.GetDriverByName(
get_driver_name(outRaster)).Create(
outRaster, x_res, y_res, gdal.GDT_Byte)
dtRst.SetGeoTransform((x_min, cellsize, 0, y_max, 0, -cellsize))
dtRst.SetProjection(srs)
bnd = dtRst.GetRasterBand(1)
bnd.SetNoDataValue(nodata)
gdal.RasterizeLayer(dtRst, [1], lyr, burn_values=[1])
return outRaster
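# Editorial usage sketch (hypothetical paths and values, not from the original
# module), matching the signature defined above:
#     shp_to_raster('parcels.shp', cellsize=10, nodata=-9999,
#                   outRaster='parcels.tif', epsg=3763)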
def array_to_raster(inArray, outRst, template, epsg, data_type, noData=None):
"""
Send Array to Raster
"""
from osgeo import gdal
from osgeo import osr
from gasp.gdal import get_driver_name
img_template = gdal.Open(template)
geo_transform = img_template.GetGeoTransform()
rows, cols = inArray.shape
driver = gdal.GetDriverByName(get_driver_name(outRst))
out = driver.Create(outRst, cols, rows, 1, data_type)
out.SetGeoTransform(geo_transform)
outBand = out.GetRasterBand(1)
if noData:
outBand.SetNoDataValue(noData)
outBand.WriteArray(inArray)
if epsg:
outRstSRS = osr.SpatialReference()
outRstSRS.ImportFromEPSG(epsg)
out.SetProjection(outRstSRS.ExportToWkt())
outBand.FlushCache()
return outRst
"""
Change data format
"""
def gdal_translate(inRst, outRst):
"""
Convert a raster file to another raster format
"""
from gasp.gdal import get_driver_name
from gasp.oss.shell import execute_cmd
outDrv = get_driver_name(outRst)
cmd = 'gdal_translate -of {drv} {_in} {_out}'.format(
drv=outDrv, _in=inRst, _out=outRst
)
cmdout = execute_cmd(cmd)
return outRst
def folder_nc_to_tif(inFolder, outFolder):
"""
Convert all nc existing on a folder to GTiff
"""
import netCDF4
import os
from gasp.oss.info import list_files
from gasp.gdal.bands import gdal_split_bands
# List nc files
lst_nc = list_files(inFolder, file_format='.nc')
# nc to tiff
for nc in lst_nc:
# Check the number of images in nc file
datasets = []
_nc = netCDF4.Dataset(nc, 'r')
for v in _nc.variables:
if v == 'lat' or v == 'lon':
continue
lshape = len(_nc.variables[v].shape)
if lshape >= 2:
datasets.append(v)
# if the nc has any raster
if len(datasets) == 0:
continue
# if the nc has only one raster
elif len(datasets) == 1:
output = os.path.join(
outFolder,
os.path.basename(os.path.splitext(nc)[0]) + '.tif'
)
gdal_translate(nc, output)
gdal_split_bands(output, outFolder)
# if the nc has more than one raster
else:
for dts in datasets:
output = os.path.join(
outFolder,
'{orf}_{v}.tif'.format(
orf = os.path.basename(os.path.splitext(nc)[0]),
v = dts
)
)
gdal_translate(
'NETCDF:"{n}":{v}'.format(n=nc, v=dts),
output
)
gdal_split_bands(output, outFolder)
| gpl-3.0 |
pravsripad/mne-python | examples/time_frequency/compute_csd.py | 6 | 3598 | # -*- coding: utf-8 -*-
"""
.. _ex-csd-matrix:
=============================================
Compute a cross-spectral density (CSD) matrix
=============================================
A cross-spectral density (CSD) matrix is similar to a covariance matrix, but in
the time-frequency domain. It is the first step towards computing
sensor-to-sensor coherence or a DICS beamformer.
This script demonstrates the three methods that MNE-Python provides to compute
the CSD:
1. Using short-term Fourier transform: :func:`mne.time_frequency.csd_fourier`
2. Using a multitaper approach: :func:`mne.time_frequency.csd_multitaper`
3. Using Morlet wavelets: :func:`mne.time_frequency.csd_morlet`
"""
# Author: Marijn van Vliet <w.m.vanvliet@gmail.com>
# License: BSD-3-Clause
# %%
from matplotlib import pyplot as plt
import mne
from mne.datasets import sample
from mne.time_frequency import csd_fourier, csd_multitaper, csd_morlet
print(__doc__)
# %%
# In the following example, the computation of the CSD matrices can be
# performed using multiple cores. Set ``n_jobs`` to a value >1 to select the
# number of cores to use.
n_jobs = 1
# %%
# Loading the sample dataset.
data_path = sample.data_path()
meg_path = data_path / 'MEG' / 'sample'
fname_raw = meg_path / 'sample_audvis_raw.fif'
fname_event = meg_path / 'sample_audvis_raw-eve.fif'
raw = mne.io.read_raw_fif(fname_raw)
events = mne.read_events(fname_event)
# %%
# By default, CSD matrices are computed using all MEG/EEG channels. When
# interpreting a CSD matrix with mixed sensor types, be aware that the
# measurement units, and thus the scalings, differ across sensors. In this
# example, for speed and clarity, we select a single channel type:
# gradiometers.
picks = mne.pick_types(raw.info, meg='grad')
# Make some epochs, based on events with trigger code 1
epochs = mne.Epochs(raw, events, event_id=1, tmin=-0.2, tmax=1,
picks=picks, baseline=(None, 0),
reject=dict(grad=4000e-13), preload=True)
# %%
# Computing CSD matrices using short-term Fourier transform and (adaptive)
# multitapers is straightforward:
csd_fft = csd_fourier(epochs, fmin=15, fmax=20, n_jobs=n_jobs)
csd_mt = csd_multitaper(epochs, fmin=15, fmax=20, adaptive=True, n_jobs=n_jobs)
# %%
# When computing the CSD with Morlet wavelets, you specify the exact
# frequencies at which to compute it. For each frequency, a corresponding
# wavelet will be constructed and convolved with the signal, resulting in a
# time-frequency decomposition.
#
# The CSD is constructed by computing the correlation between the
# time-frequency representations between all sensor-to-sensor pairs. The
# time-frequency decomposition originally has the same sampling rate as the
# signal, in our case ~600Hz. This means the decomposition is over-specified in
# time and we may not need to use all samples during our CSD computation, just
# enough to get a reliable correlation statistic. By specifying ``decim=10``,
# we use every 10th sample, which will greatly speed up the computation and
# will have a minimal effect on the CSD.
frequencies = [16, 17, 18, 19, 20]
csd_wav = csd_morlet(epochs, frequencies, decim=10, n_jobs=n_jobs)
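# Editorial note (not part of the original example): with ``decim=10`` and the
# raw sampling rate of ~600 Hz mentioned above, the time-frequency
# decomposition entering the CSD computation is effectively sampled at ~60 Hz.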
# %%
# The resulting :class:`mne.time_frequency.CrossSpectralDensity` objects have a
# plotting function we can use to compare the results of the different methods.
# We're plotting the mean CSD across frequencies.
csd_fft.mean().plot()
plt.suptitle('short-term Fourier transform')
csd_mt.mean().plot()
plt.suptitle('adaptive multitapers')
csd_wav.mean().plot()
plt.suptitle('Morlet wavelet transform')
| bsd-3-clause |
inonit/wagtail | wagtail/wagtailcore/tests/test_blocks.py | 4 | 62060 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
from django import forms
from django.forms.utils import ErrorList
from django.core.exceptions import ValidationError
from django.test import TestCase, SimpleTestCase
from django.utils.safestring import mark_safe, SafeData
from wagtail.wagtailcore import blocks
from wagtail.wagtailcore.rich_text import RichText
from wagtail.wagtailcore.models import Page
from wagtail.tests.testapp.blocks import SectionBlock
import base64
class TestFieldBlock(unittest.TestCase):
def test_charfield_render(self):
block = blocks.CharBlock()
html = block.render("Hello world!")
self.assertEqual(html, "Hello world!")
def test_charfield_render_form(self):
block = blocks.CharBlock()
html = block.render_form("Hello world!")
self.assertIn('<div class="field char_field widget-text_input">', html)
self.assertIn('<input id="" name="" placeholder="" type="text" value="Hello world!" />', html)
def test_charfield_render_form_with_prefix(self):
block = blocks.CharBlock()
html = block.render_form("Hello world!", prefix='foo')
self.assertIn('<input id="foo" name="foo" placeholder="" type="text" value="Hello world!" />', html)
def test_charfield_render_form_with_error(self):
block = blocks.CharBlock()
html = block.render_form(
"Hello world!",
errors=ErrorList([ValidationError("This field is required.")]))
self.assertIn('This field is required.', html)
def test_charfield_searchable_content(self):
block = blocks.CharBlock()
content = block.get_searchable_content("Hello world!")
self.assertEqual(content, ["Hello world!"])
def test_choicefield_render(self):
class ChoiceBlock(blocks.FieldBlock):
field = forms.ChoiceField(choices=(
('choice-1', "Choice 1"),
('choice-2', "Choice 2"),
))
block = ChoiceBlock()
html = block.render('choice-2')
self.assertEqual(html, "choice-2")
def test_choicefield_render_form(self):
class ChoiceBlock(blocks.FieldBlock):
field = forms.ChoiceField(choices=(
('choice-1', "Choice 1"),
('choice-2', "Choice 2"),
))
block = ChoiceBlock()
html = block.render_form('choice-2')
self.assertIn('<div class="field choice_field widget-select">', html)
self.assertIn('<select id="" name="" placeholder="">', html)
self.assertIn('<option value="choice-1">Choice 1</option>', html)
self.assertIn('<option value="choice-2" selected="selected">Choice 2</option>', html)
def test_searchable_content(self):
"""
FieldBlock should not return anything for `get_searchable_content` by
default. Subclasses are free to override it and provide relevant
content.
"""
class CustomBlock(blocks.FieldBlock):
field = forms.CharField(required=True)
block = CustomBlock()
self.assertEqual(block.get_searchable_content("foo bar"), [])
def test_form_handling_is_independent_of_serialisation(self):
class Base64EncodingCharBlock(blocks.CharBlock):
"""A CharBlock with a deliberately perverse JSON (de)serialisation format
so that it visibly blows up if we call to_python / get_prep_value where we shouldn't"""
def to_python(self, jsonish_value):
# decode as base64 on the way out of the JSON serialisation
return base64.b64decode(jsonish_value)
def get_prep_value(self, native_value):
# encode as base64 on the way into the JSON serialisation
return base64.b64encode(native_value)
block = Base64EncodingCharBlock()
form_html = block.render_form('hello world', 'title')
self.assertIn('value="hello world"', form_html)
value_from_form = block.value_from_datadict({'title': 'hello world'}, {}, 'title')
self.assertEqual('hello world', value_from_form)
class TestRichTextBlock(TestCase):
fixtures = ['test.json']
def test_get_default_with_fallback_value(self):
default_value = blocks.RichTextBlock().get_default()
self.assertIsInstance(default_value, RichText)
self.assertEqual(default_value.source, '')
def test_get_default_with_default_none(self):
default_value = blocks.RichTextBlock(default=None).get_default()
self.assertIsInstance(default_value, RichText)
self.assertEqual(default_value.source, '')
def test_get_default_with_empty_string(self):
default_value = blocks.RichTextBlock(default='').get_default()
self.assertIsInstance(default_value, RichText)
self.assertEqual(default_value.source, '')
def test_get_default_with_nonempty_string(self):
default_value = blocks.RichTextBlock(default='<p>foo</p>').get_default()
self.assertIsInstance(default_value, RichText)
self.assertEqual(default_value.source, '<p>foo</p>')
def test_get_default_with_richtext_value(self):
default_value = blocks.RichTextBlock(default=RichText('<p>foo</p>')).get_default()
self.assertIsInstance(default_value, RichText)
self.assertEqual(default_value.source, '<p>foo</p>')
def test_render(self):
block = blocks.RichTextBlock()
value = RichText('<p>Merry <a linktype="page" id="4">Christmas</a>!</p>')
result = block.render(value)
self.assertEqual(
result, '<div class="rich-text"><p>Merry <a href="/events/christmas/">Christmas</a>!</p></div>'
)
def test_render_form(self):
"""
render_form should produce the editor-specific rendition of the rich text value
(which includes e.g. 'data-linktype' attributes on <a> elements)
"""
block = blocks.RichTextBlock()
value = RichText('<p>Merry <a linktype="page" id="4">Christmas</a>!</p>')
result = block.render_form(value, prefix='richtext')
self.assertIn(
(
'<p>Merry <a data-linktype="page" data-id="4"'
' href="/events/christmas/">Christmas</a>!</p>'
),
result
)
def test_validate_required_richtext_block(self):
block = blocks.RichTextBlock()
with self.assertRaises(ValidationError):
block.clean(RichText(''))
def test_validate_non_required_richtext_block(self):
block = blocks.RichTextBlock(required=False)
result = block.clean(RichText(''))
self.assertIsInstance(result, RichText)
self.assertEqual(result.source, '')
class TestChoiceBlock(unittest.TestCase):
def setUp(self):
from django.db.models.fields import BLANK_CHOICE_DASH
self.blank_choice_dash_label = BLANK_CHOICE_DASH[0][1]
def test_render_required_choice_block(self):
block = blocks.ChoiceBlock(choices=[('tea', 'Tea'), ('coffee', 'Coffee')])
html = block.render_form('coffee', prefix='beverage')
self.assertIn('<select id="beverage" name="beverage" placeholder="">', html)
# blank option should still be rendered for required fields
# (we may want it as an initial value)
self.assertIn('<option value="">%s</option>' % self.blank_choice_dash_label, html)
self.assertIn('<option value="tea">Tea</option>', html)
self.assertIn('<option value="coffee" selected="selected">Coffee</option>', html)
def test_validate_required_choice_block(self):
block = blocks.ChoiceBlock(choices=[('tea', 'Tea'), ('coffee', 'Coffee')])
self.assertEqual(block.clean('coffee'), 'coffee')
with self.assertRaises(ValidationError):
block.clean('whisky')
with self.assertRaises(ValidationError):
block.clean('')
with self.assertRaises(ValidationError):
block.clean(None)
def test_render_non_required_choice_block(self):
block = blocks.ChoiceBlock(choices=[('tea', 'Tea'), ('coffee', 'Coffee')], required=False)
html = block.render_form('coffee', prefix='beverage')
self.assertIn('<select id="beverage" name="beverage" placeholder="">', html)
self.assertIn('<option value="">%s</option>' % self.blank_choice_dash_label, html)
self.assertIn('<option value="tea">Tea</option>', html)
self.assertIn('<option value="coffee" selected="selected">Coffee</option>', html)
def test_validate_non_required_choice_block(self):
block = blocks.ChoiceBlock(choices=[('tea', 'Tea'), ('coffee', 'Coffee')], required=False)
self.assertEqual(block.clean('coffee'), 'coffee')
with self.assertRaises(ValidationError):
block.clean('whisky')
self.assertEqual(block.clean(''), '')
self.assertEqual(block.clean(None), '')
def test_render_choice_block_with_existing_blank_choice(self):
block = blocks.ChoiceBlock(
choices=[('tea', 'Tea'), ('coffee', 'Coffee'), ('', 'No thanks')],
required=False)
html = block.render_form(None, prefix='beverage')
self.assertIn('<select id="beverage" name="beverage" placeholder="">', html)
self.assertNotIn('<option value="">%s</option>' % self.blank_choice_dash_label, html)
self.assertIn('<option value="" selected="selected">No thanks</option>', html)
self.assertIn('<option value="tea">Tea</option>', html)
self.assertIn('<option value="coffee">Coffee</option>', html)
def test_named_groups_without_blank_option(self):
block = blocks.ChoiceBlock(
choices=[
('Alcoholic', [
('gin', 'Gin'),
('whisky', 'Whisky'),
]),
('Non-alcoholic', [
('tea', 'Tea'),
('coffee', 'Coffee'),
]),
])
# test rendering with the blank option selected
html = block.render_form(None, prefix='beverage')
self.assertIn('<select id="beverage" name="beverage" placeholder="">', html)
self.assertIn('<option value="" selected="selected">%s</option>' % self.blank_choice_dash_label, html)
self.assertIn('<optgroup label="Alcoholic">', html)
self.assertIn('<option value="tea">Tea</option>', html)
# test rendering with a non-blank option selected
html = block.render_form('tea', prefix='beverage')
self.assertIn('<select id="beverage" name="beverage" placeholder="">', html)
self.assertIn('<option value="">%s</option>' % self.blank_choice_dash_label, html)
self.assertIn('<optgroup label="Alcoholic">', html)
self.assertIn('<option value="tea" selected="selected">Tea</option>', html)
def test_named_groups_with_blank_option(self):
block = blocks.ChoiceBlock(
choices=[
('Alcoholic', [
('gin', 'Gin'),
('whisky', 'Whisky'),
]),
('Non-alcoholic', [
('tea', 'Tea'),
('coffee', 'Coffee'),
]),
('Not thirsty', [
('', 'No thanks')
]),
],
required=False)
# test rendering with the blank option selected
html = block.render_form(None, prefix='beverage')
self.assertIn('<select id="beverage" name="beverage" placeholder="">', html)
self.assertNotIn('<option value="">%s</option>' % self.blank_choice_dash_label, html)
self.assertNotIn('<option value="" selected="selected">%s</option>' % self.blank_choice_dash_label, html)
self.assertIn('<optgroup label="Alcoholic">', html)
self.assertIn('<option value="tea">Tea</option>', html)
self.assertIn('<option value="" selected="selected">No thanks</option>', html)
# test rendering with a non-blank option selected
html = block.render_form('tea', prefix='beverage')
self.assertIn('<select id="beverage" name="beverage" placeholder="">', html)
self.assertNotIn('<option value="">%s</option>' % self.blank_choice_dash_label, html)
self.assertNotIn('<option value="" selected="selected">%s</option>' % self.blank_choice_dash_label, html)
self.assertIn('<optgroup label="Alcoholic">', html)
self.assertIn('<option value="tea" selected="selected">Tea</option>', html)
def test_subclassing(self):
class BeverageChoiceBlock(blocks.ChoiceBlock):
choices = [
('tea', 'Tea'),
('coffee', 'Coffee'),
]
block = BeverageChoiceBlock(required=False)
html = block.render_form('tea', prefix='beverage')
self.assertIn('<select id="beverage" name="beverage" placeholder="">', html)
self.assertIn('<option value="tea" selected="selected">Tea</option>', html)
# subclasses of ChoiceBlock should deconstruct to a basic ChoiceBlock for migrations
self.assertEqual(
block.deconstruct(),
(
'wagtail.wagtailcore.blocks.ChoiceBlock',
[],
{
'choices': [('tea', 'Tea'), ('coffee', 'Coffee')],
'required': False,
},
)
)
def test_searchable_content(self):
block = blocks.ChoiceBlock(choices=[
('choice-1', "Choice 1"),
('choice-2', "Choice 2"),
])
self.assertEqual(block.get_searchable_content("choice-1"),
["Choice 1"])
def test_optgroup_searchable_content(self):
block = blocks.ChoiceBlock(choices=[
('Section 1', [
('1-1', "Block 1"),
('1-2', "Block 2"),
]),
('Section 2', [
('2-1', "Block 1"),
('2-2', "Block 2"),
]),
])
self.assertEqual(block.get_searchable_content("2-2"),
["Section 2", "Block 2"])
def test_invalid_searchable_content(self):
block = blocks.ChoiceBlock(choices=[
('one', 'One'),
('two', 'Two'),
])
self.assertEqual(block.get_searchable_content('three'), [])
class TestRawHTMLBlock(unittest.TestCase):
def test_get_default_with_fallback_value(self):
default_value = blocks.RawHTMLBlock().get_default()
self.assertEqual(default_value, '')
self.assertIsInstance(default_value, SafeData)
def test_get_default_with_none(self):
default_value = blocks.RawHTMLBlock(default=None).get_default()
self.assertEqual(default_value, '')
self.assertIsInstance(default_value, SafeData)
def test_get_default_with_empty_string(self):
default_value = blocks.RawHTMLBlock(default='').get_default()
self.assertEqual(default_value, '')
self.assertIsInstance(default_value, SafeData)
def test_get_default_with_nonempty_string(self):
default_value = blocks.RawHTMLBlock(default='<blink>BÖÖM</blink>').get_default()
self.assertEqual(default_value, '<blink>BÖÖM</blink>')
self.assertIsInstance(default_value, SafeData)
def test_serialize(self):
block = blocks.RawHTMLBlock()
result = block.get_prep_value(mark_safe('<blink>BÖÖM</blink>'))
self.assertEqual(result, '<blink>BÖÖM</blink>')
self.assertNotIsInstance(result, SafeData)
def test_deserialize(self):
block = blocks.RawHTMLBlock()
result = block.to_python('<blink>BÖÖM</blink>')
self.assertEqual(result, '<blink>BÖÖM</blink>')
self.assertIsInstance(result, SafeData)
def test_render(self):
block = blocks.RawHTMLBlock()
result = block.render(mark_safe('<blink>BÖÖM</blink>'))
self.assertEqual(result, '<blink>BÖÖM</blink>')
self.assertIsInstance(result, SafeData)
def test_render_form(self):
block = blocks.RawHTMLBlock()
result = block.render_form(mark_safe('<blink>BÖÖM</blink>'), prefix='rawhtml')
self.assertIn('<textarea ', result)
self.assertIn('name="rawhtml"', result)
self.assertIn('<blink>BÖÖM</blink>', result)
def test_form_response(self):
block = blocks.RawHTMLBlock()
result = block.value_from_datadict({'rawhtml': '<blink>BÖÖM</blink>'}, {}, prefix='rawhtml')
self.assertEqual(result, '<blink>BÖÖM</blink>')
self.assertIsInstance(result, SafeData)
def test_clean_required_field(self):
block = blocks.RawHTMLBlock()
result = block.clean(mark_safe('<blink>BÖÖM</blink>'))
self.assertEqual(result, '<blink>BÖÖM</blink>')
self.assertIsInstance(result, SafeData)
with self.assertRaises(ValidationError):
block.clean(mark_safe(''))
def test_clean_nonrequired_field(self):
block = blocks.RawHTMLBlock(required=False)
result = block.clean(mark_safe('<blink>BÖÖM</blink>'))
self.assertEqual(result, '<blink>BÖÖM</blink>')
self.assertIsInstance(result, SafeData)
result = block.clean(mark_safe(''))
self.assertEqual(result, '')
self.assertIsInstance(result, SafeData)
class TestMeta(unittest.TestCase):
def test_set_template_with_meta(self):
class HeadingBlock(blocks.CharBlock):
class Meta:
template = 'heading.html'
block = HeadingBlock()
self.assertEqual(block.meta.template, 'heading.html')
def test_set_template_with_constructor(self):
block = blocks.CharBlock(template='heading.html')
self.assertEqual(block.meta.template, 'heading.html')
def test_set_template_with_constructor_overrides_meta(self):
class HeadingBlock(blocks.CharBlock):
class Meta:
template = 'heading.html'
block = HeadingBlock(template='subheading.html')
self.assertEqual(block.meta.template, 'subheading.html')
def test_meta_multiple_inheritance(self):
class HeadingBlock(blocks.CharBlock):
class Meta:
template = 'heading.html'
test = 'Foo'
class SubHeadingBlock(HeadingBlock):
class Meta:
template = 'subheading.html'
block = SubHeadingBlock()
self.assertEqual(block.meta.template, 'subheading.html')
self.assertEqual(block.meta.test, 'Foo')
class TestStructBlock(SimpleTestCase):
def test_initialisation(self):
block = blocks.StructBlock([
('title', blocks.CharBlock()),
('link', blocks.URLBlock()),
])
self.assertEqual(list(block.child_blocks.keys()), ['title', 'link'])
def test_initialisation_from_subclass(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = LinkBlock()
self.assertEqual(list(block.child_blocks.keys()), ['title', 'link'])
def test_initialisation_from_subclass_with_extra(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = LinkBlock([
('classname', blocks.CharBlock())
])
self.assertEqual(list(block.child_blocks.keys()), ['title', 'link', 'classname'])
    def test_initialisation_with_multiple_subclasses(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
class StyledLinkBlock(LinkBlock):
classname = blocks.CharBlock()
block = StyledLinkBlock()
self.assertEqual(list(block.child_blocks.keys()), ['title', 'link', 'classname'])
def test_initialisation_with_mixins(self):
"""
The order of fields of classes with multiple parent classes is slightly
surprising at first. Child fields are inherited in a bottom-up order,
by traversing the MRO in reverse. In the example below,
``StyledLinkBlock`` will have an MRO of::
[StyledLinkBlock, StylingMixin, LinkBlock, StructBlock, ...]
This will result in ``classname`` appearing *after* ``title`` and
        ``link`` in ``StyledLinkBlock.child_blocks``, even though
``StylingMixin`` appeared before ``LinkBlock``.
"""
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
class StylingMixin(blocks.StructBlock):
classname = blocks.CharBlock()
class StyledLinkBlock(StylingMixin, LinkBlock):
source = blocks.CharBlock()
block = StyledLinkBlock()
self.assertEqual(list(block.child_blocks.keys()),
['title', 'link', 'classname', 'source'])
def test_render(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = LinkBlock()
html = block.render(block.to_python({
'title': "Wagtail site",
'link': 'http://www.wagtail.io',
}))
expected_html = '\n'.join([
'<dl>',
'<dt>title</dt>',
'<dd>Wagtail site</dd>',
'<dt>link</dt>',
'<dd>http://www.wagtail.io</dd>',
'</dl>',
])
self.assertHTMLEqual(html, expected_html)
def test_render_unknown_field(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = LinkBlock()
html = block.render(block.to_python({
'title': "Wagtail site",
'link': 'http://www.wagtail.io',
'image': 10,
}))
self.assertIn('<dt>title</dt>', html)
self.assertIn('<dd>Wagtail site</dd>', html)
self.assertIn('<dt>link</dt>', html)
self.assertIn('<dd>http://www.wagtail.io</dd>', html)
# Don't render the extra item
self.assertNotIn('<dt>image</dt>', html)
def test_render_bound_block(self):
# the string representation of a bound block should be the value as rendered by
# the associated block
class SectionBlock(blocks.StructBlock):
title = blocks.CharBlock()
body = blocks.RichTextBlock()
block = SectionBlock()
struct_value = block.to_python({
'title': 'hello',
'body': '<b>world</b>',
})
body_bound_block = struct_value.bound_blocks['body']
expected = '<div class="rich-text"><b>world</b></div>'
self.assertEqual(str(body_bound_block), expected)
def test_render_form(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = LinkBlock()
html = block.render_form(block.to_python({
'title': "Wagtail site",
'link': 'http://www.wagtail.io',
}), prefix='mylink')
self.assertIn('<div class="struct-block">', html)
self.assertIn('<div class="field char_field widget-text_input fieldname-title">', html)
self.assertIn(
'<input id="mylink-title" name="mylink-title" placeholder="Title" type="text" value="Wagtail site" />', html
)
self.assertIn('<div class="field url_field widget-url_input fieldname-link">', html)
self.assertIn(
(
'<input id="mylink-link" name="mylink-link" placeholder="Link"'
' type="url" value="http://www.wagtail.io" />'
),
html
)
def test_render_form_unknown_field(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = LinkBlock()
html = block.render_form(block.to_python({
'title': "Wagtail site",
'link': 'http://www.wagtail.io',
'image': 10,
}), prefix='mylink')
self.assertIn(
(
'<input id="mylink-title" name="mylink-title" placeholder="Title"'
' type="text" value="Wagtail site" />'
),
html
)
self.assertIn(
(
'<input id="mylink-link" name="mylink-link" placeholder="Link" type="url"'
' value="http://www.wagtail.io" />'
),
html
)
# Don't render the extra field
self.assertNotIn('mylink-image', html)
def test_render_form_uses_default_value(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock(default="Torchbox")
link = blocks.URLBlock(default="http://www.torchbox.com")
block = LinkBlock()
html = block.render_form(block.to_python({}), prefix='mylink')
self.assertIn(
'<input id="mylink-title" name="mylink-title" placeholder="Title" type="text" value="Torchbox" />', html
)
self.assertIn(
(
'<input id="mylink-link" name="mylink-link" placeholder="Link"'
' type="url" value="http://www.torchbox.com" />'
),
html
)
def test_render_form_with_help_text(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
class Meta:
help_text = "Self-promotion is encouraged"
block = LinkBlock()
html = block.render_form(block.to_python({
'title': "Wagtail site",
'link': 'http://www.wagtail.io',
}), prefix='mylink')
self.assertIn('<div class="object-help help">Self-promotion is encouraged</div>', html)
# check it can be overridden in the block constructor
block = LinkBlock(help_text="Self-promotion is discouraged")
html = block.render_form(block.to_python({
'title': "Wagtail site",
'link': 'http://www.wagtail.io',
}), prefix='mylink')
self.assertIn('<div class="object-help help">Self-promotion is discouraged</div>', html)
def test_media_inheritance(self):
class ScriptedCharBlock(blocks.CharBlock):
media = forms.Media(js=['scripted_char_block.js'])
class LinkBlock(blocks.StructBlock):
title = ScriptedCharBlock(default="Torchbox")
link = blocks.URLBlock(default="http://www.torchbox.com")
block = LinkBlock()
self.assertIn('scripted_char_block.js', ''.join(block.all_media().render_js()))
def test_html_declaration_inheritance(self):
class CharBlockWithDeclarations(blocks.CharBlock):
def html_declarations(self):
return '<script type="text/x-html-template">hello world</script>'
class LinkBlock(blocks.StructBlock):
title = CharBlockWithDeclarations(default="Torchbox")
link = blocks.URLBlock(default="http://www.torchbox.com")
block = LinkBlock()
self.assertIn('<script type="text/x-html-template">hello world</script>', block.all_html_declarations())
def test_searchable_content(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = LinkBlock()
content = block.get_searchable_content(block.to_python({
'title': "Wagtail site",
'link': 'http://www.wagtail.io',
}))
self.assertEqual(content, ["Wagtail site"])
def test_value_from_datadict(self):
block = blocks.StructBlock([
('title', blocks.CharBlock()),
('link', blocks.URLBlock()),
])
struct_val = block.value_from_datadict({
'mylink-title': "Torchbox",
'mylink-link': "http://www.torchbox.com"
}, {}, 'mylink')
self.assertEqual(struct_val['title'], "Torchbox")
self.assertEqual(struct_val['link'], "http://www.torchbox.com")
self.assertTrue(isinstance(struct_val, blocks.StructValue))
self.assertTrue(isinstance(struct_val.bound_blocks['link'].block, blocks.URLBlock))
def test_default_is_returned_as_structvalue(self):
"""When returning the default value of a StructBlock (e.g. because it's
a child of another StructBlock, and the outer value is missing that key)
we should receive it as a StructValue, not just a plain dict"""
class PersonBlock(blocks.StructBlock):
first_name = blocks.CharBlock()
surname = blocks.CharBlock()
class EventBlock(blocks.StructBlock):
title = blocks.CharBlock()
guest_speaker = PersonBlock(default={'first_name': 'Ed', 'surname': 'Balls'})
event_block = EventBlock()
event = event_block.to_python({'title': 'Birthday party'})
self.assertEqual(event['guest_speaker']['first_name'], 'Ed')
self.assertTrue(isinstance(event['guest_speaker'], blocks.StructValue))
def test_clean(self):
block = blocks.StructBlock([
('title', blocks.CharBlock()),
('link', blocks.URLBlock()),
])
value = block.to_python({'title': 'Torchbox', 'link': 'http://www.torchbox.com/'})
clean_value = block.clean(value)
self.assertTrue(isinstance(clean_value, blocks.StructValue))
self.assertEqual(clean_value['title'], 'Torchbox')
value = block.to_python({'title': 'Torchbox', 'link': 'not a url'})
with self.assertRaises(ValidationError):
block.clean(value)
def test_bound_blocks_are_available_on_template(self):
"""
Test that we are able to use value.bound_blocks within templates
to access a child block's own HTML rendering
"""
block = SectionBlock()
value = block.to_python({'title': 'Hello', 'body': '<i>italic</i> world'})
result = block.render(value)
self.assertEqual(result, """<h1>Hello</h1><div class="rich-text"><i>italic</i> world</div>""")
class TestListBlock(unittest.TestCase):
def test_initialise_with_class(self):
block = blocks.ListBlock(blocks.CharBlock)
# Child block should be initialised for us
self.assertIsInstance(block.child_block, blocks.CharBlock)
def test_initialise_with_instance(self):
child_block = blocks.CharBlock()
block = blocks.ListBlock(child_block)
self.assertEqual(block.child_block, child_block)
def render(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = blocks.ListBlock(LinkBlock())
return block.render([
{
'title': "Wagtail",
'link': 'http://www.wagtail.io',
},
{
'title': "Django",
'link': 'http://www.djangoproject.com',
},
])
def test_render_uses_ul(self):
html = self.render()
self.assertIn('<ul>', html)
self.assertIn('</ul>', html)
def test_render_uses_li(self):
html = self.render()
self.assertIn('<li>', html)
self.assertIn('</li>', html)
def render_form(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = blocks.ListBlock(LinkBlock)
html = block.render_form([
{
'title': "Wagtail",
'link': 'http://www.wagtail.io',
},
{
'title': "Django",
'link': 'http://www.djangoproject.com',
},
], prefix='links')
return html
def test_render_form_wrapper_class(self):
html = self.render_form()
self.assertIn('<div class="sequence-container sequence-type-list">', html)
def test_render_form_count_field(self):
html = self.render_form()
self.assertIn('<input type="hidden" name="links-count" id="links-count" value="2">', html)
def test_render_form_delete_field(self):
html = self.render_form()
self.assertIn('<input type="hidden" id="links-0-deleted" name="links-0-deleted" value="">', html)
def test_render_form_order_fields(self):
html = self.render_form()
self.assertIn('<input type="hidden" id="links-0-order" name="links-0-order" value="0">', html)
self.assertIn('<input type="hidden" id="links-1-order" name="links-1-order" value="1">', html)
def test_render_form_labels(self):
html = self.render_form()
self.assertIn('<label for="links-0-value-title">Title</label>', html)
self.assertIn('<label for="links-0-value-link">Link</label>', html)
def test_render_form_values(self):
html = self.render_form()
self.assertIn(
(
'<input id="links-0-value-title" name="links-0-value-title" placeholder="Title"'
' type="text" value="Wagtail" />'
),
html
)
self.assertIn(
(
'<input id="links-0-value-link" name="links-0-value-link" placeholder="Link" type="url"'
' value="http://www.wagtail.io" />'
),
html
)
self.assertIn(
(
'<input id="links-1-value-title" name="links-1-value-title" placeholder="Title" type="text"'
' value="Django" />'
),
html
)
self.assertIn(
(
'<input id="links-1-value-link" name="links-1-value-link" placeholder="Link"'
' type="url" value="http://www.djangoproject.com" />'
),
html
)
def test_html_declarations(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = blocks.ListBlock(LinkBlock)
html = block.html_declarations()
self.assertIn(
'<input id="__PREFIX__-value-title" name="__PREFIX__-value-title" placeholder="Title" type="text" />',
html
)
self.assertIn(
'<input id="__PREFIX__-value-link" name="__PREFIX__-value-link" placeholder="Link" type="url" />',
html
)
def test_html_declarations_uses_default(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock(default="Github")
link = blocks.URLBlock(default="http://www.github.com")
block = blocks.ListBlock(LinkBlock)
html = block.html_declarations()
self.assertIn(
(
'<input id="__PREFIX__-value-title" name="__PREFIX__-value-title" placeholder="Title"'
' type="text" value="Github" />'
),
html
)
self.assertIn(
(
'<input id="__PREFIX__-value-link" name="__PREFIX__-value-link" placeholder="Link"'
' type="url" value="http://www.github.com" />'
),
html
)
def test_media_inheritance(self):
class ScriptedCharBlock(blocks.CharBlock):
media = forms.Media(js=['scripted_char_block.js'])
block = blocks.ListBlock(ScriptedCharBlock())
self.assertIn('scripted_char_block.js', ''.join(block.all_media().render_js()))
def test_html_declaration_inheritance(self):
class CharBlockWithDeclarations(blocks.CharBlock):
def html_declarations(self):
return '<script type="text/x-html-template">hello world</script>'
block = blocks.ListBlock(CharBlockWithDeclarations())
self.assertIn('<script type="text/x-html-template">hello world</script>', block.all_html_declarations())
def test_searchable_content(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = blocks.ListBlock(LinkBlock())
content = block.get_searchable_content([
{
'title': "Wagtail",
'link': 'http://www.wagtail.io',
},
{
'title': "Django",
'link': 'http://www.djangoproject.com',
},
])
self.assertEqual(content, ["Wagtail", "Django"])
def test_ordering_in_form_submission_uses_order_field(self):
block = blocks.ListBlock(blocks.CharBlock())
# check that items are ordered by the 'order' field, not the order they appear in the form
post_data = {'shoppinglist-count': '3'}
for i in range(0, 3):
post_data.update({
'shoppinglist-%d-deleted' % i: '',
'shoppinglist-%d-order' % i: str(2 - i),
'shoppinglist-%d-value' % i: "item %d" % i
})
block_value = block.value_from_datadict(post_data, {}, 'shoppinglist')
self.assertEqual(block_value[2], "item 0")
def test_ordering_in_form_submission_is_numeric(self):
block = blocks.ListBlock(blocks.CharBlock())
# check that items are ordered by 'order' numerically, not alphabetically
post_data = {'shoppinglist-count': '12'}
for i in range(0, 12):
post_data.update({
'shoppinglist-%d-deleted' % i: '',
'shoppinglist-%d-order' % i: str(i),
'shoppinglist-%d-value' % i: "item %d" % i
})
block_value = block.value_from_datadict(post_data, {}, 'shoppinglist')
self.assertEqual(block_value[2], "item 2")
def test_can_specify_default(self):
class ShoppingListBlock(blocks.StructBlock):
shop = blocks.CharBlock()
items = blocks.ListBlock(blocks.CharBlock(), default=['peas', 'beans', 'carrots'])
block = ShoppingListBlock()
# the value here does not specify an 'items' field, so this should revert to the ListBlock's default
form_html = block.render_form(block.to_python({'shop': 'Tesco'}), prefix='shoppinglist')
self.assertIn(
'<input type="hidden" name="shoppinglist-items-count" id="shoppinglist-items-count" value="3">',
form_html
)
self.assertIn('value="peas"', form_html)
def test_default_default(self):
"""
if no explicit 'default' is set on the ListBlock, it should fall back on
a single instance of the child block in its default state.
"""
class ShoppingListBlock(blocks.StructBlock):
shop = blocks.CharBlock()
items = blocks.ListBlock(blocks.CharBlock(default='chocolate'))
block = ShoppingListBlock()
# the value here does not specify an 'items' field, so this should revert to the ListBlock's default
form_html = block.render_form(block.to_python({'shop': 'Tesco'}), prefix='shoppinglist')
self.assertIn(
'<input type="hidden" name="shoppinglist-items-count" id="shoppinglist-items-count" value="1">',
form_html
)
self.assertIn('value="chocolate"', form_html)
class TestStreamBlock(unittest.TestCase):
def test_initialisation(self):
block = blocks.StreamBlock([
('heading', blocks.CharBlock()),
('paragraph', blocks.CharBlock()),
])
self.assertEqual(list(block.child_blocks.keys()), ['heading', 'paragraph'])
def test_initialisation_with_binary_string_names(self):
# migrations will sometimes write out names as binary strings, just to keep us on our toes
block = blocks.StreamBlock([
(b'heading', blocks.CharBlock()),
(b'paragraph', blocks.CharBlock()),
])
self.assertEqual(list(block.child_blocks.keys()), [b'heading', b'paragraph'])
def test_initialisation_from_subclass(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
self.assertEqual(list(block.child_blocks.keys()), ['heading', 'paragraph'])
def test_initialisation_from_subclass_with_extra(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock([
('intro', blocks.CharBlock())
])
self.assertEqual(list(block.child_blocks.keys()), ['heading', 'paragraph', 'intro'])
    def test_initialisation_with_multiple_subclasses(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
class ArticleWithIntroBlock(ArticleBlock):
intro = blocks.CharBlock()
block = ArticleWithIntroBlock()
self.assertEqual(list(block.child_blocks.keys()), ['heading', 'paragraph', 'intro'])
def test_initialisation_with_mixins(self):
"""
The order of child blocks of ``StreamBlock``\s with multiple parent
classes is slightly surprising at first. Child blocks are inherited in
a bottom-up order, by traversing the MRO in reverse. In the example
below, ``ArticleWithIntroBlock`` will have an MRO of::
[ArticleWithIntroBlock, IntroMixin, ArticleBlock, StreamBlock, ...]
This will result in ``intro`` appearing *after* ``heading`` and
``paragraph`` in ``ArticleWithIntroBlock.child_blocks``, even though
``IntroMixin`` appeared before ``ArticleBlock``.
"""
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
class IntroMixin(blocks.StreamBlock):
intro = blocks.CharBlock()
class ArticleWithIntroBlock(IntroMixin, ArticleBlock):
by_line = blocks.CharBlock()
block = ArticleWithIntroBlock()
self.assertEqual(list(block.child_blocks.keys()),
['heading', 'paragraph', 'intro', 'by_line'])
def render_article(self, data):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.RichTextBlock()
block = ArticleBlock()
value = block.to_python(data)
return block.render(value)
def test_render(self):
html = self.render_article([
{
'type': 'heading',
'value': "My title",
},
{
'type': 'paragraph',
'value': 'My <i>first</i> paragraph',
},
{
'type': 'paragraph',
'value': 'My second paragraph',
},
])
self.assertIn('<div class="block-heading">My title</div>', html)
self.assertIn('<div class="block-paragraph"><div class="rich-text">My <i>first</i> paragraph</div></div>', html)
self.assertIn('<div class="block-paragraph"><div class="rich-text">My second paragraph</div></div>', html)
def test_render_unknown_type(self):
# This can happen if a developer removes a type from their StreamBlock
html = self.render_article([
{
'type': 'foo',
'value': "Hello",
},
{
'type': 'paragraph',
'value': 'My first paragraph',
},
])
self.assertNotIn('foo', html)
self.assertNotIn('Hello', html)
self.assertIn('<div class="block-paragraph"><div class="rich-text">My first paragraph</div></div>', html)
def render_form(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
value = block.to_python([
{
'type': 'heading',
'value': "My title",
},
{
'type': 'paragraph',
'value': 'My first paragraph',
},
{
'type': 'paragraph',
'value': 'My second paragraph',
},
])
return block.render_form(value, prefix='myarticle')
def test_render_form_wrapper_class(self):
html = self.render_form()
self.assertIn('<div class="sequence-container sequence-type-stream">', html)
def test_render_form_count_field(self):
html = self.render_form()
self.assertIn('<input type="hidden" name="myarticle-count" id="myarticle-count" value="3">', html)
def test_render_form_delete_field(self):
html = self.render_form()
self.assertIn('<input type="hidden" id="myarticle-0-deleted" name="myarticle-0-deleted" value="">', html)
def test_render_form_order_fields(self):
html = self.render_form()
self.assertIn('<input type="hidden" id="myarticle-0-order" name="myarticle-0-order" value="0">', html)
self.assertIn('<input type="hidden" id="myarticle-1-order" name="myarticle-1-order" value="1">', html)
self.assertIn('<input type="hidden" id="myarticle-2-order" name="myarticle-2-order" value="2">', html)
def test_render_form_type_fields(self):
html = self.render_form()
self.assertIn('<input type="hidden" id="myarticle-0-type" name="myarticle-0-type" value="heading">', html)
self.assertIn('<input type="hidden" id="myarticle-1-type" name="myarticle-1-type" value="paragraph">', html)
self.assertIn('<input type="hidden" id="myarticle-2-type" name="myarticle-2-type" value="paragraph">', html)
def test_render_form_value_fields(self):
html = self.render_form()
self.assertIn(
(
'<input id="myarticle-0-value" name="myarticle-0-value" placeholder="Heading"'
' type="text" value="My title" />'
),
html
)
self.assertIn(
(
'<input id="myarticle-1-value" name="myarticle-1-value" placeholder="Paragraph"'
' type="text" value="My first paragraph" />'
),
html
)
self.assertIn(
(
'<input id="myarticle-2-value" name="myarticle-2-value" placeholder="Paragraph"'
' type="text" value="My second paragraph" />'
),
html
)
def test_validation_errors(self):
class ValidatedBlock(blocks.StreamBlock):
char = blocks.CharBlock()
url = blocks.URLBlock()
block = ValidatedBlock()
value = [
blocks.BoundBlock(
block=block.child_blocks['char'],
value='',
),
blocks.BoundBlock(
block=block.child_blocks['char'],
value='foo',
),
blocks.BoundBlock(
block=block.child_blocks['url'],
value='http://example.com/',
),
blocks.BoundBlock(
block=block.child_blocks['url'],
value='not a url',
),
]
with self.assertRaises(ValidationError) as catcher:
block.clean(value)
self.assertEqual(catcher.exception.params, {
0: ['This field is required.'],
3: ['Enter a valid URL.'],
})
def test_html_declarations(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
html = block.html_declarations()
self.assertIn('<input id="__PREFIX__-value" name="__PREFIX__-value" placeholder="Heading" type="text" />', html)
self.assertIn(
'<input id="__PREFIX__-value" name="__PREFIX__-value" placeholder="Paragraph" type="text" />',
html
)
def test_html_declarations_uses_default(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock(default="Fish found on moon")
paragraph = blocks.CharBlock(default="Lorem ipsum dolor sit amet")
block = ArticleBlock()
html = block.html_declarations()
self.assertIn(
(
'<input id="__PREFIX__-value" name="__PREFIX__-value" placeholder="Heading"'
' type="text" value="Fish found on moon" />'
),
html
)
self.assertIn(
(
'<input id="__PREFIX__-value" name="__PREFIX__-value" placeholder="Paragraph" type="text"'
' value="Lorem ipsum dolor sit amet" />'
),
html
)
def test_media_inheritance(self):
class ScriptedCharBlock(blocks.CharBlock):
media = forms.Media(js=['scripted_char_block.js'])
class ArticleBlock(blocks.StreamBlock):
heading = ScriptedCharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
self.assertIn('scripted_char_block.js', ''.join(block.all_media().render_js()))
def test_html_declaration_inheritance(self):
class CharBlockWithDeclarations(blocks.CharBlock):
def html_declarations(self):
return '<script type="text/x-html-template">hello world</script>'
class ArticleBlock(blocks.StreamBlock):
heading = CharBlockWithDeclarations(default="Torchbox")
paragraph = blocks.CharBlock()
block = ArticleBlock()
self.assertIn('<script type="text/x-html-template">hello world</script>', block.all_html_declarations())
def test_ordering_in_form_submission_uses_order_field(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
# check that items are ordered by the 'order' field, not the order they appear in the form
post_data = {'article-count': '3'}
for i in range(0, 3):
post_data.update({
'article-%d-deleted' % i: '',
'article-%d-order' % i: str(2 - i),
'article-%d-type' % i: 'heading',
'article-%d-value' % i: "heading %d" % i
})
block_value = block.value_from_datadict(post_data, {}, 'article')
self.assertEqual(block_value[2].value, "heading 0")
def test_ordering_in_form_submission_is_numeric(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
# check that items are ordered by 'order' numerically, not alphabetically
post_data = {'article-count': '12'}
for i in range(0, 12):
post_data.update({
'article-%d-deleted' % i: '',
'article-%d-order' % i: str(i),
'article-%d-type' % i: 'heading',
'article-%d-value' % i: "heading %d" % i
})
block_value = block.value_from_datadict(post_data, {}, 'article')
self.assertEqual(block_value[2].value, "heading 2")
def test_searchable_content(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
value = block.to_python([
{
'type': 'heading',
'value': "My title",
},
{
'type': 'paragraph',
'value': 'My first paragraph',
},
{
'type': 'paragraph',
'value': 'My second paragraph',
},
])
content = block.get_searchable_content(value)
self.assertEqual(content, [
"My title",
"My first paragraph",
"My second paragraph",
])
def test_meta_default(self):
"""Test that we can specify a default value in the Meta of a StreamBlock"""
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
class Meta:
default = [('heading', 'A default heading')]
# to access the default value, we retrieve it through a StructBlock
# from a struct value that's missing that key
class ArticleContainerBlock(blocks.StructBlock):
author = blocks.CharBlock()
article = ArticleBlock()
block = ArticleContainerBlock()
struct_value = block.to_python({'author': 'Bob'})
stream_value = struct_value['article']
self.assertTrue(isinstance(stream_value, blocks.StreamValue))
self.assertEqual(len(stream_value), 1)
self.assertEqual(stream_value[0].block_type, 'heading')
self.assertEqual(stream_value[0].value, 'A default heading')
def test_constructor_default(self):
"""Test that we can specify a default value in the constructor of a StreamBlock"""
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
class Meta:
default = [('heading', 'A default heading')]
# to access the default value, we retrieve it through a StructBlock
# from a struct value that's missing that key
class ArticleContainerBlock(blocks.StructBlock):
author = blocks.CharBlock()
article = ArticleBlock(default=[('heading', 'A different default heading')])
block = ArticleContainerBlock()
struct_value = block.to_python({'author': 'Bob'})
stream_value = struct_value['article']
self.assertTrue(isinstance(stream_value, blocks.StreamValue))
self.assertEqual(len(stream_value), 1)
self.assertEqual(stream_value[0].block_type, 'heading')
self.assertEqual(stream_value[0].value, 'A different default heading')
class TestPageChooserBlock(TestCase):
fixtures = ['test.json']
def test_serialize(self):
"""The value of a PageChooserBlock (a Page object) should serialize to an ID"""
block = blocks.PageChooserBlock()
christmas_page = Page.objects.get(slug='christmas')
self.assertEqual(block.get_prep_value(christmas_page), christmas_page.id)
# None should serialize to None
self.assertEqual(block.get_prep_value(None), None)
def test_deserialize(self):
"""The serialized value of a PageChooserBlock (an ID) should deserialize to a Page object"""
block = blocks.PageChooserBlock()
christmas_page = Page.objects.get(slug='christmas')
self.assertEqual(block.to_python(christmas_page.id), christmas_page)
# None should deserialize to None
self.assertEqual(block.to_python(None), None)
def test_form_render(self):
block = blocks.PageChooserBlock(help_text="pick a page, any page")
empty_form_html = block.render_form(None, 'page')
self.assertIn('<input id="page" name="page" placeholder="" type="hidden" />', empty_form_html)
self.assertIn('createPageChooser("page", ["wagtailcore.page"], null, false);', empty_form_html)
christmas_page = Page.objects.get(slug='christmas')
christmas_form_html = block.render_form(christmas_page, 'page')
expected_html = '<input id="page" name="page" placeholder="" type="hidden" value="%d" />' % christmas_page.id
self.assertIn(expected_html, christmas_form_html)
self.assertIn("pick a page, any page", christmas_form_html)
def test_form_render_with_can_choose_root(self):
block = blocks.PageChooserBlock(help_text="pick a page, any page", can_choose_root=True)
empty_form_html = block.render_form(None, 'page')
self.assertIn('createPageChooser("page", ["wagtailcore.page"], null, true);', empty_form_html)
def test_form_response(self):
block = blocks.PageChooserBlock()
christmas_page = Page.objects.get(slug='christmas')
value = block.value_from_datadict({'page': str(christmas_page.id)}, {}, 'page')
self.assertEqual(value, christmas_page)
empty_value = block.value_from_datadict({'page': ''}, {}, 'page')
self.assertEqual(empty_value, None)
def test_clean(self):
required_block = blocks.PageChooserBlock()
nonrequired_block = blocks.PageChooserBlock(required=False)
christmas_page = Page.objects.get(slug='christmas')
self.assertEqual(required_block.clean(christmas_page), christmas_page)
with self.assertRaises(ValidationError):
required_block.clean(None)
self.assertEqual(nonrequired_block.clean(christmas_page), christmas_page)
self.assertEqual(nonrequired_block.clean(None), None)
class TestSystemCheck(TestCase):
def test_name_must_be_nonempty(self):
block = blocks.StreamBlock([
('heading', blocks.CharBlock()),
('', blocks.RichTextBlock()),
])
errors = block.check()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].id, 'wagtailcore.E001')
self.assertEqual(errors[0].hint, "Block name cannot be empty")
self.assertEqual(errors[0].obj, block.child_blocks[''])
def test_name_cannot_contain_spaces(self):
block = blocks.StreamBlock([
('heading', blocks.CharBlock()),
('rich text', blocks.RichTextBlock()),
])
errors = block.check()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].id, 'wagtailcore.E001')
self.assertEqual(errors[0].hint, "Block names cannot contain spaces")
self.assertEqual(errors[0].obj, block.child_blocks['rich text'])
def test_name_cannot_contain_dashes(self):
block = blocks.StreamBlock([
('heading', blocks.CharBlock()),
('rich-text', blocks.RichTextBlock()),
])
errors = block.check()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].id, 'wagtailcore.E001')
self.assertEqual(errors[0].hint, "Block names cannot contain dashes")
self.assertEqual(errors[0].obj, block.child_blocks['rich-text'])
def test_name_cannot_begin_with_digit(self):
block = blocks.StreamBlock([
('heading', blocks.CharBlock()),
('99richtext', blocks.RichTextBlock()),
])
errors = block.check()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].id, 'wagtailcore.E001')
self.assertEqual(errors[0].hint, "Block names cannot begin with a digit")
self.assertEqual(errors[0].obj, block.child_blocks['99richtext'])
def test_system_checks_recurse_into_lists(self):
failing_block = blocks.RichTextBlock()
block = blocks.StreamBlock([
('paragraph_list', blocks.ListBlock(
blocks.StructBlock([
('heading', blocks.CharBlock()),
('rich text', failing_block),
])
))
])
errors = block.check()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].id, 'wagtailcore.E001')
self.assertEqual(errors[0].hint, "Block names cannot contain spaces")
self.assertEqual(errors[0].obj, failing_block)
def test_system_checks_recurse_into_streams(self):
failing_block = blocks.RichTextBlock()
block = blocks.StreamBlock([
('carousel', blocks.StreamBlock([
('text', blocks.StructBlock([
('heading', blocks.CharBlock()),
('rich text', failing_block),
]))
]))
])
errors = block.check()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].id, 'wagtailcore.E001')
self.assertEqual(errors[0].hint, "Block names cannot contain spaces")
self.assertEqual(errors[0].obj, failing_block)
def test_system_checks_recurse_into_structs(self):
failing_block_1 = blocks.RichTextBlock()
failing_block_2 = blocks.RichTextBlock()
block = blocks.StreamBlock([
('two_column', blocks.StructBlock([
('left', blocks.StructBlock([
('heading', blocks.CharBlock()),
('rich text', failing_block_1),
])),
('right', blocks.StructBlock([
('heading', blocks.CharBlock()),
('rich text', failing_block_2),
]))
]))
])
errors = block.check()
self.assertEqual(len(errors), 2)
self.assertEqual(errors[0].id, 'wagtailcore.E001')
self.assertEqual(errors[0].hint, "Block names cannot contain spaces")
self.assertEqual(errors[0].obj, failing_block_1)
self.assertEqual(errors[1].id, 'wagtailcore.E001')
self.assertEqual(errors[1].hint, "Block names cannot contain spaces")
        self.assertEqual(errors[1].obj, failing_block_2)
class TestTemplateRendering(TestCase):
def test_render_with_custom_context(self):
from wagtail.tests.testapp.blocks import LinkBlock
block = LinkBlock()
value = block.to_python({'title': 'Torchbox', 'url': 'http://torchbox.com/'})
result = block.render(value)
self.assertEqual(result, '<a href="http://torchbox.com/" class="important">Torchbox</a>')
| bsd-3-clause |
nhuntwalker/astroML | book_figures/chapter6/fig_great_wall_KDE.py | 4 | 5422 | """
Great Wall KDE
--------------
Figure 6.3
Kernel density estimation for galaxies within the SDSS "Great Wall." The
top-left panel shows points that are galaxies, projected by their spatial
locations (right ascension and distance determined from redshift measurement)
onto the equatorial plane (declination ~ 0 degrees). The remaining panels show
estimates of the density of these points using kernel density estimation with
a Gaussian kernel (upper right), a top-hat kernel (lower left), and an
exponential kernel (lower right). Compare also to figure 6.4.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
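#
# As a brief reminder, the kernel density estimate evaluated below is
#
#     f_hat(x) = (1 / N) * sum_i K_h(x - x_i)
#
# where K_h is a Gaussian, top-hat, or exponential kernel of bandwidth h
# (h = 5 in this figure). The code multiplies the normalized estimate by
# N = X.shape[0], so the panels show a number density (expected counts per
# unit area) rather than a probability density.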
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import LogNorm
from scipy.spatial import cKDTree
from scipy.stats import gaussian_kde
from astroML.datasets import fetch_great_wall
# Scikit-learn 0.14 added sklearn.neighbors.KernelDensity, which is a very
# fast kernel density estimator based on a KD Tree. We'll use this if
# available (and raise a warning if it isn't).
try:
from sklearn.neighbors import KernelDensity
use_sklearn_KDE = True
except:
import warnings
warnings.warn("KDE will be removed in astroML version 0.3. Please "
"upgrade to scikit-learn 0.14+ and use "
"sklearn.neighbors.KernelDensity.", DeprecationWarning)
from astroML.density_estimation import KDE
use_sklearn_KDE = False
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Fetch the great wall data
X = fetch_great_wall()
#------------------------------------------------------------
# Create the grid on which to evaluate the results
Nx = 50
Ny = 125
xmin, xmax = (-375, -175)
ymin, ymax = (-300, 200)
#------------------------------------------------------------
# Evaluate for several models
# wrap the map iterator in list() for compatibility with Python 3 / newer
# NumPy, where np.vstack expects a sequence rather than a bare iterator
Xgrid = np.vstack(list(map(np.ravel,
                           np.meshgrid(np.linspace(xmin, xmax, Nx),
                                       np.linspace(ymin, ymax, Ny))))).T
kernels = ['gaussian', 'tophat', 'exponential']
dens = []
if use_sklearn_KDE:
    kde1 = KernelDensity(bandwidth=5, kernel='gaussian')
log_dens1 = kde1.fit(X).score_samples(Xgrid)
dens1 = X.shape[0] * np.exp(log_dens1).reshape((Ny, Nx))
    kde2 = KernelDensity(bandwidth=5, kernel='tophat')
log_dens2 = kde2.fit(X).score_samples(Xgrid)
dens2 = X.shape[0] * np.exp(log_dens2).reshape((Ny, Nx))
    kde3 = KernelDensity(bandwidth=5, kernel='exponential')
log_dens3 = kde3.fit(X).score_samples(Xgrid)
dens3 = X.shape[0] * np.exp(log_dens3).reshape((Ny, Nx))
else:
kde1 = KDE(metric='gaussian', h=5)
dens1 = kde1.fit(X).eval(Xgrid).reshape((Ny, Nx))
kde2 = KDE(metric='tophat', h=5)
dens2 = kde2.fit(X).eval(Xgrid).reshape((Ny, Nx))
kde3 = KDE(metric='exponential', h=5)
dens3 = kde3.fit(X).eval(Xgrid).reshape((Ny, Nx))
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 2.2))
fig.subplots_adjust(left=0.12, right=0.95, bottom=0.2, top=0.9,
hspace=0.01, wspace=0.01)
# First plot: scatter the points
ax1 = plt.subplot(221, aspect='equal')
ax1.scatter(X[:, 1], X[:, 0], s=1, lw=0, c='k')
ax1.text(0.95, 0.9, "input", ha='right', va='top',
transform=ax1.transAxes,
bbox=dict(boxstyle='round', ec='k', fc='w'))
# Second plot: gaussian kernel
ax2 = plt.subplot(222, aspect='equal')
ax2.imshow(dens1.T, origin='lower', norm=LogNorm(),
extent=(ymin, ymax, xmin, xmax), cmap=plt.cm.binary)
ax2.text(0.95, 0.9, "Gaussian $(h=5)$", ha='right', va='top',
transform=ax2.transAxes,
bbox=dict(boxstyle='round', ec='k', fc='w'))
# Third plot: top-hat kernel
ax3 = plt.subplot(223, aspect='equal')
ax3.imshow(dens2.T, origin='lower', norm=LogNorm(),
extent=(ymin, ymax, xmin, xmax), cmap=plt.cm.binary)
ax3.text(0.95, 0.9, "top-hat $(h=5)$", ha='right', va='top',
transform=ax3.transAxes,
bbox=dict(boxstyle='round', ec='k', fc='w'))
ax3.images[0].set_clim(0.01, 0.8)
# Fourth plot: exponential kernel
ax4 = plt.subplot(224, aspect='equal')
ax4.imshow(dens3.T, origin='lower', norm=LogNorm(),
extent=(ymin, ymax, xmin, xmax), cmap=plt.cm.binary)
ax4.text(0.95, 0.9, "exponential $(h=5)$", ha='right', va='top',
transform=ax4.transAxes,
bbox=dict(boxstyle='round', ec='k', fc='w'))
for ax in [ax1, ax2, ax3, ax4]:
ax.set_xlim(ymin, ymax - 0.01)
ax.set_ylim(xmin, xmax)
for ax in [ax1, ax2]:
ax.xaxis.set_major_formatter(plt.NullFormatter())
for ax in [ax3, ax4]:
ax.set_xlabel('$y$ (Mpc)')
for ax in [ax2, ax4]:
ax.yaxis.set_major_formatter(plt.NullFormatter())
for ax in [ax1, ax3]:
ax.set_ylabel('$x$ (Mpc)')
plt.show()
| bsd-2-clause |
keras-team/keras-io | examples/vision/vivit.py | 1 | 13690 | """
Title: Video Vision Transformer
Author: [Aritra Roy Gosthipaty](https://twitter.com/ariG23498), [Ayush Thakur](https://twitter.com/ayushthakur0) (equal contribution)
Date created: 2022/01/12
Last modified: 2022/01/12
Description: A Transformer-based architecture for video classification.
"""
"""
## Introduction
Videos are sequences of images. Let's assume you have an image
representation model (CNN, ViT, etc.) and a sequence model
(RNN, LSTM, etc.) at hand. We ask you to tweak the model for video
classification. The simplest approach would be to apply the image
model to individual frames, use the sequence model to learn
sequences of image features, then apply a classification head on
the learned sequence representation.
The Keras example
[Video Classification with a CNN-RNN Architecture](https://keras.io/examples/vision/video_classification/)
explains this approach in detail. Alternatively, you can also
build a hybrid Transformer-based model for video classification as shown in the Keras example
[Video Classification with Transformers](https://keras.io/examples/vision/video_transformers/).
In this example, we minimally implement
[ViViT: A Video Vision Transformer](https://arxiv.org/abs/2103.15691)
by Arnab et al., a **pure Transformer-based** model
for video classification. The authors propose a novel embedding scheme
and a number of Transformer variants to model video clips. We implement
the embedding scheme and one of the variants of the Transformer
architecture, for simplicity.
This example requires TensorFlow 2.6 or higher, and the `medmnist`
package, which can be installed by running the code cell below.
"""
"""shell
pip install -qq medmnist
"""
"""
## Imports
"""
import os
import io
import imageio
import medmnist
import ipywidgets
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
# Setting seed for reproducibility
SEED = 42
os.environ["TF_CUDNN_DETERMINISTIC"] = "1"
keras.utils.set_random_seed(SEED)
"""
## Hyperparameters
The hyperparameters are chosen via hyperparameter
search. You can learn more about the process in the "conclusion" section.
"""
# DATA
DATASET_NAME = "organmnist3d"
BATCH_SIZE = 32
AUTO = tf.data.AUTOTUNE
INPUT_SHAPE = (28, 28, 28, 1)
NUM_CLASSES = 11
# OPTIMIZER
LEARNING_RATE = 1e-4
WEIGHT_DECAY = 1e-5
# TRAINING
EPOCHS = 60
# TUBELET EMBEDDING
PATCH_SIZE = (8, 8, 8)
NUM_PATCHES = (INPUT_SHAPE[0] // PATCH_SIZE[0]) ** 2
# ViViT ARCHITECTURE
LAYER_NORM_EPS = 1e-6
PROJECTION_DIM = 128
NUM_HEADS = 8
NUM_LAYERS = 8
"""
## Dataset
For our example we use the
[MedMNIST v2: A Large-Scale Lightweight Benchmark for 2D and 3D Biomedical Image Classification](https://medmnist.com/)
dataset. The videos are lightweight and easy to train on.
"""
def download_and_prepare_dataset(data_info: dict):
"""Utility function to download the dataset.
Arguments:
data_info (dict): Dataset metadata.
"""
data_path = keras.utils.get_file(origin=data_info["url"], md5_hash=data_info["MD5"])
with np.load(data_path) as data:
# Get videos
train_videos = data["train_images"]
valid_videos = data["val_images"]
test_videos = data["test_images"]
# Get labels
train_labels = data["train_labels"].flatten()
valid_labels = data["val_labels"].flatten()
test_labels = data["test_labels"].flatten()
return (
(train_videos, train_labels),
(valid_videos, valid_labels),
(test_videos, test_labels),
)
# Get the metadata of the dataset
info = medmnist.INFO[DATASET_NAME]
# Get the dataset
prepared_dataset = download_and_prepare_dataset(info)
(train_videos, train_labels) = prepared_dataset[0]
(valid_videos, valid_labels) = prepared_dataset[1]
(test_videos, test_labels) = prepared_dataset[2]
"""
### `tf.data` pipeline
"""
@tf.function
def preprocess(frames: tf.Tensor, label: tf.Tensor):
"""Preprocess the frames tensors and parse the labels."""
# Preprocess images
frames = tf.image.convert_image_dtype(
frames[
..., tf.newaxis
], # The new axis is to help for further processing with Conv3D layers
tf.float32,
)
# Parse label
label = tf.cast(label, tf.float32)
return frames, label
def prepare_dataloader(
videos: np.ndarray,
labels: np.ndarray,
loader_type: str = "train",
batch_size: int = BATCH_SIZE,
):
"""Utility function to prepare the dataloader."""
dataset = tf.data.Dataset.from_tensor_slices((videos, labels))
if loader_type == "train":
dataset = dataset.shuffle(BATCH_SIZE * 2)
dataloader = (
dataset.map(preprocess, num_parallel_calls=tf.data.AUTOTUNE)
.batch(batch_size)
.prefetch(tf.data.AUTOTUNE)
)
return dataloader
trainloader = prepare_dataloader(train_videos, train_labels, "train")
validloader = prepare_dataloader(valid_videos, valid_labels, "valid")
testloader = prepare_dataloader(test_videos, test_labels, "test")
"""
## Tubelet Embedding
In ViTs, an image is divided into patches, which are then spatially
flattened, a process known as tokenization. For a video, one can
repeat this process for individual frames. **Uniform frame sampling**
as suggested by the authors is a tokenization scheme in which we
sample frames from the video clip and perform simple ViT tokenization.
| ![uniform frame sampling](https://i.imgur.com/aaPyLPX.png) |
| :--: |
| Uniform Frame Sampling [Source](https://arxiv.org/abs/2103.15691) |
**Tubelet Embedding** is different in terms of capturing temporal
information from the video.
First, we extract volumes from the video -- these volumes contain
patches of the frame and the temporal information as well. The volumes
are then flattened to build video tokens.
| ![tubelet embedding](https://i.imgur.com/9G7QTfV.png) |
| :--: |
| Tubelet Embedding [Source](https://arxiv.org/abs/2103.15691) |
"""
class TubeletEmbedding(layers.Layer):
def __init__(self, embed_dim, patch_size, **kwargs):
super().__init__(**kwargs)
self.projection = layers.Conv3D(
filters=embed_dim,
kernel_size=patch_size,
strides=patch_size,
padding="VALID",
)
self.flatten = layers.Reshape(target_shape=(-1, embed_dim))
def call(self, videos):
projected_patches = self.projection(videos)
flattened_patches = self.flatten(projected_patches)
return flattened_patches
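"""
For comparison, here is a minimal sketch of **uniform frame sampling** (our
own illustration, not used in the rest of this example): keep every Nth frame
and apply an ordinary 2D patch projection per frame. Note that `patch_size`
would be a 2D tuple here, unlike the 3D `PATCH_SIZE` used by the tubelet
embedding above.
"""
class UniformFrameTokenizer(layers.Layer):
    def __init__(self, embed_dim, patch_size, frame_stride=2, **kwargs):
        super().__init__(**kwargs)
        self.frame_stride = frame_stride
        # The same 2D patch projection is shared across all sampled frames.
        self.projection = layers.TimeDistributed(
            layers.Conv2D(
                filters=embed_dim,
                kernel_size=patch_size,
                strides=patch_size,
                padding="VALID",
            )
        )
        self.flatten = layers.Reshape(target_shape=(-1, embed_dim))
    def call(self, videos):
        # Keep every `frame_stride`-th frame along the temporal axis.
        sampled_frames = videos[:, :: self.frame_stride, ...]
        # (batch, frames, h', w', embed_dim) -> (batch, num_tokens, embed_dim)
        return self.flatten(self.projection(sampled_frames))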
"""
## Positional Embedding
This layer adds positional information to the encoded video tokens.
"""
class PositionalEncoder(layers.Layer):
def __init__(self, embed_dim, **kwargs):
super().__init__(**kwargs)
self.embed_dim = embed_dim
def build(self, input_shape):
_, num_tokens, _ = input_shape
self.position_embedding = layers.Embedding(
input_dim=num_tokens, output_dim=self.embed_dim
)
self.positions = tf.range(start=0, limit=num_tokens, delta=1)
def call(self, encoded_tokens):
# Encode the positions and add it to the encoded tokens
encoded_positions = self.position_embedding(self.positions)
encoded_tokens = encoded_tokens + encoded_positions
return encoded_tokens
"""
## Video Vision Transformer
The authors suggest 4 variants of Vision Transformer:
- Spatio-temporal attention
- Factorized encoder
- Factorized self-attention
- Factorized dot-product attention
In this example, we will implement the **Spatio-temporal attention**
model for simplicity. The following code snippet is heavily inspired from
[Image classification with Vision Transformer](https://keras.io/examples/vision/image_classification_with_vision_transformer/).
One can also refer to the
[official repository of ViViT](https://github.com/google-research/scenic/tree/main/scenic/projects/vivit)
which contains all the variants, implemented in JAX.
"""
def create_vivit_classifier(
tubelet_embedder,
positional_encoder,
input_shape=INPUT_SHAPE,
transformer_layers=NUM_LAYERS,
num_heads=NUM_HEADS,
embed_dim=PROJECTION_DIM,
layer_norm_eps=LAYER_NORM_EPS,
num_classes=NUM_CLASSES,
):
# Get the input layer
inputs = layers.Input(shape=input_shape)
# Create patches.
patches = tubelet_embedder(inputs)
# Encode patches.
encoded_patches = positional_encoder(patches)
# Create multiple layers of the Transformer block.
for _ in range(transformer_layers):
# Layer normalization and MHSA
x1 = layers.LayerNormalization(epsilon=1e-6)(encoded_patches)
attention_output = layers.MultiHeadAttention(
num_heads=num_heads, key_dim=embed_dim // num_heads, dropout=0.1
)(x1, x1)
# Skip connection
x2 = layers.Add()([attention_output, encoded_patches])
# Layer Normalization and MLP
x3 = layers.LayerNormalization(epsilon=1e-6)(x2)
x3 = keras.Sequential(
[
layers.Dense(units=embed_dim * 4, activation=tf.nn.gelu),
layers.Dense(units=embed_dim, activation=tf.nn.gelu),
]
)(x3)
# Skip connection
encoded_patches = layers.Add()([x3, x2])
# Layer normalization and Global average pooling.
representation = layers.LayerNormalization(epsilon=layer_norm_eps)(encoded_patches)
representation = layers.GlobalAvgPool1D()(representation)
# Classify outputs.
outputs = layers.Dense(units=num_classes, activation="softmax")(representation)
# Create the Keras model.
model = keras.Model(inputs=inputs, outputs=outputs)
return model
"""
## Train
"""
def run_experiment():
# Initialize model
model = create_vivit_classifier(
tubelet_embedder=TubeletEmbedding(
embed_dim=PROJECTION_DIM, patch_size=PATCH_SIZE
),
positional_encoder=PositionalEncoder(embed_dim=PROJECTION_DIM),
)
# Compile the model with the optimizer, loss function
# and the metrics.
optimizer = keras.optimizers.Adam(learning_rate=LEARNING_RATE)
model.compile(
optimizer=optimizer,
loss="sparse_categorical_crossentropy",
metrics=[
keras.metrics.SparseCategoricalAccuracy(name="accuracy"),
keras.metrics.SparseTopKCategoricalAccuracy(5, name="top-5-accuracy"),
],
)
# Train the model.
_ = model.fit(trainloader, epochs=EPOCHS, validation_data=validloader)
_, accuracy, top_5_accuracy = model.evaluate(testloader)
print(f"Test accuracy: {round(accuracy * 100, 2)}%")
print(f"Test top 5 accuracy: {round(top_5_accuracy * 100, 2)}%")
return model
model = run_experiment()
"""
## Inference
"""
NUM_SAMPLES_VIZ = 25
testsamples, labels = next(iter(testloader))
testsamples, labels = testsamples[:NUM_SAMPLES_VIZ], labels[:NUM_SAMPLES_VIZ]
ground_truths = []
preds = []
videos = []
for i, (testsample, label) in enumerate(zip(testsamples, labels)):
# Generate gif
with io.BytesIO() as gif:
imageio.mimsave(gif, (testsample.numpy() * 255).astype("uint8"), "GIF", fps=5)
videos.append(gif.getvalue())
# Get model prediction
output = model.predict(tf.expand_dims(testsample, axis=0))[0]
pred = np.argmax(output, axis=0)
ground_truths.append(label.numpy().astype("int"))
preds.append(pred)
def make_box_for_grid(image_widget, fit):
"""Make a VBox to hold caption/image for demonstrating option_fit values.
Source: https://ipywidgets.readthedocs.io/en/latest/examples/Widget%20Styling.html
"""
# Make the caption
if fit is not None:
fit_str = "'{}'".format(fit)
else:
fit_str = str(fit)
h = ipywidgets.HTML(value="" + str(fit_str) + "")
# Make the green box with the image widget inside it
boxb = ipywidgets.widgets.Box()
boxb.children = [image_widget]
# Compose into a vertical box
vb = ipywidgets.widgets.VBox()
vb.layout.align_items = "center"
vb.children = [h, boxb]
return vb
boxes = []
for i in range(NUM_SAMPLES_VIZ):
ib = ipywidgets.widgets.Image(value=videos[i], width=100, height=100)
true_class = info["label"][str(ground_truths[i])]
pred_class = info["label"][str(preds[i])]
caption = f"T: {true_class} | P: {pred_class}"
boxes.append(make_box_for_grid(ib, caption))
ipywidgets.widgets.GridBox(
boxes, layout=ipywidgets.widgets.Layout(grid_template_columns="repeat(5, 200px)")
)
"""
## Final thoughts
With a vanilla implementation, we achieve ~79-80% Top-1 accuracy on the
test dataset.
The hyperparameters used in this tutorial were finalized by running a
hyperparameter search using
[W&B Sweeps](https://docs.wandb.ai/guides/sweeps).
You can find out our sweeps result
[here](https://wandb.ai/minimal-implementations/vivit/sweeps/66fp0lhz)
and our quick analysis of the results
[here](https://wandb.ai/minimal-implementations/vivit/reports/Hyperparameter-Tuning-Analysis--VmlldzoxNDEwNzcx).
For further improvement, you could look into the following:
- Using data augmentation for videos.
- Using a better regularization scheme for training.
- Applying different variants of the Transformer model as in the paper.
We would like to thank [Anurag Arnab](https://anuragarnab.github.io/)
(first author of ViViT) for helpful discussion. We are grateful to
[Weights and Biases](https://wandb.ai/site) program for helping with
GPU credits.
You can use the trained model hosted on [Hugging Face Hub](https://huggingface.co/keras-io/video-vision-transformer)
and try the demo on [Hugging Face Spaces](https://huggingface.co/spaces/keras-io/video-vision-transformer-CT).
"""
| apache-2.0 |
pravsripad/mne-python | tutorials/preprocessing/40_artifact_correction_ica.py | 2 | 29198 | # -*- coding: utf-8 -*-
"""
.. _tut-artifact-ica:
============================
Repairing artifacts with ICA
============================
This tutorial covers the basics of independent components analysis (ICA) and
shows how ICA can be used for artifact repair; an extended example illustrates
repair of ocular and heartbeat artifacts. For conceptual background on ICA, see
:ref:`this scikit-learn tutorial
<sphx_glr_auto_examples_decomposition_plot_ica_blind_source_separation.py>`.
We begin as always by importing the necessary Python modules and loading some
:ref:`example data <sample-dataset>`. Because ICA can be computationally
intense, we'll also crop the data to 60 seconds; and to save ourselves from
repeatedly typing ``mne.preprocessing`` we'll directly import a few functions
and classes from that submodule:
"""
# %%
import os
import mne
from mne.preprocessing import (ICA, create_eog_epochs, create_ecg_epochs,
corrmap)
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_filt-0-40_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file)
# Here we'll crop to 60 seconds and drop gradiometer channels for speed
raw.crop(tmax=60.).pick_types(meg='mag', eeg=True, stim=True, eog=True)
raw.load_data()
# %%
# .. note::
# Before applying ICA (or any artifact repair strategy), be sure to observe
# the artifacts in your data to make sure you choose the right repair tool.
# Sometimes the right tool is no tool at all — if the artifacts are small
# enough you may not even need to repair them to get good analysis results.
# See :ref:`tut-artifact-overview` for guidance on detecting and
# visualizing various types of artifact.
#
# What is ICA?
# ^^^^^^^^^^^^
#
# Independent components analysis (ICA) is a technique for estimating
# independent source signals from a set of recordings in which the source
# signals were mixed together in unknown ratios. A common example of this is
# the problem of `blind source separation`_: with 3 musical instruments playing
# in the same room, and 3 microphones recording the performance (each picking
# up all 3 instruments, but at varying levels), can you somehow "unmix" the
# signals recorded by the 3 microphones so that you end up with a separate
# "recording" isolating the sound of each instrument?
#
# It is not hard to see how this analogy applies to EEG/MEG analysis: there are
# many "microphones" (sensor channels) simultaneously recording many
# "instruments" (blinks, heartbeats, activity in different areas of the brain,
# muscular activity from jaw clenching or swallowing, etc). As long as these
# various source signals are `statistically independent`_ and non-gaussian, it
# is usually possible to separate the sources using ICA, and then re-construct
# the sensor signals after excluding the sources that are unwanted.
#
#
# ICA in MNE-Python
# ~~~~~~~~~~~~~~~~~
#
# .. sidebar:: ICA and dimensionality reduction
#
# If you want to perform ICA with *no* dimensionality reduction (other than
# the number of Independent Components (ICs) given in ``n_components``, and
# any subsequent exclusion of ICs you specify in ``ICA.exclude``), simply
# pass ``n_components``.
#
# However, if you *do* want to reduce dimensionality, consider this
# example: if you have 300 sensor channels and you set ``n_components=50``
# during instantiation and pass ``n_pca_components=None`` to
# `~mne.preprocessing.ICA.apply`, then the first 50
# PCs are sent to the ICA algorithm (yielding 50 ICs), and during
# reconstruction `~mne.preprocessing.ICA.apply` will use the 50 ICs
# plus PCs number 51-300 (the full PCA residual). If instead you specify
# ``n_pca_components=120`` in `~mne.preprocessing.ICA.apply`, it will
# reconstruct using the 50 ICs plus the first 70 PCs in the PCA residual
# (numbers 51-120), thus discarding the smallest 180 components.
#
# **If you have previously been using EEGLAB**'s ``runica()`` and are
# looking for the equivalent of its ``'pca', n`` option to reduce
# dimensionality, set ``n_components=n`` during initialization and pass
# ``n_pca_components=n`` to `~mne.preprocessing.ICA.apply`.
#
# MNE-Python implements three different ICA algorithms: ``fastica`` (the
# default), ``picard``, and ``infomax``. FastICA and Infomax are both in fairly
# widespread use; Picard is a newer (2017) algorithm that is expected to
# converge faster than FastICA and Infomax, and is more robust than other
# algorithms in cases where the sources are not completely independent, which
# typically happens with real EEG/MEG data. See
# :footcite:`AblinEtAl2018` for more information.
#
# The ICA interface in MNE-Python is similar to the interface in
# `scikit-learn`_: some general parameters are specified when creating an
# `~mne.preprocessing.ICA` object, then the `~mne.preprocessing.ICA` object is
# fit to the data using its `~mne.preprocessing.ICA.fit` method. The results of
# the fitting are added to the `~mne.preprocessing.ICA` object as attributes
# that end in an underscore (``_``), such as ``ica.mixing_matrix_`` and
# ``ica.unmixing_matrix_``. After fitting, the ICA component(s) that you want
# to remove must be chosen, and the ICA fit must then be applied to the
# `~mne.io.Raw` or `~mne.Epochs` object using the `~mne.preprocessing.ICA`
# object's `~mne.preprocessing.ICA.apply` method.
#
# As is typically done with ICA, the data are first scaled to unit variance and
# whitened using principal components analysis (PCA) before performing the ICA
# decomposition. This is a two-stage process:
#
# 1. To deal with different channel types having different units
# (e.g., Volts for EEG and Tesla for MEG), data must be pre-whitened.
# If ``noise_cov=None`` (default), all data of a given channel type is
# scaled by the standard deviation across all channels. If ``noise_cov`` is
# a `~mne.Covariance`, the channels are pre-whitened using the covariance.
# 2. The pre-whitened data are then decomposed using PCA.
#
# From the resulting principal components (PCs), the first ``n_components`` are
# then passed to the ICA algorithm if ``n_components`` is an integer number.
# It can also be a float between 0 and 1, specifying the **fraction** of
# explained variance that the PCs should capture; the appropriate number of
# PCs (i.e., just as many PCs as are required to explain the given fraction
# of total variance) is then passed to the ICA.
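#
# For instance (a throwaway illustration; the numbers are our own picks and
# neither object is used again below):
ica_fixed = ICA(n_components=15)   # keep exactly the first 15 PCs
ica_frac = ICA(n_components=0.99)  # keep as many PCs as explain 99% variance
del ica_fixed, ica_frac
# %%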
#
# After visualizing the Independent Components (ICs) and excluding any that
# capture artifacts you want to repair, the sensor signal can be reconstructed
# using the `~mne.preprocessing.ICA` object's
# `~mne.preprocessing.ICA.apply` method. By default, signal
# reconstruction uses all of the ICs (less any ICs listed in ``ICA.exclude``)
# plus all of the PCs that were not included in the ICA decomposition (i.e.,
# the "PCA residual"). If you want to reduce the number of components used at
# the reconstruction stage, it is controlled by the ``n_pca_components``
# parameter (which will in turn reduce the rank of your data; by default
# ``n_pca_components=None`` resulting in no additional dimensionality
# reduction). The fitting and reconstruction procedures and the
# parameters that control dimensionality at various stages are summarized in
# the diagram below:
#
#
# .. raw:: html
#
# <a href=
# "../../_images/graphviz-7483cb1cf41f06e2a4ef451b17f073dbe584ba30.png">
#
# .. graphviz:: ../../_static/diagrams/ica.dot
# :alt: Diagram of ICA procedure in MNE-Python
# :align: left
#
# .. raw:: html
#
# </a>
#
# See the Notes section of the `~mne.preprocessing.ICA` documentation
# for further details. Next we'll walk through an extended example that
# illustrates each of these steps in greater detail.
#
# Example: EOG and ECG artifact repair
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Visualizing the artifacts
# ~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Let's begin by visualizing the artifacts that we want to repair. In this
# dataset they are big enough to see easily in the raw data:
# pick some channels that clearly show heartbeats and blinks
regexp = r'(MEG [12][45][123]1|EEG 00.)'
artifact_picks = mne.pick_channels_regexp(raw.ch_names, regexp=regexp)
raw.plot(order=artifact_picks, n_channels=len(artifact_picks),
show_scrollbars=False)
# %%
# We can get a summary of how the ocular artifact manifests across each channel
# type using `~mne.preprocessing.create_eog_epochs` like we did in the
# :ref:`tut-artifact-overview` tutorial:
eog_evoked = create_eog_epochs(raw).average()
eog_evoked.apply_baseline(baseline=(None, -0.2))
eog_evoked.plot_joint()
# %%
# Now we'll do the same for the heartbeat artifacts, using
# `~mne.preprocessing.create_ecg_epochs`:
ecg_evoked = create_ecg_epochs(raw).average()
ecg_evoked.apply_baseline(baseline=(None, -0.2))
ecg_evoked.plot_joint()
# %%
# Filtering to remove slow drifts
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Before we run the ICA, an important step is filtering the data to remove
# low-frequency drifts, which can negatively affect the quality of the ICA fit.
# The slow drifts are problematic because they reduce the independence of the
# assumed-to-be-independent sources (e.g., during a slow upward drift, the
# neural, heartbeat, blink, and other muscular sources will all tend to have
# higher values), making it harder for the algorithm to find an accurate
# solution. A high-pass filter with 1 Hz cutoff frequency is recommended.
# However, because filtering is a linear operation, the ICA solution found from
# the filtered signal can be applied to the unfiltered signal (see
# :footcite:`WinklerEtAl2015` for
# more information), so we'll keep a copy of the unfiltered
# `~mne.io.Raw` object around so we can apply the ICA solution to it
# later.
filt_raw = raw.copy().filter(l_freq=1., h_freq=None)
# %%
# Fitting and plotting the ICA solution
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# .. sidebar:: Ignoring the time domain
#
# The ICA algorithms implemented in MNE-Python find patterns across
# channels, but ignore the time domain. This means you can compute ICA on
# discontinuous `~mne.Epochs` or `~mne.Evoked` objects (not
# just continuous `~mne.io.Raw` objects), or only use every Nth
# sample by passing the ``decim`` parameter to ``ICA.fit()``.
#
# .. note:: `~mne.Epochs` used for fitting ICA should not be
# baseline-corrected. Because cleaning the data via ICA may
# introduce DC offsets, we suggest to baseline correct your data
# **after** cleaning (and not before), should you require
# baseline correction.
#
# Now we're ready to set up and fit the ICA. Since we know (from observing our
# raw data) that the EOG and ECG artifacts are fairly strong, we would expect
# those artifacts to be captured in the first few dimensions of the PCA
# decomposition that happens before the ICA. Therefore, we probably don't need
# a huge number of components to do a good job of isolating our artifacts
# (though it is usually preferable to include more components for a more
# accurate solution). As a first guess, we'll run ICA with ``n_components=15``
# (use only the first 15 PCA components to compute the ICA decomposition) — a
# very small number given that our data has over 300 channels, but with the
# advantage that it will run quickly and we will able to tell easily whether it
# worked or not (because we already know what the EOG / ECG artifacts should
# look like).
#
# ICA fitting is not deterministic (e.g., the components may get a sign
# flip on different runs, or may not always be returned in the same order), so
# we'll also specify a `random seed`_ so that we get identical results each
# time this tutorial is built by our web servers.
ica = ICA(n_components=15, max_iter='auto', random_state=97)
ica.fit(filt_raw)
ica
# %%
# Some optional parameters that we could have passed to the
# `~mne.preprocessing.ICA.fit` method include ``decim`` (to use only
# every Nth sample in computing the ICs, which can yield a considerable
# speed-up) and ``reject`` (for providing a rejection dictionary for maximum
# acceptable peak-to-peak amplitudes for each channel type, just like we used
# when creating epoched data in the :ref:`tut-overview` tutorial).
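#
# For instance, a throwaway fit equivalent to the one above but using only
# every 3rd sample and rejecting extreme segments (the thresholds here are
# illustrative guesses, not tuned values):
ica_decimated = ICA(n_components=15, max_iter='auto', random_state=97)
ica_decimated.fit(filt_raw, decim=3, reject=dict(mag=4e-12, eeg=150e-6))
del ica_decimated  # not used further; we keep working with ``ica`` from above
# %%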
#
# Now we can examine the ICs to see what they captured.
# `~mne.preprocessing.ICA.plot_sources` will show the time series of the
# ICs. Note that in our call to `~mne.preprocessing.ICA.plot_sources` we
# can use the original, unfiltered `~mne.io.Raw` object. A helpful tip is that
# right clicking (or control + click with a trackpad) on the name of the
# component will bring up a plot of its properties. In this plot, you can
# also toggle the channel type in the topoplot (if you have multiple channel
# types) with 't' and whether the spectrum is log-scaled or not with 'l'.
raw.load_data()
ica.plot_sources(raw, show_scrollbars=False)
# %%
# Here we can pretty clearly see that the first component (``ICA000``) captures
# the EOG signal quite well, and the second component (``ICA001``) looks a lot
# like `a heartbeat <qrs_>`_ (for more info on visually identifying Independent
# Components, `this EEGLAB tutorial`_ is a good resource). We can also
# visualize the scalp field distribution of each component using
# `~mne.preprocessing.ICA.plot_components`. These are interpolated based
# on the values in the ICA mixing matrix:
# sphinx_gallery_thumbnail_number = 9
ica.plot_components()
# %%
# .. note::
#
# `~mne.preprocessing.ICA.plot_components` (which plots the scalp
# field topographies for each component) has an optional ``inst`` parameter
# that takes an instance of `~mne.io.Raw` or `~mne.Epochs`.
# Passing ``inst`` makes the scalp topographies interactive: clicking one
# will bring up a diagnostic `~mne.preprocessing.ICA.plot_properties`
# window (see below) for that component.
#
# In the plots above it's fairly obvious which ICs are capturing our EOG and
# ECG artifacts, but there are additional ways to visualize them anyway just to
# be sure. First, we can plot an overlay of the original signal against the
# reconstructed signal with the artifactual ICs excluded, using
# `~mne.preprocessing.ICA.plot_overlay`:
# blinks
ica.plot_overlay(raw, exclude=[0], picks='eeg')
# heartbeats
ica.plot_overlay(raw, exclude=[1], picks='mag')
# %%
# We can also plot some diagnostics of each IC using
# `~mne.preprocessing.ICA.plot_properties`:
ica.plot_properties(raw, picks=[0, 1])
# %%
# In the remaining sections, we'll look at different ways of choosing which ICs
# to exclude prior to reconstructing the sensor signals.
#
#
# Selecting ICA components manually
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Once we're certain which components we want to exclude, we can specify that
# manually by setting the ``ica.exclude`` attribute. Similar to marking bad
# channels, merely setting ``ica.exclude`` doesn't do anything immediately (it
# just adds the excluded ICs to a list that will get used later when it's
# needed). Once the exclusions have been set, ICA methods like
# `~mne.preprocessing.ICA.plot_overlay` will exclude those component(s)
# even if no ``exclude`` parameter is passed, and the list of excluded
# components will be preserved when using `mne.preprocessing.ICA.save`
# and `mne.preprocessing.read_ica`.
ica.exclude = [0, 1] # indices chosen based on various plots above
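# As a side note, the exclusions survive a round trip through saving and
# re-loading the ICA solution (the filename is an arbitrary choice of ours,
# and ``overwrite=True`` assumes a reasonably recent MNE-Python):
ica.save('sample-audvis-ica.fif', overwrite=True)
ica_restored = mne.preprocessing.read_ica('sample-audvis-ica.fif')
print(ica_restored.exclude)  # [0, 1]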
# %%
# Now that the exclusions have been set, we can reconstruct the sensor signals
# with artifacts removed using the `~mne.preprocessing.ICA.apply` method
# (remember, we're applying the ICA solution from the *filtered* data to the
# original *unfiltered* signal). Plotting the original raw data alongside the
# reconstructed data shows that the heartbeat and blink artifacts are repaired.
# ica.apply() changes the Raw object in-place, so let's make a copy first:
reconst_raw = raw.copy()
ica.apply(reconst_raw)
raw.plot(order=artifact_picks, n_channels=len(artifact_picks),
show_scrollbars=False)
reconst_raw.plot(order=artifact_picks, n_channels=len(artifact_picks),
show_scrollbars=False)
del reconst_raw
# %%
# Using an EOG channel to select ICA components
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# It may have seemed easy to review the plots and manually select which ICs to
# exclude, but when processing dozens or hundreds of subjects this can become
# a tedious, rate-limiting step in the analysis pipeline. One alternative is to
# use dedicated EOG or ECG sensors as a "pattern" to check the ICs against, and
# automatically mark for exclusion any ICs that match the EOG/ECG pattern. Here
# we'll use `~mne.preprocessing.ICA.find_bads_eog` to automatically find
# the ICs that best match the EOG signal, then use
# `~mne.preprocessing.ICA.plot_scores` along with our other plotting
# functions to see which ICs it picked. We'll start by resetting
# ``ica.exclude`` back to an empty list:
ica.exclude = []
# find which ICs match the EOG pattern
eog_indices, eog_scores = ica.find_bads_eog(raw)
ica.exclude = eog_indices
# barplot of ICA component "EOG match" scores
ica.plot_scores(eog_scores)
# plot diagnostics
ica.plot_properties(raw, picks=eog_indices)
# plot ICs applied to raw data, with EOG matches highlighted
ica.plot_sources(raw, show_scrollbars=False)
# plot ICs applied to the averaged EOG epochs, with EOG matches highlighted
ica.plot_sources(eog_evoked)
# %%
# Note that above we used `~mne.preprocessing.ICA.plot_sources` on both
# the original `~mne.io.Raw` instance and also on an
# `~mne.Evoked` instance of the extracted EOG artifacts. This can be
# another way to confirm that `~mne.preprocessing.ICA.find_bads_eog` has
# identified the correct components.
#
#
# Using a simulated channel to select ICA components
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# If you don't have an EOG channel,
# `~mne.preprocessing.ICA.find_bads_eog` has a ``ch_name`` parameter that
# lets you designate another channel to serve as a proxy for EOG. You can use
# a single channel, or create a bipolar reference from frontal EEG sensors and
# use that as a virtual EOG channel. This carries a risk, however: you must
# hope that the frontal EEG channels only reflect EOG and not brain dynamics
# in the prefrontal cortex (or you must not care about those prefrontal
# signals).
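# If you do go that route on your own data, a hedged sketch might look like
# the following (the channel names ``Fp1`` and ``Fp2`` are hypothetical and
# are not present in this sample dataset)::
#
#     raw_bip = mne.set_bipolar_reference(raw, anode='Fp1', cathode='Fp2',
#                                         ch_name='EOG_proxy')
#     eog_idx, eog_scores = ica.find_bads_eog(raw_bip, ch_name='EOG_proxy')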
#
# For ECG, it is easier: `~mne.preprocessing.ICA.find_bads_ecg` can use
# cross-channel averaging of magnetometer or gradiometer channels to construct
# a virtual ECG channel, so if you have MEG channels it is usually not
# necessary to pass a specific channel name.
# `~mne.preprocessing.ICA.find_bads_ecg` also has two options for its
# ``method`` parameter: ``'ctps'`` (cross-trial phase statistics
# :footcite:`DammersEtAl2008`) and
# ``'correlation'`` (Pearson correlation between data and ECG channel).
ica.exclude = []
# find which ICs match the ECG pattern
ecg_indices, ecg_scores = ica.find_bads_ecg(raw, method='correlation',
threshold='auto')
ica.exclude = ecg_indices
# barplot of ICA component "ECG match" scores
ica.plot_scores(ecg_scores)
# plot diagnostics
ica.plot_properties(raw, picks=ecg_indices)
# plot ICs applied to raw data, with ECG matches highlighted
ica.plot_sources(raw, show_scrollbars=False)
# plot ICs applied to the averaged ECG epochs, with ECG matches highlighted
ica.plot_sources(ecg_evoked)
# %%
# The last of these plots is especially useful: it shows us that the heartbeat
# artifact is coming through on *two* ICs, and we've only caught one of them.
# In fact, if we look closely at the output of
# `~mne.preprocessing.ICA.plot_sources` (online, you can right-click →
# "view image" to zoom in), it looks like ``ICA014`` has a weak periodic
# component that is in-phase with ``ICA001``. It might be worthwhile to re-run
# the ICA with more components to see if that second heartbeat artifact
# resolves out a little better:
# refit the ICA with 30 components this time
new_ica = ICA(n_components=30, max_iter='auto', random_state=97)
new_ica.fit(filt_raw)
# find which ICs match the ECG pattern
ecg_indices, ecg_scores = new_ica.find_bads_ecg(raw, method='correlation',
threshold='auto')
new_ica.exclude = ecg_indices
# barplot of ICA component "ECG match" scores
new_ica.plot_scores(ecg_scores)
# plot diagnostics
new_ica.plot_properties(raw, picks=ecg_indices)
# plot ICs applied to raw data, with ECG matches highlighted
new_ica.plot_sources(raw, show_scrollbars=False)
# plot ICs applied to the averaged ECG epochs, with ECG matches highlighted
new_ica.plot_sources(ecg_evoked)
# %%
# Much better! Now we've captured both ICs that are reflecting the heartbeat
# artifact (and as a result, we got two diagnostic plots: one for each IC that
# reflects the heartbeat). This demonstrates the value of checking the results
# of automated approaches like `~mne.preprocessing.ICA.find_bads_ecg`
# before accepting them.
# %%
# For EEG, activation of muscles for postural control of the head and neck
# contaminates the signal as well. This is usually not detected by MEG. For
# an example showing how to remove these components, see :ref:`ex-muscle-ica`.
# clean up memory before moving on
del raw, ica, new_ica
# %%
# Selecting ICA components using template matching
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# When dealing with multiple subjects, it is also possible to manually select
# an IC for exclusion on one subject, and then use that component as a
# *template* for selecting which ICs to exclude from other subjects' data,
# using `mne.preprocessing.corrmap` :footcite:`CamposViolaEtAl2009`.
# The idea behind `~mne.preprocessing.corrmap` is that the artifact patterns
# are similar
# enough across subjects that corresponding ICs can be identified by
# correlating the ICs from each ICA solution with a common template, and
# picking the ICs with the highest correlation strength.
# `~mne.preprocessing.corrmap` takes a list of ICA solutions, and a
# ``template`` parameter that specifies which ICA object and which component
# within it to use as a template.
#
# Since our sample dataset only contains data from one subject, we'll use a
# different dataset with multiple subjects: the EEGBCI dataset
# :footcite:`SchalkEtAl2004,GoldbergerEtAl2000`. The
# dataset has 109 subjects; we'll just download one run (a left/right hand
# movement task) from each of the first 4 subjects:
raws = list()
icas = list()
for subj in range(4):
# EEGBCI subjects are 1-indexed; run 3 is a left/right hand movement task
fname = mne.datasets.eegbci.load_data(subj + 1, runs=[3])[0]
raw = mne.io.read_raw_edf(fname).load_data().resample(50)
# remove trailing `.` from channel names so we can set montage
mne.datasets.eegbci.standardize(raw)
raw.set_montage('standard_1005')
# high-pass filter
raw_filt = raw.copy().load_data().filter(l_freq=1., h_freq=None)
# fit ICA, using low max_iter for speed
ica = ICA(n_components=30, max_iter=100, random_state=97)
ica.fit(raw_filt, verbose='error')
raws.append(raw)
icas.append(ica)
# %%
# Now let's run `~mne.preprocessing.corrmap`:
# use the first subject as template; use Fpz as proxy for EOG
raw = raws[0]
ica = icas[0]
eog_inds, eog_scores = ica.find_bads_eog(raw, ch_name='Fpz')
corrmap(icas, template=(0, eog_inds[0]))
# %%
# The first figure shows the template map, while the second figure shows all
# the maps that were considered a "match" for the template (including the
# template itself). There is one match for each subject, but it's a good idea
# to also double-check the ICA sources for each subject:
for index, (ica, raw) in enumerate(zip(icas, raws)):
with mne.viz.use_browser_backend('matplotlib'):
fig = ica.plot_sources(raw, show_scrollbars=False)
fig.subplots_adjust(top=0.9) # make space for title
fig.suptitle('Subject {}'.format(index))
# %%
# Notice that subjects 2 and 3 each seem to have *two* ICs that reflect ocular
# activity (components ``ICA000`` and ``ICA002``), but only one was caught by
# `~mne.preprocessing.corrmap`. Let's try setting the threshold manually:
corrmap(icas, template=(0, eog_inds[0]), threshold=0.9)
# %%
# This time it found 2 ICs for each of subjects 2 and 3 (which is good).
# At this point we'll re-run `~mne.preprocessing.corrmap` with
# parameters ``label='blink', plot=False`` to *label* the ICs from each subject
# that capture the blink artifacts (without plotting them again).
corrmap(icas, template=(0, eog_inds[0]), threshold=0.9, label='blink',
plot=False)
print([ica.labels_ for ica in icas])
# %%
# Notice that the first subject has 3 different labels for the IC at index 0:
# "eog/0/Fpz", "eog", and "blink". The first two were added by
# `~mne.preprocessing.ICA.find_bads_eog`; the "blink" label was added by the
# last call to `~mne.preprocessing.corrmap`. Notice also that each subject has
# at least one IC index labelled "blink", and subjects 2 and 3 each have two
# components (0 and 2) labelled "blink" (consistent with the plot of IC sources
# above). The ``labels_`` attribute of `~mne.preprocessing.ICA` objects can
# also be manually edited to annotate the ICs with custom labels. They also
# come in handy when plotting:
icas[3].plot_components(picks=icas[3].labels_['blink'])
icas[3].exclude = icas[3].labels_['blink']
icas[3].plot_sources(raws[3], show_scrollbars=False)
# %%
# As a final note, it is possible to extract ICs numerically using the
# `~mne.preprocessing.ICA.get_components` method of
# `~mne.preprocessing.ICA` objects. This will return a :class:`NumPy
# array <numpy.ndarray>` that can be passed to
# `~mne.preprocessing.corrmap` instead of the :class:`tuple` of
# ``(subject_index, component_index)`` we passed before, and will yield the
# same result:
template_eog_component = icas[0].get_components()[:, eog_inds[0]]
corrmap(icas, template=template_eog_component, threshold=0.9)
print(template_eog_component)
# %%
# An advantage of using this numerical representation of an IC to capture a
# particular artifact pattern is that it can be saved and used as a template
# for future template-matching tasks using `~mne.preprocessing.corrmap`
# without having to load or recompute the ICA solution that yielded the
# template originally. Put another way, when the template is a NumPy array, the
# `~mne.preprocessing.ICA` object containing the template does not need
# to be in the list of ICAs provided to `~mne.preprocessing.corrmap`.
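# For example (the filename is hypothetical, and ``import numpy as np`` is
# assumed)::
#
#     np.save('template-eog-component.npy', template_eog_component)
#     template = np.load('template-eog-component.npy')
#     corrmap(icas, template=template, threshold=0.9)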
#
# .. LINKS
#
# .. _`blind source separation`:
# https://en.wikipedia.org/wiki/Signal_separation
# .. _`statistically independent`:
# https://en.wikipedia.org/wiki/Independence_(probability_theory)
# .. _`scikit-learn`: https://scikit-learn.org
# .. _`random seed`: https://en.wikipedia.org/wiki/Random_seed
# .. _`regular expression`: https://www.regular-expressions.info/
# .. _`qrs`: https://en.wikipedia.org/wiki/QRS_complex
# .. _`this EEGLAB tutorial`: https://labeling.ucsd.edu/tutorial/labels
# %%
# Compute ICA components on Epochs
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# ICA is now fit to epoched MEG data instead of the raw data.
# We assume that the non-stationary EOG artifacts have already been removed.
# The sources matching the ECG are automatically found and displayed.
#
# .. note::
# This example is computationally intensive, so it might take a few minutes
# to complete.
#
# After reading the data, preprocessing consists of:
#
# - MEG channel selection
# - 1-30 Hz band-pass filter
# - epoching -0.2 to 0.5 seconds with respect to events
# - rejection based on peak-to-peak amplitude
#
# Note that we don't baseline-correct the epochs here – we'll do that after
# the ICA cleaning is complete. Baseline correction before ICA is not
# recommended by the MNE-Python developers, as it doesn't guarantee optimal
# results.
filt_raw.pick_types(meg=True, eeg=False, exclude='bads', stim=True).load_data()
filt_raw.filter(1, 30, fir_design='firwin')
# peak-to-peak amplitude rejection parameters
reject = dict(mag=4e-12)
# create epochs; more (and longer) epochs give more exposure to the artifacts
events = mne.find_events(filt_raw, stim_channel='STI 014')
# don't baseline correct epochs
epochs = mne.Epochs(filt_raw, events, event_id=None, tmin=-0.2, tmax=0.5,
reject=reject, baseline=None)
# %%
# Fit an ICA model using the FastICA algorithm, then detect and plot the
# components that capture the ECG artifacts.
ica = ICA(n_components=15, method='fastica', max_iter="auto").fit(epochs)
ecg_epochs = create_ecg_epochs(filt_raw, tmin=-.5, tmax=.5)
ecg_inds, scores = ica.find_bads_ecg(ecg_epochs, threshold='auto')
ica.plot_components(ecg_inds)
# %%
# Plot the properties of the ECG components:
ica.plot_properties(epochs, picks=ecg_inds)
# %%
# Plot the estimated sources of detected ECG related components:
ica.plot_sources(filt_raw, picks=ecg_inds)
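# %%
# Finally, here is a brief, hedged sketch of the post-ICA baseline correction
# promised above: reconstruct the epochs with the ECG-related components
# removed, and only then apply the baseline.
clean_epochs = ica.apply(epochs.copy().load_data(), exclude=ecg_inds)
clean_epochs.apply_baseline(baseline=(None, 0))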
# %%
# References
# ^^^^^^^^^^
# .. footbibliography::
| bsd-3-clause |
daydayuplo/gee | earth_enterprise/src/server/pywms/ogc/wmts/xml/capabilities.py | 4 | 349901 | #!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
#
# Generated Wed Aug 31 14:02:20 2011 by generateDS.py version 2.5a.
#
import sys
import getopt
import re as re_
etree_ = None
Verbose_import_ = False
( XMLParser_import_none, XMLParser_import_lxml,
XMLParser_import_elementtree
) = range(3)
XMLParser_import_library = None
try:
# lxml
from lxml import etree as etree_
XMLParser_import_library = XMLParser_import_lxml
if Verbose_import_:
print("running with lxml.etree")
except ImportError:
try:
# cElementTree from Python 2.5+
import xml.etree.cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree on Python 2.5+")
except ImportError:
try:
# ElementTree from Python 2.5+
import xml.etree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree on Python 2.5+")
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree")
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree")
except ImportError:
raise ImportError("Failed to import ElementTree from any known place")
def parsexml_(*args, **kwargs):
if (XMLParser_import_library == XMLParser_import_lxml and
'parser' not in kwargs):
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
kwargs['parser'] = etree_.ETCompatXMLParser()
doc = etree_.parse(*args, **kwargs)
return doc
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ImportError, exp:
class GeneratedsSuper(object):
def gds_format_string(self, input_data, input_name=''):
return input_data
def gds_validate_string(self, input_data, node, input_name=''):
return input_data
def gds_format_integer(self, input_data, input_name=''):
return '%d' % input_data
def gds_validate_integer(self, input_data, node, input_name=''):
return input_data
def gds_format_integer_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_integer_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of integers')
return input_data
def gds_format_float(self, input_data, input_name=''):
return '%f' % input_data
def gds_validate_float(self, input_data, node, input_name=''):
return input_data
def gds_format_float_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_float_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of floats')
return input_data
def gds_format_double(self, input_data, input_name=''):
return '%e' % input_data
def gds_validate_double(self, input_data, node, input_name=''):
return input_data
def gds_format_double_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_double_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of doubles')
return input_data
def gds_format_boolean(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_boolean(self, input_data, node, input_name=''):
return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_boolean_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
if value not in ('true', '1', 'false', '0', ):
raise_parse_error(node, 'Requires sequence of booleans ("true", "1", "false", "0")')
return input_data
def gds_str_lower(self, instring):
return instring.lower()
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
Tag_strip_pattern_ = re_.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = 'ascii'
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
STRING_CLEANUP_PAT = re_.compile(r"[\n\r\s]+")
#
# Support/utility functions.
#
def showIndent(outfile, level):
for idx in range(level):
outfile.write(' ')
def quote_xml(inStr):
if not inStr:
return ''
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
return s1
def quote_attrib(inStr):
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    if '"' in s1:
        if "'" in s1:
            s1 = '"%s"' % s1.replace('"', "&quot;")
else:
s1 = "'%s'" % s1
else:
s1 = '"%s"' % s1
return s1
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
if s1.find('\n') == -1:
return "'%s'" % s1
else:
return "'''%s'''" % s1
else:
if s1.find('"') != -1:
s1 = s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
def get_all_text_(node):
if node.text is not None:
text = node.text
else:
text = ''
for child in node:
if child.tail is not None:
text += child.tail
return text
def find_attr_value_(attr_name, node):
attrs = node.attrib
# First try with no namespace.
value = attrs.get(attr_name)
if value is None:
# Now try the other possible namespaces.
namespaces = node.nsmap.itervalues()
for namespace in namespaces:
value = attrs.get('{%s}%s' % (namespace, attr_name, ))
if value is not None:
break
return value
class GDSParseError(Exception):
pass
def raise_parse_error(node, msg):
if XMLParser_import_library == XMLParser_import_lxml:
msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, )
else:
msg = '%s (element %s)' % (msg, node.tag, )
raise GDSParseError(msg)
class MixedContainer:
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
# Constants for content_type:
TypeNone = 0
TypeText = 1
TypeString = 2
TypeInteger = 3
TypeFloat = 4
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
def getCategory(self):
return self.category
def getContenttype(self, content_type):
return self.content_type
def getValue(self):
return self.value
def getName(self):
return self.name
def export(self, outfile, level, name, namespace):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
outfile.write(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(outfile, level, name)
else: # category == MixedContainer.CategoryComplex
self.value.export(outfile, level, namespace,name)
def exportSimple(self, outfile, level, name):
if self.content_type == MixedContainer.TypeString:
outfile.write('<%s>%s</%s>' % (self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeInteger or \
self.content_type == MixedContainer.TypeBoolean:
outfile.write('<%s>%d</%s>' % (self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeFloat or \
self.content_type == MixedContainer.TypeDecimal:
outfile.write('<%s>%f</%s>' % (self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
outfile.write('<%s>%g</%s>' % (self.name, self.value, self.name))
def exportLiteral(self, outfile, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(outfile, level)
outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' % \
(self.category, self.content_type, self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(outfile, level)
outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' % \
(self.category, self.content_type, self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(outfile, level)
outfile.write('model_.MixedContainer(%d, %d, "%s",\n' % \
(self.category, self.content_type, self.name,))
self.value.exportLiteral(outfile, level + 1)
showIndent(outfile, level)
outfile.write(')\n')
class MemberSpec_(object):
def __init__(self, name='', data_type='', container=0):
self.name = name
self.data_type = data_type
self.container = container
def set_name(self, name): self.name = name
def get_name(self): return self.name
def set_data_type(self, data_type): self.data_type = data_type
def get_data_type_chain(self): return self.data_type
def get_data_type(self):
if isinstance(self.data_type, list):
if len(self.data_type) > 0:
return self.data_type[-1]
else:
return 'xs:string'
else:
return self.data_type
def set_container(self, container): self.container = container
def get_container(self): return self.container
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class TileMatrixSetLink(GeneratedsSuper):
"""Metadata about the TileMatrixSet reference."""
subclass = None
superclass = None
def __init__(self, TileMatrixSet=None, TileMatrixSetLimits=None):
self.TileMatrixSet = TileMatrixSet
self.TileMatrixSetLimits = TileMatrixSetLimits
def factory(*args_, **kwargs_):
if TileMatrixSetLink.subclass:
return TileMatrixSetLink.subclass(*args_, **kwargs_)
else:
return TileMatrixSetLink(*args_, **kwargs_)
factory = staticmethod(factory)
def get_TileMatrixSet(self): return self.TileMatrixSet
def set_TileMatrixSet(self, TileMatrixSet): self.TileMatrixSet = TileMatrixSet
def get_TileMatrixSetLimits(self): return self.TileMatrixSetLimits
def set_TileMatrixSetLimits(self, TileMatrixSetLimits): self.TileMatrixSetLimits = TileMatrixSetLimits
def export(self, outfile, level, namespace_='', name_='TileMatrixSetLink', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='TileMatrixSetLink')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='TileMatrixSetLink'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='TileMatrixSetLink', fromsubclass_=False):
if self.TileMatrixSet is not None:
showIndent(outfile, level)
outfile.write('<%sTileMatrixSet>%s</%sTileMatrixSet>\n' % (namespace_, self.gds_format_string(quote_xml(self.TileMatrixSet).encode(ExternalEncoding), input_name='TileMatrixSet'), namespace_))
if self.TileMatrixSetLimits:
self.TileMatrixSetLimits.export(outfile, level, namespace_, name_='TileMatrixSetLimits')
def hasContent_(self):
if (
self.TileMatrixSet is not None or
self.TileMatrixSetLimits is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='TileMatrixSetLink'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.TileMatrixSet is not None:
showIndent(outfile, level)
outfile.write('TileMatrixSet=%s,\n' % quote_python(self.TileMatrixSet).encode(ExternalEncoding))
if self.TileMatrixSetLimits is not None:
showIndent(outfile, level)
outfile.write('TileMatrixSetLimits=model_.TileMatrixSetLimits(\n')
self.TileMatrixSetLimits.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'TileMatrixSet':
TileMatrixSet_ = child_.text
TileMatrixSet_ = self.gds_validate_string(TileMatrixSet_, node, 'TileMatrixSet')
self.TileMatrixSet = TileMatrixSet_
elif nodeName_ == 'TileMatrixSetLimits':
obj_ = TileMatrixSetLimits.factory()
obj_.build(child_)
self.set_TileMatrixSetLimits(obj_)
# end class TileMatrixSetLink
class TileMatrixSetLimits(GeneratedsSuper):
"""Metadata about a the limits of the tile row and tile col indices."""
subclass = None
superclass = None
def __init__(self, TileMatrixLimits=None):
if TileMatrixLimits is None:
self.TileMatrixLimits = []
else:
self.TileMatrixLimits = TileMatrixLimits
def factory(*args_, **kwargs_):
if TileMatrixSetLimits.subclass:
return TileMatrixSetLimits.subclass(*args_, **kwargs_)
else:
return TileMatrixSetLimits(*args_, **kwargs_)
factory = staticmethod(factory)
def get_TileMatrixLimits(self): return self.TileMatrixLimits
def set_TileMatrixLimits(self, TileMatrixLimits): self.TileMatrixLimits = TileMatrixLimits
def add_TileMatrixLimits(self, value): self.TileMatrixLimits.append(value)
def insert_TileMatrixLimits(self, index, value): self.TileMatrixLimits[index] = value
def export(self, outfile, level, namespace_='', name_='TileMatrixSetLimits', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='TileMatrixSetLimits')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='TileMatrixSetLimits'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='TileMatrixSetLimits', fromsubclass_=False):
for TileMatrixLimits_ in self.TileMatrixLimits:
TileMatrixLimits_.export(outfile, level, namespace_, name_='TileMatrixLimits')
def hasContent_(self):
if (
self.TileMatrixLimits
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='TileMatrixSetLimits'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('TileMatrixLimits=[\n')
level += 1
for TileMatrixLimits_ in self.TileMatrixLimits:
showIndent(outfile, level)
outfile.write('model_.TileMatrixLimits(\n')
TileMatrixLimits_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'TileMatrixLimits':
obj_ = TileMatrixLimits.factory()
obj_.build(child_)
self.TileMatrixLimits.append(obj_)
# end class TileMatrixSetLimits
class TileMatrixLimits(GeneratedsSuper):
"""Metadata describing the limits of a TileMatrix for this layer."""
subclass = None
superclass = None
def __init__(self, TileMatrix=None, MinTileRow=None, MaxTileRow=None, MinTileCol=None, MaxTileCol=None):
self.TileMatrix = TileMatrix
self.MinTileRow = MinTileRow
self.MaxTileRow = MaxTileRow
self.MinTileCol = MinTileCol
self.MaxTileCol = MaxTileCol
def factory(*args_, **kwargs_):
if TileMatrixLimits.subclass:
return TileMatrixLimits.subclass(*args_, **kwargs_)
else:
return TileMatrixLimits(*args_, **kwargs_)
factory = staticmethod(factory)
def get_TileMatrix(self): return self.TileMatrix
def set_TileMatrix(self, TileMatrix): self.TileMatrix = TileMatrix
def get_MinTileRow(self): return self.MinTileRow
def set_MinTileRow(self, MinTileRow): self.MinTileRow = MinTileRow
def get_MaxTileRow(self): return self.MaxTileRow
def set_MaxTileRow(self, MaxTileRow): self.MaxTileRow = MaxTileRow
def get_MinTileCol(self): return self.MinTileCol
def set_MinTileCol(self, MinTileCol): self.MinTileCol = MinTileCol
def get_MaxTileCol(self): return self.MaxTileCol
def set_MaxTileCol(self, MaxTileCol): self.MaxTileCol = MaxTileCol
def export(self, outfile, level, namespace_='', name_='TileMatrixLimits', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='TileMatrixLimits')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='TileMatrixLimits'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='TileMatrixLimits', fromsubclass_=False):
if self.TileMatrix is not None:
showIndent(outfile, level)
outfile.write('<%sTileMatrix>%s</%sTileMatrix>\n' % (namespace_, self.gds_format_string(quote_xml(self.TileMatrix).encode(ExternalEncoding), input_name='TileMatrix'), namespace_))
if self.MinTileRow is not None:
showIndent(outfile, level)
outfile.write('<%sMinTileRow>%s</%sMinTileRow>\n' % (namespace_, self.gds_format_integer(self.MinTileRow, input_name='MinTileRow'), namespace_))
if self.MaxTileRow is not None:
showIndent(outfile, level)
outfile.write('<%sMaxTileRow>%s</%sMaxTileRow>\n' % (namespace_, self.gds_format_integer(self.MaxTileRow, input_name='MaxTileRow'), namespace_))
if self.MinTileCol is not None:
showIndent(outfile, level)
outfile.write('<%sMinTileCol>%s</%sMinTileCol>\n' % (namespace_, self.gds_format_integer(self.MinTileCol, input_name='MinTileCol'), namespace_))
if self.MaxTileCol is not None:
showIndent(outfile, level)
outfile.write('<%sMaxTileCol>%s</%sMaxTileCol>\n' % (namespace_, self.gds_format_integer(self.MaxTileCol, input_name='MaxTileCol'), namespace_))
def hasContent_(self):
if (
self.TileMatrix is not None or
self.MinTileRow is not None or
self.MaxTileRow is not None or
self.MinTileCol is not None or
self.MaxTileCol is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='TileMatrixLimits'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.TileMatrix is not None:
showIndent(outfile, level)
outfile.write('TileMatrix=%s,\n' % quote_python(self.TileMatrix).encode(ExternalEncoding))
if self.MinTileRow is not None:
showIndent(outfile, level)
outfile.write('MinTileRow=%d,\n' % self.MinTileRow)
if self.MaxTileRow is not None:
showIndent(outfile, level)
outfile.write('MaxTileRow=%d,\n' % self.MaxTileRow)
if self.MinTileCol is not None:
showIndent(outfile, level)
outfile.write('MinTileCol=%d,\n' % self.MinTileCol)
if self.MaxTileCol is not None:
showIndent(outfile, level)
outfile.write('MaxTileCol=%d,\n' % self.MaxTileCol)
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'TileMatrix':
TileMatrix_ = child_.text
TileMatrix_ = self.gds_validate_string(TileMatrix_, node, 'TileMatrix')
self.TileMatrix = TileMatrix_
elif nodeName_ == 'MinTileRow':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
if ival_ <= 0:
raise_parse_error(child_, 'requires positiveInteger')
ival_ = self.gds_validate_integer(ival_, node, 'MinTileRow')
self.MinTileRow = ival_
elif nodeName_ == 'MaxTileRow':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
if ival_ <= 0:
raise_parse_error(child_, 'requires positiveInteger')
ival_ = self.gds_validate_integer(ival_, node, 'MaxTileRow')
self.MaxTileRow = ival_
elif nodeName_ == 'MinTileCol':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
if ival_ <= 0:
raise_parse_error(child_, 'requires positiveInteger')
ival_ = self.gds_validate_integer(ival_, node, 'MinTileCol')
self.MinTileCol = ival_
elif nodeName_ == 'MaxTileCol':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
if ival_ <= 0:
raise_parse_error(child_, 'requires positiveInteger')
ival_ = self.gds_validate_integer(ival_, node, 'MaxTileCol')
self.MaxTileCol = ival_
# end class TileMatrixLimits
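# Hypothetical usage sketch (not emitted by generateDS.py): build a small
# TileMatrixSetLink object tree with the generated classes above and
# serialize it as XML.
def _example_tile_matrix_set_link(outfile=sys.stdout):
    limits = TileMatrixLimits(TileMatrix='2', MinTileRow=1, MaxTileRow=3,
                              MinTileCol=1, MaxTileCol=3)
    link = TileMatrixSetLink(
        TileMatrixSet='WholeWorld_CRS_84',
        TileMatrixSetLimits=TileMatrixSetLimits(TileMatrixLimits=[limits]))
    link.export(outfile, level=0)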
class URLTemplateType(GeneratedsSuper):
"""Format of the resource representation that can be retrieved one
resolved the URL template.Resource type to be retrieved. It can
only be "tile" or "FeatureInfo"URL template. A template
processor will be applied to substitute some variables between
{} for their values and get a URL to a resource. We cound not
use a anyURI type (that conforms the character restrictions
specified in RFC2396 and excludes '{' '}' characters in some XML
parsers) because this attribute must accept the '{' '}'
caracters."""
subclass = None
superclass = None
def __init__(self, resourceType=None, template=None, format=None, valueOf_=None):
self.resourceType = _cast(None, resourceType)
self.template = _cast(None, template)
self.format = _cast(None, format)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if URLTemplateType.subclass:
return URLTemplateType.subclass(*args_, **kwargs_)
else:
return URLTemplateType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_resourceType(self): return self.resourceType
def set_resourceType(self, resourceType): self.resourceType = resourceType
def get_template(self): return self.template
def set_template(self, template): self.template = template
def get_format(self): return self.format
def set_format(self, format): self.format = format
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='URLTemplateType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='URLTemplateType')
if self.hasContent_():
outfile.write('>')
outfile.write(self.valueOf_.encode(ExternalEncoding))
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='URLTemplateType'):
if self.resourceType is not None and 'resourceType' not in already_processed:
already_processed.append('resourceType')
outfile.write(' resourceType=%s' % (self.gds_format_string(quote_attrib(self.resourceType).encode(ExternalEncoding), input_name='resourceType'), ))
if self.template is not None and 'template' not in already_processed:
already_processed.append('template')
outfile.write(' template=%s' % (self.gds_format_string(quote_attrib(self.template).encode(ExternalEncoding), input_name='template'), ))
if self.format is not None and 'format' not in already_processed:
already_processed.append('format')
outfile.write(' format=%s' % (quote_attrib(self.format), ))
def exportChildren(self, outfile, level, namespace_='', name_='URLTemplateType', fromsubclass_=False):
pass
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='URLTemplateType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.resourceType is not None and 'resourceType' not in already_processed:
already_processed.append('resourceType')
showIndent(outfile, level)
outfile.write('resourceType = "%s",\n' % (self.resourceType,))
if self.template is not None and 'template' not in already_processed:
already_processed.append('template')
showIndent(outfile, level)
outfile.write('template = "%s",\n' % (self.template,))
if self.format is not None and 'format' not in already_processed:
already_processed.append('format')
showIndent(outfile, level)
outfile.write('format = %s,\n' % (self.format,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('resourceType', node)
if value is not None and 'resourceType' not in already_processed:
already_processed.append('resourceType')
self.resourceType = value
value = find_attr_value_('template', node)
if value is not None and 'template' not in already_processed:
already_processed.append('template')
self.template = value
value = find_attr_value_('format', node)
if value is not None and 'format' not in already_processed:
already_processed.append('format')
self.format = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class URLTemplateType
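# Hypothetical illustration (not emitted by generateDS.py): a RESTful tile URL
# template as it might appear in a WMTS capabilities document; the variables
# between '{' and '}' are substituted by the client before requesting a tile.
_EXAMPLE_URL_TEMPLATE = URLTemplateType(
    resourceType='tile',
    format='image/png',
    template='http://www.example.com/wmts/layer/default/'
             '{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png')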
class Themes(GeneratedsSuper):
"""Provides a set of hierarchical themes that the client can use to
categorize the layers by."""
subclass = None
superclass = None
def __init__(self, Theme=None):
if Theme is None:
self.Theme = []
else:
self.Theme = Theme
def factory(*args_, **kwargs_):
if Themes.subclass:
return Themes.subclass(*args_, **kwargs_)
else:
return Themes(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Theme(self): return self.Theme
def set_Theme(self, Theme): self.Theme = Theme
def add_Theme(self, value): self.Theme.append(value)
def insert_Theme(self, index, value): self.Theme[index] = value
def export(self, outfile, level, namespace_='', name_='Themes', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='Themes')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Themes'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='Themes', fromsubclass_=False):
for Theme_ in self.Theme:
Theme_.export(outfile, level, namespace_, name_='Theme')
def hasContent_(self):
if (
self.Theme
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='Themes'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('Theme=[\n')
level += 1
for Theme_ in self.Theme:
showIndent(outfile, level)
outfile.write('model_.Theme(\n')
Theme_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Theme':
obj_ = Theme.factory()
obj_.build(child_)
self.Theme.append(obj_)
# end class Themes
class Resource(GeneratedsSuper):
"""XML encoded GetResourceByID operation response. The complexType used
by this element shall be specified by each specific OWS."""
subclass = None
superclass = None
def __init__(self, valueOf_=None):
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if Resource.subclass:
return Resource.subclass(*args_, **kwargs_)
else:
return Resource(*args_, **kwargs_)
factory = staticmethod(factory)
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='Resource', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='Resource')
if self.hasContent_():
outfile.write('>')
outfile.write(self.valueOf_.encode(ExternalEncoding))
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Resource'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='Resource', fromsubclass_=False):
pass
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='Resource'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class Resource
class GetResourceByIdType(GeneratedsSuper):
"""Request to a service to perform the GetResourceByID operation. This
operation allows a client to retrieve one or more identified
resources, including datasets and resources that describe
datasets or parameters. In this XML encoding, no "request"
parameter is included, since the element name specifies the
specific operation."""
subclass = None
superclass = None
def __init__(self, version=None, service=None, ResourceID=None, OutputFormat=None):
self.version = _cast(None, version)
self.service = _cast(None, service)
if ResourceID is None:
self.ResourceID = []
else:
self.ResourceID = ResourceID
self.OutputFormat = OutputFormat
def factory(*args_, **kwargs_):
if GetResourceByIdType.subclass:
return GetResourceByIdType.subclass(*args_, **kwargs_)
else:
return GetResourceByIdType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ResourceID(self): return self.ResourceID
def set_ResourceID(self, ResourceID): self.ResourceID = ResourceID
def add_ResourceID(self, value): self.ResourceID.append(value)
def insert_ResourceID(self, index, value): self.ResourceID[index] = value
def get_OutputFormat(self): return self.OutputFormat
def set_OutputFormat(self, OutputFormat): self.OutputFormat = OutputFormat
def get_version(self): return self.version
def set_version(self, version): self.version = version
def get_service(self): return self.service
def set_service(self, service): self.service = service
def export(self, outfile, level, namespace_='', name_='GetResourceByIdType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='GetResourceByIdType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='GetResourceByIdType'):
if self.version is not None and 'version' not in already_processed:
already_processed.append('version')
outfile.write(' version=%s' % (quote_attrib(self.version), ))
if self.service is not None and 'service' not in already_processed:
already_processed.append('service')
outfile.write(' service=%s' % (quote_attrib(self.service), ))
def exportChildren(self, outfile, level, namespace_='', name_='GetResourceByIdType', fromsubclass_=False):
for ResourceID_ in self.ResourceID:
showIndent(outfile, level)
outfile.write('<%sResourceID>%s</%sResourceID>\n' % (namespace_, self.gds_format_string(quote_xml(ResourceID_).encode(ExternalEncoding), input_name='ResourceID'), namespace_))
if self.OutputFormat is not None:
showIndent(outfile, level)
outfile.write('<%sOutputFormat>%s</%sOutputFormat>\n' % (namespace_, self.gds_format_string(quote_xml(self.OutputFormat).encode(ExternalEncoding), input_name='OutputFormat'), namespace_))
def hasContent_(self):
if (
self.ResourceID or
self.OutputFormat is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='GetResourceByIdType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.version is not None and 'version' not in already_processed:
already_processed.append('version')
showIndent(outfile, level)
outfile.write('version = %s,\n' % (self.version,))
if self.service is not None and 'service' not in already_processed:
already_processed.append('service')
showIndent(outfile, level)
outfile.write('service = %s,\n' % (self.service,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('ResourceID=[\n')
level += 1
for ResourceID_ in self.ResourceID:
showIndent(outfile, level)
outfile.write('%s,\n' % quote_python(ResourceID_).encode(ExternalEncoding))
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.OutputFormat is not None:
showIndent(outfile, level)
outfile.write('OutputFormat=%s,\n' % quote_python(self.OutputFormat).encode(ExternalEncoding))
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('version', node)
if value is not None and 'version' not in already_processed:
already_processed.append('version')
self.version = value
value = find_attr_value_('service', node)
if value is not None and 'service' not in already_processed:
already_processed.append('service')
self.service = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'ResourceID':
ResourceID_ = child_.text
ResourceID_ = self.gds_validate_string(ResourceID_, node, 'ResourceID')
self.ResourceID.append(ResourceID_)
elif nodeName_ == 'OutputFormat':
OutputFormat_ = child_.text
OutputFormat_ = self.gds_validate_string(OutputFormat_, node, 'OutputFormat')
self.OutputFormat = OutputFormat_
# end class GetResourceByIdType
class DescriptionType(GeneratedsSuper):
"""Human-readable descriptive information for the object it is included
within. This type shall be extended if needed for specific OWS
use to include additional metadata for each type of information.
This type shall not be restricted for a specific OWS to change
the multiplicity (or optionality) of some elements. If the
xml:lang attribute is not included in a Title, Abstract or
Keyword element, then no language is specified for that element
unless specified by another means. All Title, Abstract and
Keyword elements in the same Description that share the same
xml:lang attribute value represent the description of the parent
object in that language. Multiple Title or Abstract elements
shall not exist in the same Description with the same xml:lang
attribute value unless otherwise specified."""
subclass = None
superclass = None
def __init__(self, Title=None, Abstract=None, Keywords=None):
if Title is None:
self.Title = []
else:
self.Title = Title
if Abstract is None:
self.Abstract = []
else:
self.Abstract = Abstract
if Keywords is None:
self.Keywords = []
else:
self.Keywords = Keywords
def factory(*args_, **kwargs_):
if DescriptionType.subclass:
return DescriptionType.subclass(*args_, **kwargs_)
else:
return DescriptionType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Title(self): return self.Title
def set_Title(self, Title): self.Title = Title
def add_Title(self, value): self.Title.append(value)
def insert_Title(self, index, value): self.Title[index] = value
def get_Abstract(self): return self.Abstract
def set_Abstract(self, Abstract): self.Abstract = Abstract
def add_Abstract(self, value): self.Abstract.append(value)
def insert_Abstract(self, index, value): self.Abstract[index] = value
def get_Keywords(self): return self.Keywords
def set_Keywords(self, Keywords): self.Keywords = Keywords
def add_Keywords(self, value): self.Keywords.append(value)
def insert_Keywords(self, index, value): self.Keywords[index] = value
def export(self, outfile, level, namespace_='', name_='DescriptionType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='DescriptionType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='DescriptionType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='DescriptionType', fromsubclass_=False):
for Title_ in self.Title:
Title_.export(outfile, level, namespace_, name_='Title')
for Abstract_ in self.Abstract:
Abstract_.export(outfile, level, namespace_, name_='Abstract')
for Keywords_ in self.Keywords:
Keywords_.export(outfile, level, namespace_, name_='Keywords')
def hasContent_(self):
if (
self.Title or
self.Abstract or
self.Keywords
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='DescriptionType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('Title=[\n')
level += 1
for Title_ in self.Title:
showIndent(outfile, level)
outfile.write('model_.Title(\n')
Title_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('Abstract=[\n')
level += 1
for Abstract_ in self.Abstract:
showIndent(outfile, level)
outfile.write('model_.Abstract(\n')
Abstract_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('Keywords=[\n')
level += 1
for Keywords_ in self.Keywords:
showIndent(outfile, level)
outfile.write('model_.Keywords(\n')
Keywords_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Title':
obj_ = LanguageStringType.factory()
obj_.build(child_)
self.Title.append(obj_)
elif nodeName_ == 'Abstract':
obj_ = LanguageStringType.factory()
obj_.build(child_)
self.Abstract.append(obj_)
elif nodeName_ == 'Keywords':
obj_ = KeywordsType.factory()
obj_.build(child_)
self.Keywords.append(obj_)
# end class DescriptionType
class BasicIdentificationType(DescriptionType):
"""Basic metadata identifying and describing a set of data."""
subclass = None
superclass = DescriptionType
def __init__(self, Title=None, Abstract=None, Keywords=None, Identifier=None, Metadata=None):
super(BasicIdentificationType, self).__init__(Title, Abstract, Keywords, )
self.Identifier = Identifier
if Metadata is None:
self.Metadata = []
else:
self.Metadata = Metadata
def factory(*args_, **kwargs_):
if BasicIdentificationType.subclass:
return BasicIdentificationType.subclass(*args_, **kwargs_)
else:
return BasicIdentificationType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Identifier(self): return self.Identifier
def set_Identifier(self, Identifier): self.Identifier = Identifier
def get_Metadata(self): return self.Metadata
def set_Metadata(self, Metadata): self.Metadata = Metadata
def add_Metadata(self, value): self.Metadata.append(value)
def insert_Metadata(self, index, value): self.Metadata[index] = value
def export(self, outfile, level, namespace_='', name_='BasicIdentificationType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='BasicIdentificationType')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
# outfile.write(' xsi:type="BasicIdentificationType"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='BasicIdentificationType'):
super(BasicIdentificationType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='BasicIdentificationType')
def exportChildren(self, outfile, level, namespace_='', name_='BasicIdentificationType', fromsubclass_=False):
super(BasicIdentificationType, self).exportChildren(outfile, level, namespace_, name_, True)
if self.Identifier:
self.Identifier.export(outfile, level, namespace_, name_='Identifier')
for Metadata_ in self.Metadata:
Metadata_.export(outfile, level, namespace_, name_='Metadata')
def hasContent_(self):
if (
self.Identifier is not None or
self.Metadata or
super(BasicIdentificationType, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='BasicIdentificationType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(BasicIdentificationType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(BasicIdentificationType, self).exportLiteralChildren(outfile, level, name_)
if self.Identifier is not None:
showIndent(outfile, level)
outfile.write('Identifier=model_.Identifier(\n')
self.Identifier.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
outfile.write('Metadata=[\n')
level += 1
for Metadata_ in self.Metadata:
showIndent(outfile, level)
outfile.write('model_.Metadata(\n')
Metadata_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(BasicIdentificationType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Identifier':
obj_ = CodeType.factory()
obj_.build(child_)
self.set_Identifier(obj_)
elif nodeName_ == 'Metadata':
obj_ = MetadataType.factory()
obj_.build(child_)
self.Metadata.append(obj_)
super(BasicIdentificationType, self).buildChildren(child_, node, nodeName_, True)
# end class BasicIdentificationType
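# Minimal usage sketch (not part of the generated schema bindings): shows how
# a BasicIdentificationType can be assembled from the element classes in this
# module and serialized. The _example_* name, identifiers and URL below are
# illustrative assumptions only.
def _example_basic_identification(outfile):
    ident = BasicIdentificationType(
        Title=[LanguageStringType(lang='en', valueOf_='Sample dataset')],
        Abstract=[LanguageStringType(lang='en', valueOf_='A short description of the dataset.')],
        Identifier=CodeType(codeSpace='urn:example:ids', valueOf_='dataset-001'))
    # Attach a remote metadata reference via its xlink:href attribute.
    ident.add_Metadata(MetadataType(href='http://example.org/metadata/dataset-001.xml'))
    # Write the element as XML, starting at indentation level 0.
    ident.export(outfile, 0, namespace_='ows:')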
class IdentificationType(BasicIdentificationType):
"""Extended metadata identifying and describing a set of data. This
type shall be extended if needed for each specific OWS to
include additional metadata for each type of dataset. If needed,
this type should first be restricted for each specific OWS to
change the multiplicity (or optionality) of some elements."""
subclass = None
superclass = BasicIdentificationType
def __init__(self, Title=None, Abstract=None, Keywords=None, Identifier=None, Metadata=None, BoundingBox=None, OutputFormat=None, AvailableCRS=None):
super(IdentificationType, self).__init__(Title, Abstract, Keywords, Identifier, Metadata, )
if BoundingBox is None:
self.BoundingBox = []
else:
self.BoundingBox = BoundingBox
if OutputFormat is None:
self.OutputFormat = []
else:
self.OutputFormat = OutputFormat
if AvailableCRS is None:
self.AvailableCRS = []
else:
self.AvailableCRS = AvailableCRS
def factory(*args_, **kwargs_):
if IdentificationType.subclass:
return IdentificationType.subclass(*args_, **kwargs_)
else:
return IdentificationType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_BoundingBox(self): return self.BoundingBox
def set_BoundingBox(self, BoundingBox): self.BoundingBox = BoundingBox
def add_BoundingBox(self, value): self.BoundingBox.append(value)
def insert_BoundingBox(self, index, value): self.BoundingBox[index] = value
def get_OutputFormat(self): return self.OutputFormat
def set_OutputFormat(self, OutputFormat): self.OutputFormat = OutputFormat
def add_OutputFormat(self, value): self.OutputFormat.append(value)
def insert_OutputFormat(self, index, value): self.OutputFormat[index] = value
def get_AvailableCRS(self): return self.AvailableCRS
def set_AvailableCRS(self, AvailableCRS): self.AvailableCRS = AvailableCRS
def add_AvailableCRS(self, value): self.AvailableCRS.append(value)
def insert_AvailableCRS(self, index, value): self.AvailableCRS[index] = value
def export(self, outfile, level, namespace_='', name_='IdentificationType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='IdentificationType')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
# outfile.write(' xsi:type="IdentificationType"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='IdentificationType'):
super(IdentificationType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='IdentificationType')
def exportChildren(self, outfile, level, namespace_='', name_='IdentificationType', fromsubclass_=False):
super(IdentificationType, self).exportChildren(outfile, level, namespace_, name_, True)
for BoundingBox_ in self.BoundingBox:
BoundingBox_.export(outfile, level, namespace_, name_='BoundingBox')
for OutputFormat_ in self.OutputFormat:
showIndent(outfile, level)
outfile.write('<%sOutputFormat>%s</%sOutputFormat>\n' % (namespace_, self.gds_format_string(quote_xml(OutputFormat_).encode(ExternalEncoding), input_name='OutputFormat'), namespace_))
for AvailableCRS_ in self.AvailableCRS:
showIndent(outfile, level)
outfile.write('<%sAvailableCRS>%s</%sAvailableCRS>\n' % (namespace_, self.gds_format_string(quote_xml(AvailableCRS_).encode(ExternalEncoding), input_name='AvailableCRS'), namespace_))
def hasContent_(self):
if (
self.BoundingBox or
self.OutputFormat or
self.AvailableCRS or
super(IdentificationType, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='IdentificationType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(IdentificationType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(IdentificationType, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('BoundingBox=[\n')
level += 1
for BoundingBox_ in self.BoundingBox:
showIndent(outfile, level)
outfile.write('model_.BoundingBox(\n')
BoundingBox_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('OutputFormat=[\n')
level += 1
for OutputFormat_ in self.OutputFormat:
showIndent(outfile, level)
outfile.write('%s,\n' % quote_python(OutputFormat_).encode(ExternalEncoding))
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('AvailableCRS=[\n')
level += 1
for AvailableCRS_ in self.AvailableCRS:
showIndent(outfile, level)
outfile.write('%s,\n' % quote_python(AvailableCRS_).encode(ExternalEncoding))
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(IdentificationType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'BoundingBox':
obj_ = BoundingBoxType.factory()
obj_.build(child_)
self.BoundingBox.append(obj_)
elif nodeName_ == 'OutputFormat':
OutputFormat_ = child_.text
OutputFormat_ = self.gds_validate_string(OutputFormat_, node, 'OutputFormat')
self.OutputFormat.append(OutputFormat_)
elif nodeName_ == 'AvailableCRS':
AvailableCRS_ = child_.text
AvailableCRS_ = self.gds_validate_string(AvailableCRS_, node, 'AvailableCRS')
self.AvailableCRS.append(AvailableCRS_)
super(IdentificationType, self).buildChildren(child_, node, nodeName_, True)
# end class IdentificationType
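# Minimal usage sketch (not part of the generated schema bindings): the
# extended IdentificationType adds output formats and CRS references to the
# inherited description elements. The identifier, format and CRS URN are
# illustrative assumptions.
def _example_identification(outfile):
    ident = IdentificationType(
        Title=[LanguageStringType(lang='en', valueOf_='Elevation coverage')],
        Identifier=CodeType(valueOf_='coverage-42'))
    ident.add_OutputFormat('image/tiff')
    ident.add_AvailableCRS('urn:ogc:def:crs:EPSG::4326')
    ident.export(outfile, 0, namespace_='ows:')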
class MetadataType(GeneratedsSuper):
"""This element either references or contains more metadata about the
element that includes this element. To reference metadata stored
    remotely, at least the xlink:href attribute in xlink:simpleLink
shall be included. Either at least one of the attributes in
xlink:simpleLink or a substitute for the AbstractMetaData
element shall be included, but not both. An Implementation
Specification can restrict the contents of this element to
always be a reference or always contain metadata. (Informative:
This element was adapted from the metaDataProperty element in
GML 3.0.) Reference to metadata recorded elsewhere, either
external to this XML document or within it. Whenever practical,
the xlink:href attribute with type anyURI should include a URL
from which this metadata can be electronically retrieved.
Optional reference to the aspect of the element which includes
this "metadata" element that this metadata provides more
information about."""
subclass = None
superclass = None
def __init__(self, about=None, title=None, show=None, actuate=None, href=None, role=None, arcrole=None, type_=None, AbstractMetaData=None):
self.about = _cast(None, about)
self.title = _cast(None, title)
self.show = _cast(None, show)
self.actuate = _cast(None, actuate)
self.href = _cast(None, href)
self.role = _cast(None, role)
self.arcrole = _cast(None, arcrole)
self.type_ = _cast(None, type_)
self.AbstractMetaData = AbstractMetaData
def factory(*args_, **kwargs_):
if MetadataType.subclass:
return MetadataType.subclass(*args_, **kwargs_)
else:
return MetadataType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_AbstractMetaData(self): return self.AbstractMetaData
def set_AbstractMetaData(self, AbstractMetaData): self.AbstractMetaData = AbstractMetaData
def get_about(self): return self.about
def set_about(self, about): self.about = about
def get_title(self): return self.title
def set_title(self, title): self.title = title
def get_show(self): return self.show
def set_show(self, show): self.show = show
def get_actuate(self): return self.actuate
def set_actuate(self, actuate): self.actuate = actuate
def get_href(self): return self.href
def set_href(self, href): self.href = href
def get_role(self): return self.role
def set_role(self, role): self.role = role
def get_arcrole(self): return self.arcrole
def set_arcrole(self, arcrole): self.arcrole = arcrole
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def export(self, outfile, level, namespace_='', name_='MetadataType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='MetadataType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='MetadataType'):
if self.about is not None and 'about' not in already_processed:
already_processed.append('about')
outfile.write(' about=%s' % (self.gds_format_string(quote_attrib(self.about).encode(ExternalEncoding), input_name='about'), ))
if self.title is not None and 'title' not in already_processed:
already_processed.append('title')
outfile.write(' title=%s' % (self.gds_format_string(quote_attrib(self.title).encode(ExternalEncoding), input_name='title'), ))
if self.show is not None and 'show' not in already_processed:
already_processed.append('show')
outfile.write(' show=%s' % (self.gds_format_string(quote_attrib(self.show).encode(ExternalEncoding), input_name='show'), ))
if self.actuate is not None and 'actuate' not in already_processed:
already_processed.append('actuate')
outfile.write(' actuate=%s' % (self.gds_format_string(quote_attrib(self.actuate).encode(ExternalEncoding), input_name='actuate'), ))
if self.href is not None and 'href' not in already_processed:
already_processed.append('href')
outfile.write(' href=%s' % (self.gds_format_string(quote_attrib(self.href).encode(ExternalEncoding), input_name='href'), ))
if self.role is not None and 'role' not in already_processed:
already_processed.append('role')
outfile.write(' role=%s' % (self.gds_format_string(quote_attrib(self.role).encode(ExternalEncoding), input_name='role'), ))
if self.arcrole is not None and 'arcrole' not in already_processed:
already_processed.append('arcrole')
outfile.write(' arcrole=%s' % (self.gds_format_string(quote_attrib(self.arcrole).encode(ExternalEncoding), input_name='arcrole'), ))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.append('type_')
outfile.write(' type=%s' % (self.gds_format_string(quote_attrib(self.type_).encode(ExternalEncoding), input_name='type'), ))
def exportChildren(self, outfile, level, namespace_='', name_='MetadataType', fromsubclass_=False):
        if self.AbstractMetaData is not None:
            self.AbstractMetaData.export(outfile, level, namespace_, name_='AbstractMetaData')
def hasContent_(self):
if (
self.AbstractMetaData is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='MetadataType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.about is not None and 'about' not in already_processed:
already_processed.append('about')
showIndent(outfile, level)
outfile.write('about = "%s",\n' % (self.about,))
if self.title is not None and 'title' not in already_processed:
already_processed.append('title')
showIndent(outfile, level)
outfile.write('title = "%s",\n' % (self.title,))
if self.show is not None and 'show' not in already_processed:
already_processed.append('show')
showIndent(outfile, level)
outfile.write('show = "%s",\n' % (self.show,))
if self.actuate is not None and 'actuate' not in already_processed:
already_processed.append('actuate')
showIndent(outfile, level)
outfile.write('actuate = "%s",\n' % (self.actuate,))
if self.href is not None and 'href' not in already_processed:
already_processed.append('href')
showIndent(outfile, level)
outfile.write('href = "%s",\n' % (self.href,))
if self.role is not None and 'role' not in already_processed:
already_processed.append('role')
showIndent(outfile, level)
outfile.write('role = "%s",\n' % (self.role,))
if self.arcrole is not None and 'arcrole' not in already_processed:
already_processed.append('arcrole')
showIndent(outfile, level)
outfile.write('arcrole = "%s",\n' % (self.arcrole,))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.append('type_')
showIndent(outfile, level)
outfile.write('type_ = "%s",\n' % (self.type_,))
def exportLiteralChildren(self, outfile, level, name_):
if self.AbstractMetaData is not None:
showIndent(outfile, level)
outfile.write('AbstractMetaData=model_.AbstractMetaData(\n')
self.AbstractMetaData.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('about', node)
if value is not None and 'about' not in already_processed:
already_processed.append('about')
self.about = value
value = find_attr_value_('title', node)
if value is not None and 'title' not in already_processed:
already_processed.append('title')
self.title = value
value = find_attr_value_('show', node)
if value is not None and 'show' not in already_processed:
already_processed.append('show')
self.show = value
value = find_attr_value_('actuate', node)
if value is not None and 'actuate' not in already_processed:
already_processed.append('actuate')
self.actuate = value
value = find_attr_value_('href', node)
if value is not None and 'href' not in already_processed:
already_processed.append('href')
self.href = value
value = find_attr_value_('role', node)
if value is not None and 'role' not in already_processed:
already_processed.append('role')
self.role = value
value = find_attr_value_('arcrole', node)
if value is not None and 'arcrole' not in already_processed:
already_processed.append('arcrole')
self.arcrole = value
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.append('type')
self.type_ = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'AbstractMetaData':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <AbstractMetaData> element')
self.set_AbstractMetaData(obj_)
# end class MetadataType
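# Minimal usage sketch (not part of the generated schema bindings): a Metadata
# element that only references a remote record through its xlink attributes,
# the common case when no AbstractMetaData substitute is embedded. The URL and
# title are illustrative assumptions.
def _example_metadata_reference(outfile):
    md = MetadataType(
        href='http://example.org/records/dataset-001.xml',
        title='Full metadata record',
        type_='simple')
    # With AbstractMetaData left unset the element is written self-closing.
    md.export(outfile, 0, namespace_='ows:', name_='Metadata')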
class AbstractMetaData(GeneratedsSuper):
"""Abstract element containing more metadata about the element that
includes the containing "metadata" element. A specific server
implementation, or an Implementation Specification, can define
concrete elements in the AbstractMetaData substitution group."""
subclass = None
superclass = None
def __init__(self, valueOf_=None):
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if AbstractMetaData.subclass:
return AbstractMetaData.subclass(*args_, **kwargs_)
else:
return AbstractMetaData(*args_, **kwargs_)
factory = staticmethod(factory)
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='AbstractMetaData', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='AbstractMetaData')
if self.hasContent_():
outfile.write('>')
outfile.write(self.valueOf_.encode(ExternalEncoding))
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AbstractMetaData'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='AbstractMetaData', fromsubclass_=False):
pass
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AbstractMetaData'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class AbstractMetaData
class BoundingBoxType(GeneratedsSuper):
"""XML encoded minimum rectangular bounding box (or region) parameter,
surrounding all the associated data. This type is adapted from
the EnvelopeType of GML 3.1, with modified contents and
documentation for encoding a MINIMUM size box SURROUNDING all
associated data. Usually references the definition of a CRS, as
specified in [OGC Topic 2]. Such a CRS definition can be XML
encoded using the gml:CoordinateReferenceSystemType in [GML
3.1]. For well known references, it is not required that a CRS
definition exist at the location the URI points to. If no anyURI
value is included, the applicable CRS must be either: a)
Specified outside the bounding box, but inside a data structure
that includes this bounding box, as specified for a specific OWS
use of this bounding box type. b) Fixed and specified in the
Implementation Specification for a specific OWS use of the
bounding box type. The number of dimensions in this CRS (the
length of a coordinate sequence in this use of the
PositionType). This number is specified by the CRS definition,
but can also be specified here."""
subclass = None
superclass = None
def __init__(self, crs=None, dimensions=None, LowerCorner=None, UpperCorner=None):
self.crs = _cast(None, crs)
self.dimensions = _cast(int, dimensions)
self.LowerCorner = LowerCorner
self.UpperCorner = UpperCorner
def factory(*args_, **kwargs_):
if BoundingBoxType.subclass:
return BoundingBoxType.subclass(*args_, **kwargs_)
else:
return BoundingBoxType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_LowerCorner(self): return self.LowerCorner
def set_LowerCorner(self, LowerCorner): self.LowerCorner = LowerCorner
def validate_PositionType(self, value):
# Validate type PositionType, a restriction on double.
pass
def get_UpperCorner(self): return self.UpperCorner
def set_UpperCorner(self, UpperCorner): self.UpperCorner = UpperCorner
def get_crs(self): return self.crs
def set_crs(self, crs): self.crs = crs
def get_dimensions(self): return self.dimensions
def set_dimensions(self, dimensions): self.dimensions = dimensions
def export(self, outfile, level, namespace_='', name_='BoundingBoxType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='BoundingBoxType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='BoundingBoxType'):
if self.crs is not None and 'crs' not in already_processed:
already_processed.append('crs')
outfile.write(' crs=%s' % (self.gds_format_string(quote_attrib(self.crs).encode(ExternalEncoding), input_name='crs'), ))
if self.dimensions is not None and 'dimensions' not in already_processed:
already_processed.append('dimensions')
outfile.write(' dimensions="%s"' % self.gds_format_integer(self.dimensions, input_name='dimensions'))
def exportChildren(self, outfile, level, namespace_='', name_='BoundingBoxType', fromsubclass_=False):
if self.LowerCorner is not None:
showIndent(outfile, level)
outfile.write('<%sLowerCorner>%s</%sLowerCorner>\n' % (namespace_, self.gds_format_double_list(self.LowerCorner, input_name='LowerCorner'), namespace_))
if self.UpperCorner is not None:
showIndent(outfile, level)
outfile.write('<%sUpperCorner>%s</%sUpperCorner>\n' % (namespace_, self.gds_format_double_list(self.UpperCorner, input_name='UpperCorner'), namespace_))
def hasContent_(self):
if (
self.LowerCorner is not None or
self.UpperCorner is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='BoundingBoxType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.crs is not None and 'crs' not in already_processed:
already_processed.append('crs')
showIndent(outfile, level)
outfile.write('crs = "%s",\n' % (self.crs,))
if self.dimensions is not None and 'dimensions' not in already_processed:
already_processed.append('dimensions')
showIndent(outfile, level)
outfile.write('dimensions = %d,\n' % (self.dimensions,))
def exportLiteralChildren(self, outfile, level, name_):
        if self.LowerCorner is not None:
            showIndent(outfile, level)
            # The corners are stored as coordinate lists (see buildChildren), so
            # format them with %s rather than the scalar %e conversion.
            outfile.write('LowerCorner=%s,\n' % (self.LowerCorner,))
        if self.UpperCorner is not None:
            showIndent(outfile, level)
            outfile.write('UpperCorner=%s,\n' % (self.UpperCorner,))
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('crs', node)
if value is not None and 'crs' not in already_processed:
already_processed.append('crs')
self.crs = value
value = find_attr_value_('dimensions', node)
if value is not None and 'dimensions' not in already_processed:
already_processed.append('dimensions')
try:
self.dimensions = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.dimensions <= 0:
raise_parse_error(node, 'Invalid PositiveInteger')
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'LowerCorner':
LowerCorner_ = child_.text
LowerCorner_ = self.gds_validate_double_list(LowerCorner_, node, 'LowerCorner')
self.LowerCorner = LowerCorner_
self.LowerCorner = self.LowerCorner.split()
self.validate_PositionType(self.LowerCorner) # validate type PositionType
elif nodeName_ == 'UpperCorner':
UpperCorner_ = child_.text
UpperCorner_ = self.gds_validate_double_list(UpperCorner_, node, 'UpperCorner')
self.UpperCorner = UpperCorner_
self.UpperCorner = self.UpperCorner.split()
self.validate_PositionType(self.UpperCorner) # validate type PositionType
# end class BoundingBoxType
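# Minimal usage sketch (not part of the generated schema bindings): a bounding
# box whose corners are kept as the lists of coordinate strings that
# buildChildren produces when splitting the element text. The CRS URN and
# coordinates are illustrative assumptions.
def _example_bounding_box(outfile):
    bbox = BoundingBoxType(
        crs='urn:ogc:def:crs:EPSG::31466',
        dimensions=2,
        LowerCorner='2540000.0 5610000.0'.split(),
        UpperCorner='2600000.0 5670000.0'.split())
    bbox.export(outfile, 0, namespace_='ows:', name_='BoundingBox')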
class WGS84BoundingBoxType(GeneratedsSuper):
"""XML encoded minimum rectangular bounding box (or region) parameter,
surrounding all the associated data. This box is specialized for
use with the 2D WGS 84 coordinate reference system with decimal
values of longitude and latitude. This type is adapted from the
general BoundingBoxType, with modified contents and
documentation for use with the 2D WGS 84 coordinate reference
system. This attribute can be included when considered useful.
When included, this attribute shall reference the 2D WGS 84
coordinate reference system with longitude before latitude and
decimal values of longitude and latitude. The number of
dimensions in this CRS (the length of a coordinate sequence in
this use of the PositionType). This number is specified by the
CRS definition, but can also be specified here."""
subclass = None
superclass = None
def __init__(self, crs=None, dimensions=None, LowerCorner=None, UpperCorner=None):
self.crs = _cast(None, crs)
self.dimensions = _cast(int, dimensions)
self.LowerCorner = LowerCorner
self.UpperCorner = UpperCorner
def factory(*args_, **kwargs_):
if WGS84BoundingBoxType.subclass:
return WGS84BoundingBoxType.subclass(*args_, **kwargs_)
else:
return WGS84BoundingBoxType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_LowerCorner(self): return self.LowerCorner
def set_LowerCorner(self, LowerCorner): self.LowerCorner = LowerCorner
def validate_PositionType2D(self, value):
# Validate type PositionType2D, a restriction on ows:PositionType.
pass
def get_UpperCorner(self): return self.UpperCorner
def set_UpperCorner(self, UpperCorner): self.UpperCorner = UpperCorner
def get_crs(self): return self.crs
def set_crs(self, crs): self.crs = crs
def get_dimensions(self): return self.dimensions
def set_dimensions(self, dimensions): self.dimensions = dimensions
def export(self, outfile, level, namespace_='', name_='WGS84BoundingBoxType', namespacedef_=''):
if namespace_ == '':
namespace_ = 'ows:'
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='WGS84BoundingBoxType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='WGS84BoundingBoxType'):
if self.crs is not None and 'crs' not in already_processed:
already_processed.append('crs')
outfile.write(' crs=%s' % (self.gds_format_string(quote_attrib(self.crs).encode(ExternalEncoding), input_name='crs'), ))
if self.dimensions is not None and 'dimensions' not in already_processed:
already_processed.append('dimensions')
outfile.write(' dimensions="%s"' % self.gds_format_integer(self.dimensions, input_name='dimensions'))
def exportChildren(self, outfile, level, namespace_='', name_='WGS84BoundingBoxType', fromsubclass_=False):
if self.LowerCorner is not None:
showIndent(outfile, level)
outfile.write('<%sLowerCorner>%s</%sLowerCorner>\n' % (namespace_, self.gds_format_string(quote_xml(self.LowerCorner).encode(ExternalEncoding), input_name='LowerCorner'), namespace_))
if self.UpperCorner is not None:
showIndent(outfile, level)
outfile.write('<%sUpperCorner>%s</%sUpperCorner>\n' % (namespace_, self.gds_format_string(quote_xml(self.UpperCorner).encode(ExternalEncoding), input_name='UpperCorner'), namespace_))
def hasContent_(self):
if (
self.LowerCorner is not None or
self.UpperCorner is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='WGS84BoundingBoxType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.crs is not None and 'crs' not in already_processed:
already_processed.append('crs')
showIndent(outfile, level)
outfile.write('crs = "%s",\n' % (self.crs,))
if self.dimensions is not None and 'dimensions' not in already_processed:
already_processed.append('dimensions')
showIndent(outfile, level)
outfile.write('dimensions = %d,\n' % (self.dimensions,))
def exportLiteralChildren(self, outfile, level, name_):
        if self.LowerCorner is not None:
            showIndent(outfile, level)
            # The 2D corners are plain "longitude latitude" strings here (see
            # exportChildren), so quote them instead of treating them as models.
            outfile.write('LowerCorner=%s,\n' % quote_python(self.LowerCorner).encode(ExternalEncoding))
        if self.UpperCorner is not None:
            showIndent(outfile, level)
            outfile.write('UpperCorner=%s,\n' % quote_python(self.UpperCorner).encode(ExternalEncoding))
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('crs', node)
if value is not None and 'crs' not in already_processed:
already_processed.append('crs')
self.crs = value
value = find_attr_value_('dimensions', node)
if value is not None and 'dimensions' not in already_processed:
already_processed.append('dimensions')
try:
self.dimensions = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.dimensions <= 0:
raise_parse_error(node, 'Invalid PositiveInteger')
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'LowerCorner':
            # Read the corner coordinates from the element text instead of
            # discarding them; they are kept as a "longitude latitude" string.
            LowerCorner_ = child_.text
            LowerCorner_ = self.gds_validate_string(LowerCorner_, node, 'LowerCorner')
            self.set_LowerCorner(LowerCorner_)
            self.validate_PositionType2D(self.LowerCorner) # validate type PositionType2D
        elif nodeName_ == 'UpperCorner':
            UpperCorner_ = child_.text
            UpperCorner_ = self.gds_validate_string(UpperCorner_, node, 'UpperCorner')
            self.set_UpperCorner(UpperCorner_)
            self.validate_PositionType2D(self.UpperCorner) # validate type PositionType2D
# end class WGS84BoundingBoxType
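# Minimal usage sketch (not part of the generated schema bindings): a WGS 84
# bounding box keeps its corners as "longitude latitude" strings, which is how
# exportChildren serializes them. The coordinates are illustrative assumptions.
def _example_wgs84_bounding_box(outfile):
    bbox = WGS84BoundingBoxType(
        LowerCorner='-11.0 35.0',
        UpperCorner='32.0 72.0')
    # namespace_ defaults to 'ows:' inside export() when left empty.
    bbox.export(outfile, 0, name_='WGS84BoundingBox')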
class LanguageStringType(GeneratedsSuper):
"""Text string with the language of the string identified as
recommended in the XML 1.0 W3C Recommendation, section 2.12."""
subclass = None
superclass = None
def __init__(self, lang=None, valueOf_=None):
self.lang = _cast(None, lang)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if LanguageStringType.subclass:
return LanguageStringType.subclass(*args_, **kwargs_)
else:
return LanguageStringType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_lang(self): return self.lang
def set_lang(self, lang): self.lang = lang
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='LanguageStringType', namespacedef_=''):
if namespace_ == '':
namespace_ = 'ows:'
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='LanguageStringType')
if self.hasContent_():
outfile.write('>')
outfile.write(self.valueOf_.encode(ExternalEncoding))
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='LanguageStringType'):
if self.lang is not None and 'lang' not in already_processed:
already_processed.append('lang')
outfile.write(' lang=%s' % (self.gds_format_string(quote_attrib(self.lang).encode(ExternalEncoding), input_name='lang'), ))
def exportChildren(self, outfile, level, namespace_='', name_='LanguageStringType', fromsubclass_=False):
pass
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='LanguageStringType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.lang is not None and 'lang' not in already_processed:
already_processed.append('lang')
showIndent(outfile, level)
outfile.write('lang = "%s",\n' % (self.lang,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('lang', node)
if value is not None and 'lang' not in already_processed:
already_processed.append('lang')
self.lang = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class LanguageStringType
class KeywordsType(GeneratedsSuper):
"""Unordered list of one or more commonly used or formalised word(s) or
phrase(s) used to describe the subject. When needed, the
optional "type" can name the type of the associated list of
keywords that shall all have the same type. Also when needed,
the codeSpace attribute of that "type" can reference the type
name authority and/or thesaurus. If the xml:lang attribute is
not included in a Keyword element, then no language is specified
for that element unless specified by another means. All Keyword
elements in the same Keywords element that share the same
xml:lang attribute value represent different keywords in that
language. For OWS use, the optional thesaurusName element was
omitted as being complex information that could be referenced by
the codeSpace attribute of the Type element."""
subclass = None
superclass = None
def __init__(self, Keyword=None, Type=None):
if Keyword is None:
self.Keyword = []
else:
self.Keyword = Keyword
self.Type = Type
def factory(*args_, **kwargs_):
if KeywordsType.subclass:
return KeywordsType.subclass(*args_, **kwargs_)
else:
return KeywordsType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Keyword(self): return self.Keyword
def set_Keyword(self, Keyword): self.Keyword = Keyword
def add_Keyword(self, value): self.Keyword.append(value)
def insert_Keyword(self, index, value): self.Keyword[index] = value
def get_Type(self): return self.Type
def set_Type(self, Type): self.Type = Type
def export(self, outfile, level, namespace_='', name_='KeywordsType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='KeywordsType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='KeywordsType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='KeywordsType', fromsubclass_=False):
for Keyword_ in self.Keyword:
Keyword_.export(outfile, level, namespace_, name_='Keyword')
if self.Type:
self.Type.export(outfile, level, namespace_, name_='Type')
def hasContent_(self):
if (
self.Keyword or
self.Type is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='KeywordsType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('Keyword=[\n')
level += 1
for Keyword_ in self.Keyword:
showIndent(outfile, level)
outfile.write('model_.LanguageStringType(\n')
Keyword_.exportLiteral(outfile, level, name_='LanguageStringType')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.Type is not None:
showIndent(outfile, level)
outfile.write('Type=model_.CodeType(\n')
self.Type.exportLiteral(outfile, level, name_='Type')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Keyword':
obj_ = LanguageStringType.factory()
obj_.build(child_)
self.Keyword.append(obj_)
elif nodeName_ == 'Type':
obj_ = CodeType.factory()
obj_.build(child_)
self.set_Type(obj_)
# end class KeywordsType
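# Minimal usage sketch (not part of the generated schema bindings): a keyword
# list with per-language entries and a typed vocabulary reference, mirroring
# the Keyword/Type structure parsed by buildChildren above. The keyword values
# and code space are illustrative assumptions.
def _example_keywords(outfile):
    kw = KeywordsType(
        Keyword=[LanguageStringType(lang='en', valueOf_='elevation'),
                 LanguageStringType(lang='de', valueOf_='Gelaendehoehe')],
        Type=CodeType(codeSpace='http://example.org/themes', valueOf_='theme'))
    kw.export(outfile, 0, namespace_='ows:', name_='Keywords')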
class CodeType(GeneratedsSuper):
"""Name or code with an (optional) authority. If the codeSpace
attribute is present, its value shall reference a dictionary,
thesaurus, or authority for the name or code, such as the
organisation who assigned the value, or the dictionary from
which it is taken. Type copied from basicTypes.xsd of GML 3 with
documentation edited, for possible use outside the
ServiceIdentification section of a service metadata document."""
subclass = None
superclass = None
def __init__(self, codeSpace=None, valueOf_=None):
self.codeSpace = _cast(None, codeSpace)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if CodeType.subclass:
return CodeType.subclass(*args_, **kwargs_)
else:
return CodeType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_codeSpace(self): return self.codeSpace
def set_codeSpace(self, codeSpace): self.codeSpace = codeSpace
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='CodeType', namespacedef_=''):
if namespace_ == '':
namespace_ = 'ows:'
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='CodeType')
if self.hasContent_():
outfile.write('>')
outfile.write(self.valueOf_.encode(ExternalEncoding))
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='CodeType'):
if self.codeSpace is not None and 'codeSpace' not in already_processed:
already_processed.append('codeSpace')
outfile.write(' codeSpace=%s' % (self.gds_format_string(quote_attrib(self.codeSpace).encode(ExternalEncoding), input_name='codeSpace'), ))
def exportChildren(self, outfile, level, namespace_='', name_='CodeType', fromsubclass_=False):
pass
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='CodeType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.codeSpace is not None and 'codeSpace' not in already_processed:
already_processed.append('codeSpace')
showIndent(outfile, level)
outfile.write('codeSpace = "%s",\n' % (self.codeSpace,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('codeSpace', node)
if value is not None and 'codeSpace' not in already_processed:
already_processed.append('codeSpace')
self.codeSpace = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class CodeType
class ResponsiblePartyType(GeneratedsSuper):
"""Identification of, and means of communication with, person
responsible for the server. At least one of IndividualName,
OrganisationName, or PositionName shall be included."""
subclass = None
superclass = None
def __init__(self, IndividualName=None, OrganisationName=None, PositionName=None, ContactInfo=None, Role=None):
self.IndividualName = IndividualName
self.OrganisationName = OrganisationName
self.PositionName = PositionName
self.ContactInfo = ContactInfo
self.Role = Role
def factory(*args_, **kwargs_):
if ResponsiblePartyType.subclass:
return ResponsiblePartyType.subclass(*args_, **kwargs_)
else:
return ResponsiblePartyType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_IndividualName(self): return self.IndividualName
def set_IndividualName(self, IndividualName): self.IndividualName = IndividualName
def get_OrganisationName(self): return self.OrganisationName
def set_OrganisationName(self, OrganisationName): self.OrganisationName = OrganisationName
def get_PositionName(self): return self.PositionName
def set_PositionName(self, PositionName): self.PositionName = PositionName
def get_ContactInfo(self): return self.ContactInfo
def set_ContactInfo(self, ContactInfo): self.ContactInfo = ContactInfo
def get_Role(self): return self.Role
def set_Role(self, Role): self.Role = Role
def export(self, outfile, level, namespace_='', name_='ResponsiblePartyType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='ResponsiblePartyType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ResponsiblePartyType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='ResponsiblePartyType', fromsubclass_=False):
if self.IndividualName is not None:
showIndent(outfile, level)
outfile.write('<%sIndividualName>%s</%sIndividualName>\n' % (namespace_, self.gds_format_string(quote_xml(self.IndividualName).encode(ExternalEncoding), input_name='IndividualName'), namespace_))
if self.OrganisationName is not None:
showIndent(outfile, level)
outfile.write('<%sOrganisationName>%s</%sOrganisationName>\n' % (namespace_, self.gds_format_string(quote_xml(self.OrganisationName).encode(ExternalEncoding), input_name='OrganisationName'), namespace_))
if self.PositionName is not None:
showIndent(outfile, level)
outfile.write('<%sPositionName>%s</%sPositionName>\n' % (namespace_, self.gds_format_string(quote_xml(self.PositionName).encode(ExternalEncoding), input_name='PositionName'), namespace_))
if self.ContactInfo:
self.ContactInfo.export(outfile, level, namespace_, name_='ContactInfo')
if self.Role:
self.Role.export(outfile, level, namespace_, name_='Role', )
def hasContent_(self):
if (
self.IndividualName is not None or
self.OrganisationName is not None or
self.PositionName is not None or
self.ContactInfo is not None or
self.Role is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='ResponsiblePartyType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.IndividualName is not None:
showIndent(outfile, level)
outfile.write('IndividualName=%s,\n' % quote_python(self.IndividualName).encode(ExternalEncoding))
if self.OrganisationName is not None:
showIndent(outfile, level)
outfile.write('OrganisationName=%s,\n' % quote_python(self.OrganisationName).encode(ExternalEncoding))
if self.PositionName is not None:
showIndent(outfile, level)
outfile.write('PositionName=%s,\n' % quote_python(self.PositionName).encode(ExternalEncoding))
if self.ContactInfo is not None:
showIndent(outfile, level)
outfile.write('ContactInfo=model_.ContactInfo(\n')
self.ContactInfo.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.Role is not None:
showIndent(outfile, level)
outfile.write('Role=model_.Role(\n')
self.Role.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'IndividualName':
IndividualName_ = child_.text
IndividualName_ = self.gds_validate_string(IndividualName_, node, 'IndividualName')
self.IndividualName = IndividualName_
elif nodeName_ == 'OrganisationName':
OrganisationName_ = child_.text
OrganisationName_ = self.gds_validate_string(OrganisationName_, node, 'OrganisationName')
self.OrganisationName = OrganisationName_
elif nodeName_ == 'PositionName':
PositionName_ = child_.text
PositionName_ = self.gds_validate_string(PositionName_, node, 'PositionName')
self.PositionName = PositionName_
elif nodeName_ == 'ContactInfo':
obj_ = ContactType.factory()
obj_.build(child_)
self.set_ContactInfo(obj_)
elif nodeName_ == 'Role':
obj_ = CodeType.factory()
obj_.build(child_)
self.set_Role(obj_)
# end class ResponsiblePartyType
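# Minimal usage sketch (not part of the generated schema bindings): a
# responsible-party record using the plain-text name elements and a Role code;
# ContactInfo is omitted here and demonstrated with ContactType further below.
# All names and codes are illustrative assumptions.
def _example_responsible_party(outfile):
    party = ResponsiblePartyType(
        IndividualName='Jane Doe',
        OrganisationName='Example Mapping Agency',
        PositionName='Service administrator',
        Role=CodeType(valueOf_='pointOfContact'))
    party.export(outfile, 0, namespace_='ows:', name_='ServiceContact')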
class ResponsiblePartySubsetType(GeneratedsSuper):
"""Identification of, and means of communication with, person
responsible for the server. For OWS use in the ServiceProvider
section of a service metadata document, the optional
organizationName element was removed, since this type is always
used with the ProviderName element which provides that
information. The mandatory "role" element was changed to
optional, since no clear use of this information is known in the
ServiceProvider section."""
subclass = None
superclass = None
def __init__(self, IndividualName=None, PositionName=None, ContactInfo=None, Role=None):
self.IndividualName = IndividualName
self.PositionName = PositionName
self.ContactInfo = ContactInfo
self.Role = Role
def factory(*args_, **kwargs_):
if ResponsiblePartySubsetType.subclass:
return ResponsiblePartySubsetType.subclass(*args_, **kwargs_)
else:
return ResponsiblePartySubsetType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_IndividualName(self): return self.IndividualName
def set_IndividualName(self, IndividualName): self.IndividualName = IndividualName
def get_PositionName(self): return self.PositionName
def set_PositionName(self, PositionName): self.PositionName = PositionName
def get_ContactInfo(self): return self.ContactInfo
def set_ContactInfo(self, ContactInfo): self.ContactInfo = ContactInfo
def get_Role(self): return self.Role
def set_Role(self, Role): self.Role = Role
def export(self, outfile, level, namespace_='', name_='ResponsiblePartySubsetType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='ResponsiblePartySubsetType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ResponsiblePartySubsetType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='ResponsiblePartySubsetType', fromsubclass_=False):
if self.IndividualName is not None:
showIndent(outfile, level)
outfile.write('<%sIndividualName>%s</%sIndividualName>\n' % (namespace_, self.gds_format_string(quote_xml(self.IndividualName).encode(ExternalEncoding), input_name='IndividualName'), namespace_))
if self.PositionName is not None:
showIndent(outfile, level)
outfile.write('<%sPositionName>%s</%sPositionName>\n' % (namespace_, self.gds_format_string(quote_xml(self.PositionName).encode(ExternalEncoding), input_name='PositionName'), namespace_))
if self.ContactInfo:
self.ContactInfo.export(outfile, level, namespace_, name_='ContactInfo')
if self.Role:
self.Role.export(outfile, level, namespace_, name_='Role')
def hasContent_(self):
if (
self.IndividualName is not None or
self.PositionName is not None or
self.ContactInfo is not None or
self.Role is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='ResponsiblePartySubsetType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.IndividualName is not None:
showIndent(outfile, level)
outfile.write('IndividualName=%s,\n' % quote_python(self.IndividualName).encode(ExternalEncoding))
if self.PositionName is not None:
showIndent(outfile, level)
outfile.write('PositionName=%s,\n' % quote_python(self.PositionName).encode(ExternalEncoding))
if self.ContactInfo is not None:
showIndent(outfile, level)
outfile.write('ContactInfo=model_.ContactInfo(\n')
self.ContactInfo.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.Role is not None:
showIndent(outfile, level)
outfile.write('Role=model_.Role(\n')
self.Role.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'IndividualName':
IndividualName_ = child_.text
IndividualName_ = self.gds_validate_string(IndividualName_, node, 'IndividualName')
self.IndividualName = IndividualName_
elif nodeName_ == 'PositionName':
PositionName_ = child_.text
PositionName_ = self.gds_validate_string(PositionName_, node, 'PositionName')
self.PositionName = PositionName_
elif nodeName_ == 'ContactInfo':
obj_ = ContactType.factory()
obj_.build(child_)
self.set_ContactInfo(obj_)
elif nodeName_ == 'Role':
obj_ = CodeType.factory()
obj_.build(child_)
self.set_Role(obj_)
# end class ResponsiblePartySubsetType
class ContactType(GeneratedsSuper):
"""Information required to enable contact with the responsible person
and/or organization. For OWS use in the service metadata
document, the optional hoursOfService and contactInstructions
elements were retained, as possibly being useful in the
ServiceProvider section."""
subclass = None
superclass = None
def __init__(self, Phone=None, Address=None, OnlineResource=None, HoursOfService=None, ContactInstructions=None):
self.Phone = Phone
self.Address = Address
self.OnlineResource = OnlineResource
self.HoursOfService = HoursOfService
self.ContactInstructions = ContactInstructions
def factory(*args_, **kwargs_):
if ContactType.subclass:
return ContactType.subclass(*args_, **kwargs_)
else:
return ContactType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Phone(self): return self.Phone
def set_Phone(self, Phone): self.Phone = Phone
def get_Address(self): return self.Address
def set_Address(self, Address): self.Address = Address
def get_OnlineResource(self): return self.OnlineResource
def set_OnlineResource(self, OnlineResource): self.OnlineResource = OnlineResource
def get_HoursOfService(self): return self.HoursOfService
def set_HoursOfService(self, HoursOfService): self.HoursOfService = HoursOfService
def get_ContactInstructions(self): return self.ContactInstructions
def set_ContactInstructions(self, ContactInstructions): self.ContactInstructions = ContactInstructions
def export(self, outfile, level, namespace_='', name_='ContactType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='ContactType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ContactType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='ContactType', fromsubclass_=False):
if self.Phone:
self.Phone.export(outfile, level, namespace_, name_='Phone')
if self.Address:
self.Address.export(outfile, level, namespace_, name_='Address')
if self.OnlineResource:
self.OnlineResource.export(outfile, level, namespace_, name_='OnlineResource')
if self.HoursOfService is not None:
showIndent(outfile, level)
outfile.write('<%sHoursOfService>%s</%sHoursOfService>\n' % (namespace_, self.gds_format_string(quote_xml(self.HoursOfService).encode(ExternalEncoding), input_name='HoursOfService'), namespace_))
if self.ContactInstructions is not None:
showIndent(outfile, level)
outfile.write('<%sContactInstructions>%s</%sContactInstructions>\n' % (namespace_, self.gds_format_string(quote_xml(self.ContactInstructions).encode(ExternalEncoding), input_name='ContactInstructions'), namespace_))
def hasContent_(self):
if (
self.Phone is not None or
self.Address is not None or
self.OnlineResource is not None or
self.HoursOfService is not None or
self.ContactInstructions is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='ContactType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.Phone is not None:
showIndent(outfile, level)
outfile.write('Phone=model_.TelephoneType(\n')
self.Phone.exportLiteral(outfile, level, name_='Phone')
showIndent(outfile, level)
outfile.write('),\n')
if self.Address is not None:
showIndent(outfile, level)
outfile.write('Address=model_.AddressType(\n')
self.Address.exportLiteral(outfile, level, name_='Address')
showIndent(outfile, level)
outfile.write('),\n')
if self.OnlineResource is not None:
showIndent(outfile, level)
outfile.write('OnlineResource=model_.OnlineResourceType(\n')
self.OnlineResource.exportLiteral(outfile, level, name_='OnlineResource')
showIndent(outfile, level)
outfile.write('),\n')
if self.HoursOfService is not None:
showIndent(outfile, level)
outfile.write('HoursOfService=%s,\n' % quote_python(self.HoursOfService).encode(ExternalEncoding))
if self.ContactInstructions is not None:
showIndent(outfile, level)
outfile.write('ContactInstructions=%s,\n' % quote_python(self.ContactInstructions).encode(ExternalEncoding))
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Phone':
obj_ = TelephoneType.factory()
obj_.build(child_)
self.set_Phone(obj_)
elif nodeName_ == 'Address':
obj_ = AddressType.factory()
obj_.build(child_)
self.set_Address(obj_)
elif nodeName_ == 'OnlineResource':
obj_ = OnlineResourceType.factory()
obj_.build(child_)
self.set_OnlineResource(obj_)
elif nodeName_ == 'HoursOfService':
HoursOfService_ = child_.text
HoursOfService_ = self.gds_validate_string(HoursOfService_, node, 'HoursOfService')
self.HoursOfService = HoursOfService_
elif nodeName_ == 'ContactInstructions':
ContactInstructions_ = child_.text
ContactInstructions_ = self.gds_validate_string(ContactInstructions_, node, 'ContactInstructions')
self.ContactInstructions = ContactInstructions_
# end class ContactType
class OnlineResourceType(GeneratedsSuper):
"""Reference to on-line resource from which data can be obtained. For
OWS use in the service metadata document, the CI_OnlineResource
class was XML encoded as the attributeGroup "xlink:simpleLink",
as used in GML."""
subclass = None
superclass = None
def __init__(self, title=None, arcrole=None, actuate=None, href=None, role=None, show=None, type_=None, valueOf_=None):
self.title = _cast(None, title)
self.arcrole = _cast(None, arcrole)
self.actuate = _cast(None, actuate)
self.href = _cast(None, href)
self.role = _cast(None, role)
self.show = _cast(None, show)
self.type_ = _cast(None, type_)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if OnlineResourceType.subclass:
return OnlineResourceType.subclass(*args_, **kwargs_)
else:
return OnlineResourceType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_title(self): return self.title
def set_title(self, title): self.title = title
def get_arcrole(self): return self.arcrole
def set_arcrole(self, arcrole): self.arcrole = arcrole
def get_actuate(self): return self.actuate
def set_actuate(self, actuate): self.actuate = actuate
def get_href(self): return self.href
def set_href(self, href): self.href = href
def get_role(self): return self.role
def set_role(self, role): self.role = role
def get_show(self): return self.show
def set_show(self, show): self.show = show
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='OnlineResourceType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='OnlineResourceType')
if self.hasContent_():
outfile.write('>')
outfile.write(self.valueOf_.encode(ExternalEncoding))
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='OnlineResourceType'):
if self.title is not None and 'title' not in already_processed:
already_processed.append('title')
outfile.write(' title=%s' % (self.gds_format_string(quote_attrib(self.title).encode(ExternalEncoding), input_name='title'), ))
if self.arcrole is not None and 'arcrole' not in already_processed:
already_processed.append('arcrole')
outfile.write(' arcrole=%s' % (self.gds_format_string(quote_attrib(self.arcrole).encode(ExternalEncoding), input_name='arcrole'), ))
if self.actuate is not None and 'actuate' not in already_processed:
already_processed.append('actuate')
outfile.write(' actuate=%s' % (self.gds_format_string(quote_attrib(self.actuate).encode(ExternalEncoding), input_name='actuate'), ))
if self.href is not None and 'href' not in already_processed:
already_processed.append('href')
outfile.write(' xlink:href=%s' % (self.gds_format_string(quote_attrib(self.href).encode(ExternalEncoding), input_name='xlink:href'), ))
if self.role is not None and 'role' not in already_processed:
already_processed.append('role')
outfile.write(' role=%s' % (self.gds_format_string(quote_attrib(self.role).encode(ExternalEncoding), input_name='role'), ))
if self.show is not None and 'show' not in already_processed:
already_processed.append('show')
outfile.write(' show=%s' % (self.gds_format_string(quote_attrib(self.show).encode(ExternalEncoding), input_name='show'), ))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.append('type_')
outfile.write(' type=%s' % (self.gds_format_string(quote_attrib(self.type_).encode(ExternalEncoding), input_name='type'), ))
def exportChildren(self, outfile, level, namespace_='', name_='OnlineResourceType', fromsubclass_=False):
pass
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='OnlineResourceType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.title is not None and 'title' not in already_processed:
already_processed.append('title')
showIndent(outfile, level)
outfile.write('title = "%s",\n' % (self.title,))
if self.arcrole is not None and 'arcrole' not in already_processed:
already_processed.append('arcrole')
showIndent(outfile, level)
outfile.write('arcrole = "%s",\n' % (self.arcrole,))
if self.actuate is not None and 'actuate' not in already_processed:
already_processed.append('actuate')
showIndent(outfile, level)
outfile.write('actuate = "%s",\n' % (self.actuate,))
if self.href is not None and 'href' not in already_processed:
already_processed.append('href')
showIndent(outfile, level)
outfile.write('href = "%s",\n' % (self.href,))
if self.role is not None and 'role' not in already_processed:
already_processed.append('role')
showIndent(outfile, level)
outfile.write('role = "%s",\n' % (self.role,))
if self.show is not None and 'show' not in already_processed:
already_processed.append('show')
showIndent(outfile, level)
outfile.write('show = "%s",\n' % (self.show,))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.append('type_')
showIndent(outfile, level)
outfile.write('type_ = "%s",\n' % (self.type_,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('title', node)
if value is not None and 'title' not in already_processed:
already_processed.append('title')
self.title = value
value = find_attr_value_('arcrole', node)
if value is not None and 'arcrole' not in already_processed:
already_processed.append('arcrole')
self.arcrole = value
value = find_attr_value_('actuate', node)
if value is not None and 'actuate' not in already_processed:
already_processed.append('actuate')
self.actuate = value
value = find_attr_value_('href', node)
if value is not None and 'href' not in already_processed:
already_processed.append('href')
self.href = value
value = find_attr_value_('role', node)
if value is not None and 'role' not in already_processed:
already_processed.append('role')
self.role = value
value = find_attr_value_('show', node)
if value is not None and 'show' not in already_processed:
already_processed.append('show')
self.show = value
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.append('type')
self.type_ = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class OnlineResourceType
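# Illustrative usage sketch (hand-written, not produced by the code generator):
# an OnlineResourceType is essentially a bag of xlink attributes, so exporting
# one yields a self-closing element carrying href/type/title.  The URL, the
# title text and the sys.stdout output stream below are assumptions made for
# this example only.
def _example_online_resource():  # hypothetical helper, never called by the bindings
    import sys
    resource = OnlineResourceType(
        href='http://example.com/ows?service=WMS',
        type_='simple',
        title='Example service endpoint')
    # Emits something like:
    #   <ows:OnlineResource title="..." xlink:href="..." type="simple"/>
    resource.export(sys.stdout, 0, namespace_='ows:')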
class TelephoneType(GeneratedsSuper):
"""Telephone numbers for contacting the responsible individual or
organization."""
subclass = None
superclass = None
def __init__(self, Voice=None, Facsimile=None):
if Voice is None:
self.Voice = []
else:
self.Voice = Voice
if Facsimile is None:
self.Facsimile = []
else:
self.Facsimile = Facsimile
def factory(*args_, **kwargs_):
if TelephoneType.subclass:
return TelephoneType.subclass(*args_, **kwargs_)
else:
return TelephoneType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Voice(self): return self.Voice
def set_Voice(self, Voice): self.Voice = Voice
def add_Voice(self, value): self.Voice.append(value)
def insert_Voice(self, index, value): self.Voice[index] = value
def get_Facsimile(self): return self.Facsimile
def set_Facsimile(self, Facsimile): self.Facsimile = Facsimile
def add_Facsimile(self, value): self.Facsimile.append(value)
def insert_Facsimile(self, index, value): self.Facsimile[index] = value
def export(self, outfile, level, namespace_='', name_='TelephoneType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='TelephoneType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='TelephoneType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='TelephoneType', fromsubclass_=False):
for Voice_ in self.Voice:
showIndent(outfile, level)
outfile.write('<%sVoice>%s</%sVoice>\n' % (namespace_, self.gds_format_string(quote_xml(Voice_).encode(ExternalEncoding), input_name='Voice'), namespace_))
for Facsimile_ in self.Facsimile:
showIndent(outfile, level)
outfile.write('<%sFacsimile>%s</%sFacsimile>\n' % (namespace_, self.gds_format_string(quote_xml(Facsimile_).encode(ExternalEncoding), input_name='Facsimile'), namespace_))
def hasContent_(self):
if (
self.Voice or
self.Facsimile
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='TelephoneType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('Voice=[\n')
level += 1
for Voice_ in self.Voice:
showIndent(outfile, level)
outfile.write('%s,\n' % quote_python(Voice_).encode(ExternalEncoding))
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('Facsimile=[\n')
level += 1
for Facsimile_ in self.Facsimile:
showIndent(outfile, level)
outfile.write('%s,\n' % quote_python(Facsimile_).encode(ExternalEncoding))
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Voice':
Voice_ = child_.text
Voice_ = self.gds_validate_string(Voice_, node, 'Voice')
self.Voice.append(Voice_)
elif nodeName_ == 'Facsimile':
Facsimile_ = child_.text
Facsimile_ = self.gds_validate_string(Facsimile_, node, 'Facsimile')
self.Facsimile.append(Facsimile_)
# end class TelephoneType
class AddressType(GeneratedsSuper):
"""Location of the responsible individual or organization."""
subclass = None
superclass = None
def __init__(self, DeliveryPoint=None, City=None, AdministrativeArea=None, PostalCode=None, Country=None, ElectronicMailAddress=None):
if DeliveryPoint is None:
self.DeliveryPoint = []
else:
self.DeliveryPoint = DeliveryPoint
self.City = City
self.AdministrativeArea = AdministrativeArea
self.PostalCode = PostalCode
self.Country = Country
if ElectronicMailAddress is None:
self.ElectronicMailAddress = []
else:
self.ElectronicMailAddress = ElectronicMailAddress
def factory(*args_, **kwargs_):
if AddressType.subclass:
return AddressType.subclass(*args_, **kwargs_)
else:
return AddressType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_DeliveryPoint(self): return self.DeliveryPoint
def set_DeliveryPoint(self, DeliveryPoint): self.DeliveryPoint = DeliveryPoint
def add_DeliveryPoint(self, value): self.DeliveryPoint.append(value)
def insert_DeliveryPoint(self, index, value): self.DeliveryPoint[index] = value
def get_City(self): return self.City
def set_City(self, City): self.City = City
def get_AdministrativeArea(self): return self.AdministrativeArea
def set_AdministrativeArea(self, AdministrativeArea): self.AdministrativeArea = AdministrativeArea
def get_PostalCode(self): return self.PostalCode
def set_PostalCode(self, PostalCode): self.PostalCode = PostalCode
def get_Country(self): return self.Country
def set_Country(self, Country): self.Country = Country
def get_ElectronicMailAddress(self): return self.ElectronicMailAddress
def set_ElectronicMailAddress(self, ElectronicMailAddress): self.ElectronicMailAddress = ElectronicMailAddress
def add_ElectronicMailAddress(self, value): self.ElectronicMailAddress.append(value)
def insert_ElectronicMailAddress(self, index, value): self.ElectronicMailAddress[index] = value
def export(self, outfile, level, namespace_='', name_='AddressType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='AddressType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AddressType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='AddressType', fromsubclass_=False):
for DeliveryPoint_ in self.DeliveryPoint:
showIndent(outfile, level)
outfile.write('<%sDeliveryPoint>%s</%sDeliveryPoint>\n' % (namespace_, self.gds_format_string(quote_xml(DeliveryPoint_).encode(ExternalEncoding), input_name='DeliveryPoint'), namespace_))
if self.City is not None:
showIndent(outfile, level)
outfile.write('<%sCity>%s</%sCity>\n' % (namespace_, self.gds_format_string(quote_xml(self.City).encode(ExternalEncoding), input_name='City'), namespace_))
if self.AdministrativeArea is not None:
showIndent(outfile, level)
outfile.write('<%sAdministrativeArea>%s</%sAdministrativeArea>\n' % (namespace_, self.gds_format_string(quote_xml(self.AdministrativeArea).encode(ExternalEncoding), input_name='AdministrativeArea'), namespace_))
if self.PostalCode is not None:
showIndent(outfile, level)
outfile.write('<%sPostalCode>%s</%sPostalCode>\n' % (namespace_, self.gds_format_string(quote_xml(self.PostalCode).encode(ExternalEncoding), input_name='PostalCode'), namespace_))
if self.Country is not None:
showIndent(outfile, level)
outfile.write('<%sCountry>%s</%sCountry>\n' % (namespace_, self.gds_format_string(quote_xml(self.Country).encode(ExternalEncoding), input_name='Country'), namespace_))
for ElectronicMailAddress_ in self.ElectronicMailAddress:
showIndent(outfile, level)
outfile.write('<%sElectronicMailAddress>%s</%sElectronicMailAddress>\n' % (namespace_, self.gds_format_string(quote_xml(ElectronicMailAddress_).encode(ExternalEncoding), input_name='ElectronicMailAddress'), namespace_))
def hasContent_(self):
if (
self.DeliveryPoint or
self.City is not None or
self.AdministrativeArea is not None or
self.PostalCode is not None or
self.Country is not None or
self.ElectronicMailAddress
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AddressType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('DeliveryPoint=[\n')
level += 1
for DeliveryPoint_ in self.DeliveryPoint:
showIndent(outfile, level)
outfile.write('%s,\n' % quote_python(DeliveryPoint_).encode(ExternalEncoding))
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.City is not None:
showIndent(outfile, level)
outfile.write('City=%s,\n' % quote_python(self.City).encode(ExternalEncoding))
if self.AdministrativeArea is not None:
showIndent(outfile, level)
outfile.write('AdministrativeArea=%s,\n' % quote_python(self.AdministrativeArea).encode(ExternalEncoding))
if self.PostalCode is not None:
showIndent(outfile, level)
outfile.write('PostalCode=%s,\n' % quote_python(self.PostalCode).encode(ExternalEncoding))
if self.Country is not None:
showIndent(outfile, level)
outfile.write('Country=%s,\n' % quote_python(self.Country).encode(ExternalEncoding))
showIndent(outfile, level)
outfile.write('ElectronicMailAddress=[\n')
level += 1
for ElectronicMailAddress_ in self.ElectronicMailAddress:
showIndent(outfile, level)
outfile.write('%s,\n' % quote_python(ElectronicMailAddress_).encode(ExternalEncoding))
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'DeliveryPoint':
DeliveryPoint_ = child_.text
DeliveryPoint_ = self.gds_validate_string(DeliveryPoint_, node, 'DeliveryPoint')
self.DeliveryPoint.append(DeliveryPoint_)
elif nodeName_ == 'City':
City_ = child_.text
City_ = self.gds_validate_string(City_, node, 'City')
self.City = City_
elif nodeName_ == 'AdministrativeArea':
AdministrativeArea_ = child_.text
AdministrativeArea_ = self.gds_validate_string(AdministrativeArea_, node, 'AdministrativeArea')
self.AdministrativeArea = AdministrativeArea_
elif nodeName_ == 'PostalCode':
PostalCode_ = child_.text
PostalCode_ = self.gds_validate_string(PostalCode_, node, 'PostalCode')
self.PostalCode = PostalCode_
elif nodeName_ == 'Country':
Country_ = child_.text
Country_ = self.gds_validate_string(Country_, node, 'Country')
self.Country = Country_
elif nodeName_ == 'ElectronicMailAddress':
ElectronicMailAddress_ = child_.text
ElectronicMailAddress_ = self.gds_validate_string(ElectronicMailAddress_, node, 'ElectronicMailAddress')
self.ElectronicMailAddress.append(ElectronicMailAddress_)
# end class AddressType
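# Illustrative usage sketch (hand-written, not generated): AddressType and
# TelephoneType are plain containers of strings and string lists, and both are
# normally attached to a ContactType via set_Phone()/set_Address().  All
# literal values below are made up for the example; sys.stdout as the output
# stream is likewise an assumption.
def _example_contact_details():  # hypothetical helper, never called by the bindings
    import sys
    phone = TelephoneType(Voice=['+1 555 0100'])
    phone.add_Facsimile('+1 555 0101')
    address = AddressType(
        DeliveryPoint=['1 Example Plaza'],
        City='Exampleville',
        PostalCode='00000',
        Country='Nowhere')
    address.add_ElectronicMailAddress('ows@example.com')
    phone.export(sys.stdout, 0, namespace_='ows:', name_='Phone')
    address.export(sys.stdout, 0, namespace_='ows:', name_='Address')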
class CapabilitiesBaseType(GeneratedsSuper):
"""XML encoded GetCapabilities operation response. This document
provides clients with service metadata about a specific service
instance, usually including metadata about the tightly-coupled
data served. If the server does not implement the updateSequence
parameter, the server shall always return the complete
Capabilities document, without the updateSequence parameter.
When the server implements the updateSequence parameter and the
GetCapabilities operation request included the updateSequence
parameter with the current value, the server shall return this
element with only the "version" and "updateSequence" attributes.
Otherwise, all optional elements shall be included or not
depending on the actual value of the Contents parameter in the
GetCapabilities operation request. This base type shall be
extended by each specific OWS to include the additional contents
    needed. The updateSequence attribute holds the service metadata
    document version, whose value is "increased" whenever any change is
    made to the service metadata document. Values are selected by each
    server and are always opaque to clients. When not supported by the
    server, the server shall not return this attribute."""
subclass = None
superclass = None
def __init__(self, updateSequence=None, version=None, ServiceIdentification=None, ServiceProvider=None, OperationsMetadata=None):
self.updateSequence = _cast(None, updateSequence)
self.version = _cast(None, version)
self.ServiceIdentification = ServiceIdentification
self.ServiceProvider = ServiceProvider
self.OperationsMetadata = OperationsMetadata
def factory(*args_, **kwargs_):
if CapabilitiesBaseType.subclass:
return CapabilitiesBaseType.subclass(*args_, **kwargs_)
else:
return CapabilitiesBaseType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ServiceIdentification(self): return self.ServiceIdentification
def set_ServiceIdentification(self, ServiceIdentification): self.ServiceIdentification = ServiceIdentification
def get_ServiceProvider(self): return self.ServiceProvider
def set_ServiceProvider(self, ServiceProvider): self.ServiceProvider = ServiceProvider
def get_OperationsMetadata(self): return self.OperationsMetadata
def set_OperationsMetadata(self, OperationsMetadata): self.OperationsMetadata = OperationsMetadata
def get_updateSequence(self): return self.updateSequence
def set_updateSequence(self, updateSequence): self.updateSequence = updateSequence
def get_version(self): return self.version
def set_version(self, version): self.version = version
def export(self, outfile, level, namespace_='', name_='CapabilitiesBaseType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='CapabilitiesBaseType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='CapabilitiesBaseType'):
if self.updateSequence is not None and 'updateSequence' not in already_processed:
already_processed.append('updateSequence')
outfile.write(' updateSequence=%s' % (quote_attrib(self.updateSequence), ))
if self.version is not None and 'version' not in already_processed:
already_processed.append('version')
outfile.write(' version=%s' % (quote_attrib(self.version), ))
def exportChildren(self, outfile, level, namespace_='', name_='CapabilitiesBaseType', fromsubclass_=False):
if self.ServiceIdentification:
self.ServiceIdentification.export(outfile, level, namespace_, name_='ServiceIdentification')
if self.ServiceProvider:
self.ServiceProvider.export(outfile, level, namespace_, name_='ServiceProvider')
if self.OperationsMetadata:
self.OperationsMetadata.export(outfile, level, namespace_, name_='OperationsMetadata')
def hasContent_(self):
if (
self.ServiceIdentification is not None or
self.ServiceProvider is not None or
self.OperationsMetadata is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='CapabilitiesBaseType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.updateSequence is not None and 'updateSequence' not in already_processed:
already_processed.append('updateSequence')
showIndent(outfile, level)
outfile.write('updateSequence = %s,\n' % (self.updateSequence,))
if self.version is not None and 'version' not in already_processed:
already_processed.append('version')
showIndent(outfile, level)
outfile.write('version = %s,\n' % (self.version,))
def exportLiteralChildren(self, outfile, level, name_):
if self.ServiceIdentification is not None:
showIndent(outfile, level)
outfile.write('ServiceIdentification=model_.ServiceIdentification(\n')
self.ServiceIdentification.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.ServiceProvider is not None:
showIndent(outfile, level)
outfile.write('ServiceProvider=model_.ServiceProvider(\n')
self.ServiceProvider.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.OperationsMetadata is not None:
showIndent(outfile, level)
outfile.write('OperationsMetadata=model_.OperationsMetadata(\n')
self.OperationsMetadata.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('updateSequence', node)
if value is not None and 'updateSequence' not in already_processed:
already_processed.append('updateSequence')
self.updateSequence = value
value = find_attr_value_('version', node)
if value is not None and 'version' not in already_processed:
already_processed.append('version')
self.version = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'ServiceIdentification':
obj_ = ServiceIdentification.factory()
obj_.build(child_)
self.set_ServiceIdentification(obj_)
elif nodeName_ == 'ServiceProvider':
obj_ = ServiceProvider.factory()
obj_.build(child_)
self.set_ServiceProvider(obj_)
elif nodeName_ == 'OperationsMetadata':
obj_ = OperationsMetadata.factory()
obj_.build(child_)
self.set_OperationsMetadata(obj_)
# end class CapabilitiesBaseType
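# Illustrative sketch (hand-written, not generated): per the class docstring,
# a server that receives the current updateSequence value replies with only
# the "version" and "updateSequence" attributes.  With no child sections set,
# hasContent_() is False and export() emits a self-closing element.  The
# version/updateSequence values and sys.stdout are assumptions for the example.
def _example_capabilities_updatesequence_reply():  # hypothetical helper
    import sys
    reply = CapabilitiesBaseType(updateSequence='42', version='1.1.0')
    # Emits roughly: <ows:Capabilities updateSequence="42" version="1.1.0"/>
    reply.export(sys.stdout, 0, namespace_='ows:', name_='Capabilities')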
class GetCapabilitiesType(GeneratedsSuper):
"""XML encoded GetCapabilities operation request. This operation allows
clients to retrieve service metadata about a specific service
instance. In this XML encoding, no "request" parameter is
included, since the element name specifies the specific
operation. This base type shall be extended by each specific OWS
to include the additional required "service" attribute, with the
    correct value for that OWS. When the updateSequence parameter is
    omitted or not supported by the server, the server shall return the
    latest complete service metadata document."""
subclass = None
superclass = None
def __init__(self, updateSequence=None, AcceptVersions=None, Sections=None, AcceptFormats=None):
self.updateSequence = _cast(None, updateSequence)
self.AcceptVersions = AcceptVersions
self.Sections = Sections
self.AcceptFormats = AcceptFormats
def factory(*args_, **kwargs_):
if GetCapabilitiesType.subclass:
return GetCapabilitiesType.subclass(*args_, **kwargs_)
else:
return GetCapabilitiesType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_AcceptVersions(self): return self.AcceptVersions
def set_AcceptVersions(self, AcceptVersions): self.AcceptVersions = AcceptVersions
def get_Sections(self): return self.Sections
def set_Sections(self, Sections): self.Sections = Sections
def get_AcceptFormats(self): return self.AcceptFormats
def set_AcceptFormats(self, AcceptFormats): self.AcceptFormats = AcceptFormats
def get_updateSequence(self): return self.updateSequence
def set_updateSequence(self, updateSequence): self.updateSequence = updateSequence
def export(self, outfile, level, namespace_='', name_='GetCapabilitiesType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='GetCapabilitiesType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='GetCapabilitiesType'):
if self.updateSequence is not None and 'updateSequence' not in already_processed:
already_processed.append('updateSequence')
outfile.write(' updateSequence=%s' % (quote_attrib(self.updateSequence), ))
def exportChildren(self, outfile, level, namespace_='', name_='GetCapabilitiesType', fromsubclass_=False):
if self.AcceptVersions:
self.AcceptVersions.export(outfile, level, namespace_, name_='AcceptVersions')
if self.Sections:
self.Sections.export(outfile, level, namespace_, name_='Sections')
if self.AcceptFormats:
self.AcceptFormats.export(outfile, level, namespace_, name_='AcceptFormats')
def hasContent_(self):
if (
self.AcceptVersions is not None or
self.Sections is not None or
self.AcceptFormats is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='GetCapabilitiesType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.updateSequence is not None and 'updateSequence' not in already_processed:
already_processed.append('updateSequence')
showIndent(outfile, level)
outfile.write('updateSequence = %s,\n' % (self.updateSequence,))
def exportLiteralChildren(self, outfile, level, name_):
if self.AcceptVersions is not None:
showIndent(outfile, level)
outfile.write('AcceptVersions=model_.AcceptVersionsType(\n')
self.AcceptVersions.exportLiteral(outfile, level, name_='AcceptVersions')
showIndent(outfile, level)
outfile.write('),\n')
if self.Sections is not None:
showIndent(outfile, level)
outfile.write('Sections=model_.SectionsType(\n')
self.Sections.exportLiteral(outfile, level, name_='Sections')
showIndent(outfile, level)
outfile.write('),\n')
if self.AcceptFormats is not None:
showIndent(outfile, level)
outfile.write('AcceptFormats=model_.AcceptFormatsType(\n')
self.AcceptFormats.exportLiteral(outfile, level, name_='AcceptFormats')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('updateSequence', node)
if value is not None and 'updateSequence' not in already_processed:
already_processed.append('updateSequence')
self.updateSequence = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'AcceptVersions':
obj_ = AcceptVersionsType.factory()
obj_.build(child_)
self.set_AcceptVersions(obj_)
elif nodeName_ == 'Sections':
obj_ = SectionsType.factory()
obj_.build(child_)
self.set_Sections(obj_)
elif nodeName_ == 'AcceptFormats':
obj_ = AcceptFormatsType.factory()
obj_.build(child_)
self.set_AcceptFormats(obj_)
# end class GetCapabilitiesType
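# Illustrative sketch (hand-written, not generated): build() populates a
# GetCapabilitiesType from an element tree node, dispatching each child to the
# matching *Type class in buildChildren().  This assumes the module-level
# helpers (Tag_pattern_, find_attr_value_) accept xml.etree elements; the
# parser wiring actually used with this module may rely on a different
# ElementTree implementation, so treat this purely as an approximation.
def _example_parse_get_capabilities():  # hypothetical helper
    from xml.etree import ElementTree as etree
    request_xml = (
        '<GetCapabilities updateSequence="42">'
        '<AcceptVersions><Version>1.1.0</Version></AcceptVersions>'
        '<Sections><Section>ServiceProvider</Section></Sections>'
        '</GetCapabilities>')
    request = GetCapabilitiesType.factory()
    request.build(etree.fromstring(request_xml))
    # Expected result: updateSequence -> '42', Sections -> ['ServiceProvider']
    return request.get_updateSequence(), request.get_Sections().get_Section()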
class AcceptVersionsType(GeneratedsSuper):
"""Prioritized sequence of one or more specification versions accepted
by client, with preferred versions listed first. See Version
negotiation subclause for more information."""
subclass = None
superclass = None
def __init__(self, Version=None):
if Version is None:
self.Version = []
else:
self.Version = Version
def factory(*args_, **kwargs_):
if AcceptVersionsType.subclass:
return AcceptVersionsType.subclass(*args_, **kwargs_)
else:
return AcceptVersionsType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Version(self): return self.Version
def set_Version(self, Version): self.Version = Version
def add_Version(self, value): self.Version.append(value)
def insert_Version(self, index, value): self.Version[index] = value
def validate_VersionType(self, value):
# Validate type VersionType, a restriction on string.
pass
def export(self, outfile, level, namespace_='', name_='AcceptVersionsType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='AcceptVersionsType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AcceptVersionsType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='AcceptVersionsType', fromsubclass_=False):
for Version_ in self.Version:
showIndent(outfile, level)
outfile.write('<%sVersion>%s</%sVersion>\n' % (namespace_, self.gds_format_string(quote_xml(Version_).encode(ExternalEncoding), input_name='Version'), namespace_))
def hasContent_(self):
if (
self.Version
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AcceptVersionsType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('Version=[\n')
level += 1
for Version_ in self.Version:
showIndent(outfile, level)
outfile.write('%s,\n' % quote_python(Version_).encode(ExternalEncoding))
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Version':
Version_ = child_.text
Version_ = self.gds_validate_string(Version_, node, 'Version')
self.Version.append(Version_)
self.validate_VersionType(self.Version) # validate type VersionType
# end class AcceptVersionsType
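# Illustrative sketch (hand-written, not generated): the docstring above notes
# that versions are listed in order of client preference, so the order in
# which Version values are appended is significant.  The version numbers here
# are examples only.
def _example_accept_versions():  # hypothetical helper
    accepted = AcceptVersionsType()
    accepted.add_Version('1.1.0')   # preferred version first
    accepted.add_Version('1.0.0')   # fallback
    return accepted.get_Version()   # ['1.1.0', '1.0.0']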
class SectionsType(GeneratedsSuper):
"""Unordered list of zero or more names of requested sections in
complete service metadata document. Each Section value shall
contain an allowed section name as specified by each OWS
specification. See Sections parameter subclause for more
information."""
subclass = None
superclass = None
def __init__(self, Section=None):
if Section is None:
self.Section = []
else:
self.Section = Section
def factory(*args_, **kwargs_):
if SectionsType.subclass:
return SectionsType.subclass(*args_, **kwargs_)
else:
return SectionsType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Section(self): return self.Section
def set_Section(self, Section): self.Section = Section
def add_Section(self, value): self.Section.append(value)
def insert_Section(self, index, value): self.Section[index] = value
def export(self, outfile, level, namespace_='', name_='SectionsType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='SectionsType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='SectionsType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='SectionsType', fromsubclass_=False):
for Section_ in self.Section:
showIndent(outfile, level)
outfile.write('<%sSection>%s</%sSection>\n' % (namespace_, self.gds_format_string(quote_xml(Section_).encode(ExternalEncoding), input_name='Section'), namespace_))
def hasContent_(self):
if (
self.Section
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='SectionsType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('Section=[\n')
level += 1
for Section_ in self.Section:
showIndent(outfile, level)
outfile.write('%s,\n' % quote_python(Section_).encode(ExternalEncoding))
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Section':
Section_ = child_.text
Section_ = self.gds_validate_string(Section_, node, 'Section')
self.Section.append(Section_)
# end class SectionsType
class AcceptFormatsType(GeneratedsSuper):
"""Prioritized sequence of zero or more GetCapabilities operation
response formats desired by client, with preferred formats
listed first. Each response format shall be identified by its
MIME type. See AcceptFormats parameter use subclause for more
information."""
subclass = None
superclass = None
def __init__(self, OutputFormat=None):
if OutputFormat is None:
self.OutputFormat = []
else:
self.OutputFormat = OutputFormat
def factory(*args_, **kwargs_):
if AcceptFormatsType.subclass:
return AcceptFormatsType.subclass(*args_, **kwargs_)
else:
return AcceptFormatsType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_OutputFormat(self): return self.OutputFormat
def set_OutputFormat(self, OutputFormat): self.OutputFormat = OutputFormat
def add_OutputFormat(self, value): self.OutputFormat.append(value)
def insert_OutputFormat(self, index, value): self.OutputFormat[index] = value
def validate_MimeType(self, value):
# Validate type MimeType, a restriction on string.
pass
def export(self, outfile, level, namespace_='', name_='AcceptFormatsType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='AcceptFormatsType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AcceptFormatsType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='AcceptFormatsType', fromsubclass_=False):
for OutputFormat_ in self.OutputFormat:
showIndent(outfile, level)
outfile.write('<%sOutputFormat>%s</%sOutputFormat>\n' % (namespace_, self.gds_format_string(quote_xml(OutputFormat_).encode(ExternalEncoding), input_name='OutputFormat'), namespace_))
def hasContent_(self):
if (
self.OutputFormat
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AcceptFormatsType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('OutputFormat=[\n')
level += 1
for OutputFormat_ in self.OutputFormat:
showIndent(outfile, level)
outfile.write('%s,\n' % quote_python(OutputFormat_).encode(ExternalEncoding))
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'OutputFormat':
OutputFormat_ = child_.text
OutputFormat_ = self.gds_validate_string(OutputFormat_, node, 'OutputFormat')
self.OutputFormat.append(OutputFormat_)
self.validate_MimeType(self.OutputFormat) # validate type MimeType
# end class AcceptFormatsType
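# Illustrative sketch (hand-written, not generated): AcceptVersionsType,
# SectionsType and AcceptFormatsType are typically assembled into a
# GetCapabilitiesType request, with each output format identified by its MIME
# type as the docstring above notes.  The MIME type, the section name and the
# sys.stdout output stream are assumptions for the example.
def _example_get_capabilities_request():  # hypothetical helper
    import sys
    request = GetCapabilitiesType(
        AcceptVersions=AcceptVersionsType(Version=['1.1.0']),
        Sections=SectionsType(Section=['OperationsMetadata']),
        AcceptFormats=AcceptFormatsType(OutputFormat=['text/xml']))
    request.export(sys.stdout, 0, namespace_='ows:', name_='GetCapabilities')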
class ServiceIdentification(DescriptionType):
"""General metadata for this specific server. This XML Schema of this
section shall be the same for all OWS."""
subclass = None
superclass = DescriptionType
def __init__(self, Title=None, Abstract=None, Keywords=None, ServiceType=None, ServiceTypeVersion=None, Profile=None, Fees=None, AccessConstraints=None):
super(ServiceIdentification, self).__init__(Title, Abstract, Keywords, )
self.ServiceType = ServiceType
if ServiceTypeVersion is None:
self.ServiceTypeVersion = []
else:
self.ServiceTypeVersion = ServiceTypeVersion
if Profile is None:
self.Profile = []
else:
self.Profile = Profile
self.Fees = Fees
if AccessConstraints is None:
self.AccessConstraints = []
else:
self.AccessConstraints = AccessConstraints
def factory(*args_, **kwargs_):
if ServiceIdentification.subclass:
return ServiceIdentification.subclass(*args_, **kwargs_)
else:
return ServiceIdentification(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ServiceType(self): return self.ServiceType
def set_ServiceType(self, ServiceType): self.ServiceType = ServiceType
def get_ServiceTypeVersion(self): return self.ServiceTypeVersion
def set_ServiceTypeVersion(self, ServiceTypeVersion): self.ServiceTypeVersion = ServiceTypeVersion
def add_ServiceTypeVersion(self, value): self.ServiceTypeVersion.append(value)
def insert_ServiceTypeVersion(self, index, value): self.ServiceTypeVersion[index] = value
def validate_VersionType(self, value):
# Validate type VersionType, a restriction on string.
pass
def get_Profile(self): return self.Profile
def set_Profile(self, Profile): self.Profile = Profile
def add_Profile(self, value): self.Profile.append(value)
def insert_Profile(self, index, value): self.Profile[index] = value
def get_Fees(self): return self.Fees
def set_Fees(self, Fees): self.Fees = Fees
def get_AccessConstraints(self): return self.AccessConstraints
def set_AccessConstraints(self, AccessConstraints): self.AccessConstraints = AccessConstraints
def add_AccessConstraints(self, value): self.AccessConstraints.append(value)
def insert_AccessConstraints(self, index, value): self.AccessConstraints[index] = value
def export(self, outfile, level, namespace_='', name_='ServiceIdentification', namespacedef_=''):
if namespace_ == '':
namespace_ = 'ows:'
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='ServiceIdentification')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
# outfile.write(' xsi:type="ServiceIdentification"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ServiceIdentification'):
super(ServiceIdentification, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ServiceIdentification')
def exportChildren(self, outfile, level, namespace_='', name_='ServiceIdentification', fromsubclass_=False):
super(ServiceIdentification, self).exportChildren(outfile, level, namespace_, name_, True)
if self.ServiceType:
self.ServiceType.export(outfile, level, namespace_, name_='ServiceType', )
for ServiceTypeVersion_ in self.ServiceTypeVersion:
showIndent(outfile, level)
outfile.write('<%sServiceTypeVersion>%s</%sServiceTypeVersion>\n' % (namespace_, self.gds_format_string(quote_xml(ServiceTypeVersion_).encode(ExternalEncoding), input_name='ServiceTypeVersion'), namespace_))
for Profile_ in self.Profile:
showIndent(outfile, level)
outfile.write('<%sProfile>%s</%sProfile>\n' % (namespace_, self.gds_format_string(quote_xml(Profile_).encode(ExternalEncoding), input_name='Profile'), namespace_))
if self.Fees is not None:
showIndent(outfile, level)
outfile.write('<%sFees>%s</%sFees>\n' % (namespace_, self.gds_format_string(quote_xml(self.Fees).encode(ExternalEncoding), input_name='Fees'), namespace_))
for AccessConstraints_ in self.AccessConstraints:
showIndent(outfile, level)
outfile.write('<%sAccessConstraints>%s</%sAccessConstraints>\n' % (namespace_, self.gds_format_string(quote_xml(AccessConstraints_).encode(ExternalEncoding), input_name='AccessConstraints'), namespace_))
def hasContent_(self):
if (
self.ServiceType is not None or
self.ServiceTypeVersion or
self.Profile or
self.Fees is not None or
self.AccessConstraints or
super(ServiceIdentification, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='ServiceIdentification'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(ServiceIdentification, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(ServiceIdentification, self).exportLiteralChildren(outfile, level, name_)
if self.ServiceType is not None:
showIndent(outfile, level)
outfile.write('ServiceType=model_.CodeType(\n')
self.ServiceType.exportLiteral(outfile, level, name_='ServiceType')
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
outfile.write('ServiceTypeVersion=[\n')
level += 1
for ServiceTypeVersion_ in self.ServiceTypeVersion:
showIndent(outfile, level)
outfile.write('%s,\n' % quote_python(ServiceTypeVersion_).encode(ExternalEncoding))
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('Profile=[\n')
level += 1
for Profile_ in self.Profile:
showIndent(outfile, level)
outfile.write('%s,\n' % quote_python(Profile_).encode(ExternalEncoding))
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.Fees is not None:
showIndent(outfile, level)
outfile.write('Fees=%s,\n' % quote_python(self.Fees).encode(ExternalEncoding))
showIndent(outfile, level)
outfile.write('AccessConstraints=[\n')
level += 1
for AccessConstraints_ in self.AccessConstraints:
showIndent(outfile, level)
outfile.write('%s,\n' % quote_python(AccessConstraints_).encode(ExternalEncoding))
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(ServiceIdentification, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'ServiceType':
obj_ = CodeType.factory()
obj_.build(child_)
self.set_ServiceType(obj_)
elif nodeName_ == 'ServiceTypeVersion':
ServiceTypeVersion_ = child_.text
ServiceTypeVersion_ = self.gds_validate_string(ServiceTypeVersion_, node, 'ServiceTypeVersion')
self.ServiceTypeVersion.append(ServiceTypeVersion_)
self.validate_VersionType(self.ServiceTypeVersion) # validate type VersionType
elif nodeName_ == 'Profile':
Profile_ = child_.text
Profile_ = self.gds_validate_string(Profile_, node, 'Profile')
self.Profile.append(Profile_)
elif nodeName_ == 'Fees':
Fees_ = child_.text
Fees_ = self.gds_validate_string(Fees_, node, 'Fees')
self.Fees = Fees_
elif nodeName_ == 'AccessConstraints':
AccessConstraints_ = child_.text
AccessConstraints_ = self.gds_validate_string(AccessConstraints_, node, 'AccessConstraints')
self.AccessConstraints.append(AccessConstraints_)
super(ServiceIdentification, self).buildChildren(child_, node, nodeName_, True)
# end class ServiceIdentification
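# Illustrative sketch (hand-written, not generated): the list-valued fields of
# ServiceIdentification (ServiceTypeVersion, Profile, AccessConstraints) are
# plain string lists, while ServiceType is a CodeType object (see
# buildChildren above).  ServiceType and the Title/Abstract fields inherited
# from DescriptionType are left unset here because their types are defined
# elsewhere in this module; all literal values are examples only.
def _example_service_identification():  # hypothetical helper
    import sys
    ident = ServiceIdentification(
        ServiceTypeVersion=['1.1.0'],
        Fees='NONE',
        AccessConstraints=['NONE'])
    ident.add_Profile('http://example.com/profiles/basic')
    ident.export(sys.stdout, 0)   # export() defaults the namespace to 'ows:'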
class ServiceProvider(GeneratedsSuper):
"""Metadata about the organization that provides this specific service
instance or server."""
subclass = None
superclass = None
def __init__(self, ProviderName=None, ProviderSite=None, ServiceContact=None):
self.ProviderName = ProviderName
self.ProviderSite = ProviderSite
self.ServiceContact = ServiceContact
def factory(*args_, **kwargs_):
if ServiceProvider.subclass:
return ServiceProvider.subclass(*args_, **kwargs_)
else:
return ServiceProvider(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ProviderName(self): return self.ProviderName
def set_ProviderName(self, ProviderName): self.ProviderName = ProviderName
def get_ProviderSite(self): return self.ProviderSite
def set_ProviderSite(self, ProviderSite): self.ProviderSite = ProviderSite
def get_ServiceContact(self): return self.ServiceContact
def set_ServiceContact(self, ServiceContact): self.ServiceContact = ServiceContact
def export(self, outfile, level, namespace_='', name_='ServiceProvider', namespacedef_=''):
if namespace_ == '':
namespace_ = 'ows:'
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='ServiceProvider')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ServiceProvider'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='ServiceProvider', fromsubclass_=False):
if self.ProviderName is not None:
showIndent(outfile, level)
outfile.write('<%sProviderName>%s</%sProviderName>\n' % (namespace_, self.gds_format_string(quote_xml(self.ProviderName).encode(ExternalEncoding), input_name='ProviderName'), namespace_))
if self.ProviderSite:
self.ProviderSite.export(outfile, level, namespace_, name_='ProviderSite')
if self.ServiceContact:
self.ServiceContact.export(outfile, level, namespace_, name_='ServiceContact', )
def hasContent_(self):
if (
self.ProviderName is not None or
self.ProviderSite is not None or
self.ServiceContact is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='ServiceProvider'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.ProviderName is not None:
showIndent(outfile, level)
outfile.write('ProviderName=%s,\n' % quote_python(self.ProviderName).encode(ExternalEncoding))
if self.ProviderSite is not None:
showIndent(outfile, level)
outfile.write('ProviderSite=model_.OnlineResourceType(\n')
self.ProviderSite.exportLiteral(outfile, level, name_='ProviderSite')
showIndent(outfile, level)
outfile.write('),\n')
if self.ServiceContact is not None:
showIndent(outfile, level)
outfile.write('ServiceContact=model_.ResponsiblePartySubsetType(\n')
self.ServiceContact.exportLiteral(outfile, level, name_='ServiceContact')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'ProviderName':
ProviderName_ = child_.text
ProviderName_ = self.gds_validate_string(ProviderName_, node, 'ProviderName')
self.ProviderName = ProviderName_
elif nodeName_ == 'ProviderSite':
obj_ = OnlineResourceType.factory()
obj_.build(child_)
self.set_ProviderSite(obj_)
elif nodeName_ == 'ServiceContact':
obj_ = ResponsiblePartySubsetType.factory()
obj_.build(child_)
self.set_ServiceContact(obj_)
# end class ServiceProvider
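# Illustrative sketch (hand-written, not generated): a minimal ServiceProvider
# with just a name and an OnlineResourceType ProviderSite.  ServiceContact (a
# ResponsiblePartySubsetType, defined elsewhere in this module) is omitted;
# the provider name, the URL and the sys.stdout stream are assumptions for
# the example.
def _example_service_provider():  # hypothetical helper
    import sys
    provider = ServiceProvider(
        ProviderName='Example Mapping Co.',
        ProviderSite=OnlineResourceType(href='http://example.com/'))
    provider.export(sys.stdout, 0)   # export() defaults the namespace to 'ows:'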
class OperationsMetadata(GeneratedsSuper):
"""Metadata about the operations and related abilities specified by
this service and implemented by this server, including the URLs
for operation requests. The basic contents of this section shall
be the same for all OWS types, but individual services can add
elements and/or change the optionality of optional elements."""
subclass = None
superclass = None
def __init__(self, Operation=None, Parameter=None, Constraint=None, ExtendedCapabilities=None):
if Operation is None:
self.Operation = []
else:
self.Operation = Operation
if Parameter is None:
self.Parameter = []
else:
self.Parameter = Parameter
if Constraint is None:
self.Constraint = []
else:
self.Constraint = Constraint
self.ExtendedCapabilities = ExtendedCapabilities
def factory(*args_, **kwargs_):
if OperationsMetadata.subclass:
return OperationsMetadata.subclass(*args_, **kwargs_)
else:
return OperationsMetadata(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Operation(self): return self.Operation
def set_Operation(self, Operation): self.Operation = Operation
def add_Operation(self, value): self.Operation.append(value)
def insert_Operation(self, index, value): self.Operation[index] = value
def get_Parameter(self): return self.Parameter
def set_Parameter(self, Parameter): self.Parameter = Parameter
def add_Parameter(self, value): self.Parameter.append(value)
def insert_Parameter(self, index, value): self.Parameter[index] = value
def get_Constraint(self): return self.Constraint
def set_Constraint(self, Constraint): self.Constraint = Constraint
def add_Constraint(self, value): self.Constraint.append(value)
def insert_Constraint(self, index, value): self.Constraint[index] = value
def get_ExtendedCapabilities(self): return self.ExtendedCapabilities
def set_ExtendedCapabilities(self, ExtendedCapabilities): self.ExtendedCapabilities = ExtendedCapabilities
def export(self, outfile, level, namespace_='', name_='OperationsMetadata', namespacedef_=''):
if namespace_ == '':
namespace_ = 'ows:'
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='OperationsMetadata')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='OperationsMetadata'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='OperationsMetadata', fromsubclass_=False):
for Operation_ in self.Operation:
Operation_.export(outfile, level, namespace_, name_='Operation')
for Parameter_ in self.Parameter:
Parameter_.export(outfile, level, namespace_, name_='Parameter')
for Constraint_ in self.Constraint:
Constraint_.export(outfile, level, namespace_, name_='Constraint')
if self.ExtendedCapabilities is not None:
showIndent(outfile, level)
outfile.write('<%sExtendedCapabilities>%s</%sExtendedCapabilities>\n' % (namespace_, self.gds_format_string(quote_xml(self.ExtendedCapabilities).encode(ExternalEncoding), input_name='ExtendedCapabilities'), namespace_))
def hasContent_(self):
if (
self.Operation or
self.Parameter or
self.Constraint or
self.ExtendedCapabilities is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='OperationsMetadata'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('Operation=[\n')
level += 1
for Operation_ in self.Operation:
showIndent(outfile, level)
outfile.write('model_.Operation(\n')
Operation_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('Parameter=[\n')
level += 1
for Parameter_ in self.Parameter:
showIndent(outfile, level)
outfile.write('model_.DomainType(\n')
Parameter_.exportLiteral(outfile, level, name_='DomainType')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('Constraint=[\n')
level += 1
for Constraint_ in self.Constraint:
showIndent(outfile, level)
outfile.write('model_.DomainType(\n')
Constraint_.exportLiteral(outfile, level, name_='DomainType')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.ExtendedCapabilities is not None:
showIndent(outfile, level)
outfile.write('ExtendedCapabilities=%s,\n' % quote_python(self.ExtendedCapabilities).encode(ExternalEncoding))
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Operation':
obj_ = Operation.factory()
obj_.build(child_)
self.Operation.append(obj_)
elif nodeName_ == 'Parameter':
obj_ = DomainType.factory()
obj_.build(child_)
self.Parameter.append(obj_)
elif nodeName_ == 'Constraint':
obj_ = DomainType.factory()
obj_.build(child_)
self.Constraint.append(obj_)
elif nodeName_ == 'ExtendedCapabilities':
ExtendedCapabilities_ = child_.text
ExtendedCapabilities_ = self.gds_validate_string(ExtendedCapabilities_, node, 'ExtendedCapabilities')
self.ExtendedCapabilities = ExtendedCapabilities_
# end class OperationsMetadata
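
# --- Illustrative usage (not generated code): a hedged sketch of assembling an
# ows:OperationsMetadata section from Operation objects via the add_* helpers.
# It assumes the zero-argument factory() construction used throughout this
# module; the objects passed in are expected to be Operation instances.
def _example_operations_metadata(operations):
    om = OperationsMetadata.factory()
    for op in operations:
        om.add_Operation(op)
    return om
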
class Operation(GeneratedsSuper):
"""Metadata for one operation that this server implements. Name or
identifier of this operation (request) (for example,
GetCapabilities). The list of required and optional operations
implemented shall be specified in the Implementation
Specification for this service."""
subclass = None
superclass = None
def __init__(self, name=None, DCP=None, Parameter=None, Constraint=None, Metadata=None):
self.name = _cast(None, name)
if DCP is None:
self.DCP = []
else:
self.DCP = DCP
if Parameter is None:
self.Parameter = []
else:
self.Parameter = Parameter
if Constraint is None:
self.Constraint = []
else:
self.Constraint = Constraint
if Metadata is None:
self.Metadata = []
else:
self.Metadata = Metadata
def factory(*args_, **kwargs_):
if Operation.subclass:
return Operation.subclass(*args_, **kwargs_)
else:
return Operation(*args_, **kwargs_)
factory = staticmethod(factory)
def get_DCP(self): return self.DCP
def set_DCP(self, DCP): self.DCP = DCP
def add_DCP(self, value): self.DCP.append(value)
def insert_DCP(self, index, value): self.DCP[index] = value
def get_Parameter(self): return self.Parameter
def set_Parameter(self, Parameter): self.Parameter = Parameter
def add_Parameter(self, value): self.Parameter.append(value)
def insert_Parameter(self, index, value): self.Parameter[index] = value
def get_Constraint(self): return self.Constraint
def set_Constraint(self, Constraint): self.Constraint = Constraint
def add_Constraint(self, value): self.Constraint.append(value)
def insert_Constraint(self, index, value): self.Constraint[index] = value
def get_Metadata(self): return self.Metadata
def set_Metadata(self, Metadata): self.Metadata = Metadata
def add_Metadata(self, value): self.Metadata.append(value)
def insert_Metadata(self, index, value): self.Metadata[index] = value
def get_name(self): return self.name
def set_name(self, name): self.name = name
def export(self, outfile, level, namespace_='', name_='Operation', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='Operation')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Operation'):
if self.name is not None and 'name' not in already_processed:
already_processed.append('name')
outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
def exportChildren(self, outfile, level, namespace_='', name_='Operation', fromsubclass_=False):
for DCP_ in self.DCP:
DCP_.export(outfile, level, namespace_, name_='DCP')
for Parameter_ in self.Parameter:
Parameter_.export(outfile, level, namespace_, name_='Parameter')
for Constraint_ in self.Constraint:
Constraint_.export(outfile, level, namespace_, name_='Constraint')
for Metadata_ in self.Metadata:
Metadata_.export(outfile, level, namespace_, name_='Metadata')
def hasContent_(self):
if (
self.DCP or
self.Parameter or
self.Constraint or
self.Metadata
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='Operation'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.name is not None and 'name' not in already_processed:
already_processed.append('name')
showIndent(outfile, level)
outfile.write('name = "%s",\n' % (self.name,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('DCP=[\n')
level += 1
for DCP_ in self.DCP:
showIndent(outfile, level)
outfile.write('model_.DCP(\n')
DCP_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('Parameter=[\n')
level += 1
for Parameter_ in self.Parameter:
showIndent(outfile, level)
outfile.write('model_.DomainType(\n')
Parameter_.exportLiteral(outfile, level, name_='DomainType')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('Constraint=[\n')
level += 1
for Constraint_ in self.Constraint:
showIndent(outfile, level)
outfile.write('model_.DomainType(\n')
Constraint_.exportLiteral(outfile, level, name_='DomainType')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('Metadata=[\n')
level += 1
for Metadata_ in self.Metadata:
showIndent(outfile, level)
outfile.write('model_.Metadata(\n')
Metadata_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('name', node)
if value is not None and 'name' not in already_processed:
already_processed.append('name')
self.name = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'DCP':
obj_ = DCP.factory()
obj_.build(child_)
self.DCP.append(obj_)
elif nodeName_ == 'Parameter':
obj_ = DomainType.factory()
obj_.build(child_)
self.Parameter.append(obj_)
elif nodeName_ == 'Constraint':
obj_ = DomainType.factory()
obj_.build(child_)
self.Constraint.append(obj_)
elif nodeName_ == 'Metadata':
obj_ = MetadataType.factory()
obj_.build(child_)
self.Metadata.append(obj_)
# end class Operation
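
# --- Illustrative usage (not generated code): a hedged sketch of serializing a
# bare Operation element with export(). The StringIO import assumes the
# Python 2 environment this generated module appears to target, and the
# operation name is a placeholder value.
def _example_export_operation():
    from StringIO import StringIO  # Python 2 standard library
    buf = StringIO()
    op = Operation(name='GetCapabilities')
    op.export(buf, 0, namespace_='ows:')
    return buf.getvalue()
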
class DCP(GeneratedsSuper):
"""Information for one distributed Computing Platform (DCP) supported
for this operation. At present, only the HTTP DCP is defined, so
this element only includes the HTTP element."""
subclass = None
superclass = None
def __init__(self, HTTP=None):
self.HTTP = HTTP
def factory(*args_, **kwargs_):
if DCP.subclass:
return DCP.subclass(*args_, **kwargs_)
else:
return DCP(*args_, **kwargs_)
factory = staticmethod(factory)
def get_HTTP(self): return self.HTTP
def set_HTTP(self, HTTP): self.HTTP = HTTP
def export(self, outfile, level, namespace_='', name_='DCP', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='DCP')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='DCP'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='DCP', fromsubclass_=False):
if self.HTTP:
self.HTTP.export(outfile, level, namespace_, name_='HTTP', )
def hasContent_(self):
if (
self.HTTP is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='DCP'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.HTTP is not None:
showIndent(outfile, level)
outfile.write('HTTP=model_.HTTP(\n')
self.HTTP.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'HTTP':
obj_ = HTTP.factory()
obj_.build(child_)
self.set_HTTP(obj_)
# end class DCP
class HTTP(GeneratedsSuper):
"""Connect point URLs for the HTTP Distributed Computing Platform
(DCP). Normally, only one Get and/or one Post is included in
this element. More than one Get and/or Post is allowed to
support including alternative URLs for uses such as load
balancing or backup."""
subclass = None
superclass = None
def __init__(self, Get=None, Post=None):
if Get is None:
self.Get = []
else:
self.Get = Get
if Post is None:
self.Post = []
else:
self.Post = Post
def factory(*args_, **kwargs_):
if HTTP.subclass:
return HTTP.subclass(*args_, **kwargs_)
else:
return HTTP(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Get(self): return self.Get
def set_Get(self, Get): self.Get = Get
def add_Get(self, value): self.Get.append(value)
def insert_Get(self, index, value): self.Get[index] = value
def get_Post(self): return self.Post
def set_Post(self, Post): self.Post = Post
def add_Post(self, value): self.Post.append(value)
def insert_Post(self, index, value): self.Post[index] = value
def export(self, outfile, level, namespace_='', name_='HTTP', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='HTTP')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='HTTP'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='HTTP', fromsubclass_=False):
for Get_ in self.Get:
Get_.export(outfile, level, namespace_, name_='Get')
for Post_ in self.Post:
Post_.export(outfile, level, namespace_, name_='Post')
def hasContent_(self):
if (
self.Get or
self.Post
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='HTTP'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('Get=[\n')
level += 1
for Get_ in self.Get:
showIndent(outfile, level)
outfile.write('model_.RequestMethodType(\n')
Get_.exportLiteral(outfile, level, name_='RequestMethodType')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('Post=[\n')
level += 1
for Post_ in self.Post:
showIndent(outfile, level)
outfile.write('model_.RequestMethodType(\n')
Post_.exportLiteral(outfile, level, name_='RequestMethodType')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Get':
obj_ = RequestMethodType.factory()
obj_.build(child_)
self.Get.append(obj_)
elif nodeName_ == 'Post':
obj_ = RequestMethodType.factory()
obj_.build(child_)
self.Post.append(obj_)
# end class HTTP
class RequestMethodType(OnlineResourceType):
"""Connect point URL and any constraints for this HTTP request method
for this operation request. In the OnlineResourceType, the
xlink:href attribute in the xlink:simpleLink attribute group
shall be used to contain this URL. The other attributes in the
xlink:simpleLink attribute group should not be used."""
subclass = None
superclass = OnlineResourceType
def __init__(self, title=None, arcrole=None, actuate=None, href=None, role=None, show=None, type_=None, Constraint=None):
super(RequestMethodType, self).__init__(title, arcrole, actuate, href, role, show, type_, )
if Constraint is None:
self.Constraint = []
else:
self.Constraint = Constraint
def factory(*args_, **kwargs_):
if RequestMethodType.subclass:
return RequestMethodType.subclass(*args_, **kwargs_)
else:
return RequestMethodType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Constraint(self): return self.Constraint
def set_Constraint(self, Constraint): self.Constraint = Constraint
def add_Constraint(self, value): self.Constraint.append(value)
def insert_Constraint(self, index, value): self.Constraint[index] = value
def export(self, outfile, level, namespace_='', name_='RequestMethodType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='RequestMethodType')
        outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
        # Note: the xsi:type attribute emission below is disabled; only the xsi
        # namespace declaration is written on RequestMethodType elements.
        # outfile.write(' xsi:type="RequestMethodType"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RequestMethodType'):
super(RequestMethodType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='RequestMethodType')
def exportChildren(self, outfile, level, namespace_='', name_='RequestMethodType', fromsubclass_=False):
super(RequestMethodType, self).exportChildren(outfile, level, namespace_, name_, True)
for Constraint_ in self.Constraint:
Constraint_.export(outfile, level, namespace_, name_='Constraint')
def hasContent_(self):
if (
self.Constraint or
super(RequestMethodType, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='RequestMethodType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(RequestMethodType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(RequestMethodType, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('Constraint=[\n')
level += 1
for Constraint_ in self.Constraint:
showIndent(outfile, level)
outfile.write('model_.DomainType(\n')
Constraint_.exportLiteral(outfile, level, name_='DomainType')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(RequestMethodType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Constraint':
obj_ = DomainType.factory()
obj_.build(child_)
self.Constraint.append(obj_)
super(RequestMethodType, self).buildChildren(child_, node, nodeName_, True)
# end class RequestMethodType
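
# --- Illustrative usage (not generated code): a hedged sketch of composing an
# Operation with one HTTP Get connect point. The operation name and URL are
# placeholders, not values taken from any schema or capabilities document.
def _example_build_operation():
    get_method = RequestMethodType(href='http://example.com/ows?')
    http = HTTP(Get=[get_method])
    dcp = DCP(HTTP=http)
    return Operation(name='GetCapabilities', DCP=[dcp])
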
class UnNamedDomainType(GeneratedsSuper):
"""Valid domain (or allowed set of values) of one quantity, with needed
metadata but without a quantity name or identifier."""
subclass = None
superclass = None
def __init__(self, AllowedValues=None, AnyValue=None, NoValues=None, ValuesReference=None, DefaultValue=None, Meaning=None, DataType=None, UOM=None, ReferenceSystem=None, Metadata=None):
self.AllowedValues = AllowedValues
self.AnyValue = AnyValue
self.NoValues = NoValues
self.ValuesReference = ValuesReference
self.DefaultValue = DefaultValue
self.Meaning = Meaning
self.DataType = DataType
self.UOM = UOM
self.ReferenceSystem = ReferenceSystem
if Metadata is None:
self.Metadata = []
else:
self.Metadata = Metadata
def factory(*args_, **kwargs_):
if UnNamedDomainType.subclass:
return UnNamedDomainType.subclass(*args_, **kwargs_)
else:
return UnNamedDomainType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_AllowedValues(self): return self.AllowedValues
def set_AllowedValues(self, AllowedValues): self.AllowedValues = AllowedValues
def get_AnyValue(self): return self.AnyValue
def set_AnyValue(self, AnyValue): self.AnyValue = AnyValue
def get_NoValues(self): return self.NoValues
def set_NoValues(self, NoValues): self.NoValues = NoValues
def get_ValuesReference(self): return self.ValuesReference
def set_ValuesReference(self, ValuesReference): self.ValuesReference = ValuesReference
def get_DefaultValue(self): return self.DefaultValue
def set_DefaultValue(self, DefaultValue): self.DefaultValue = DefaultValue
def get_Meaning(self): return self.Meaning
def set_Meaning(self, Meaning): self.Meaning = Meaning
def get_DataType(self): return self.DataType
def set_DataType(self, DataType): self.DataType = DataType
def get_UOM(self): return self.UOM
def set_UOM(self, UOM): self.UOM = UOM
def get_ReferenceSystem(self): return self.ReferenceSystem
def set_ReferenceSystem(self, ReferenceSystem): self.ReferenceSystem = ReferenceSystem
def get_Metadata(self): return self.Metadata
def set_Metadata(self, Metadata): self.Metadata = Metadata
def add_Metadata(self, value): self.Metadata.append(value)
def insert_Metadata(self, index, value): self.Metadata[index] = value
def export(self, outfile, level, namespace_='', name_='UnNamedDomainType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='UnNamedDomainType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='UnNamedDomainType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='UnNamedDomainType', fromsubclass_=False):
if self.AllowedValues:
self.AllowedValues.export(outfile, level, namespace_, name_='AllowedValues', )
if self.AnyValue:
self.AnyValue.export(outfile, level, namespace_, name_='AnyValue', )
if self.NoValues:
self.NoValues.export(outfile, level, namespace_, name_='NoValues', )
if self.ValuesReference:
self.ValuesReference.export(outfile, level, namespace_, name_='ValuesReference', )
if self.DefaultValue:
self.DefaultValue.export(outfile, level, namespace_, name_='DefaultValue')
if self.Meaning:
self.Meaning.export(outfile, level, namespace_, name_='Meaning')
if self.DataType:
self.DataType.export(outfile, level, namespace_, name_='DataType')
if self.UOM:
self.UOM.export(outfile, level, namespace_, name_='UOM', )
if self.ReferenceSystem:
self.ReferenceSystem.export(outfile, level, namespace_, name_='ReferenceSystem', )
for Metadata_ in self.Metadata:
Metadata_.export(outfile, level, namespace_, name_='Metadata')
def hasContent_(self):
if (
self.AllowedValues is not None or
self.AnyValue is not None or
self.NoValues is not None or
self.ValuesReference is not None or
self.DefaultValue is not None or
self.Meaning is not None or
self.DataType is not None or
self.UOM is not None or
self.ReferenceSystem is not None or
self.Metadata
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='UnNamedDomainType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.AllowedValues is not None:
showIndent(outfile, level)
outfile.write('AllowedValues=model_.AllowedValues(\n')
self.AllowedValues.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.AnyValue is not None:
showIndent(outfile, level)
outfile.write('AnyValue=model_.AnyValue(\n')
self.AnyValue.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.NoValues is not None:
showIndent(outfile, level)
outfile.write('NoValues=model_.NoValues(\n')
self.NoValues.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.ValuesReference is not None:
showIndent(outfile, level)
outfile.write('ValuesReference=model_.ValuesReference(\n')
self.ValuesReference.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.DefaultValue is not None:
showIndent(outfile, level)
outfile.write('DefaultValue=model_.DefaultValue(\n')
self.DefaultValue.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.Meaning is not None:
showIndent(outfile, level)
outfile.write('Meaning=model_.Meaning(\n')
self.Meaning.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.DataType is not None:
showIndent(outfile, level)
outfile.write('DataType=model_.DataType(\n')
self.DataType.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.UOM is not None:
showIndent(outfile, level)
outfile.write('UOM=model_.UOM(\n')
self.UOM.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.ReferenceSystem is not None:
showIndent(outfile, level)
outfile.write('ReferenceSystem=model_.ReferenceSystem(\n')
self.ReferenceSystem.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
outfile.write('Metadata=[\n')
level += 1
for Metadata_ in self.Metadata:
showIndent(outfile, level)
outfile.write('model_.Metadata(\n')
Metadata_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'AllowedValues':
obj_ = AllowedValues.factory()
obj_.build(child_)
self.set_AllowedValues(obj_)
elif nodeName_ == 'AnyValue':
obj_ = AnyValue.factory()
obj_.build(child_)
self.set_AnyValue(obj_)
elif nodeName_ == 'NoValues':
obj_ = NoValues.factory()
obj_.build(child_)
self.set_NoValues(obj_)
elif nodeName_ == 'ValuesReference':
obj_ = ValuesReference.factory()
obj_.build(child_)
self.set_ValuesReference(obj_)
elif nodeName_ == 'DefaultValue':
obj_ = ValueType.factory()
obj_.build(child_)
self.set_DefaultValue(obj_)
elif nodeName_ == 'Meaning':
obj_ = DomainMetadataType.factory()
obj_.build(child_)
self.set_Meaning(obj_)
elif nodeName_ == 'DataType':
obj_ = DomainMetadataType.factory()
obj_.build(child_)
self.set_DataType(obj_)
elif nodeName_ == 'UOM':
obj_ = DomainMetadataType.factory()
obj_.build(child_)
self.set_UOM(obj_)
elif nodeName_ == 'ReferenceSystem':
obj_ = DomainMetadataType.factory()
obj_.build(child_)
self.set_ReferenceSystem(obj_)
elif nodeName_ == 'Metadata':
obj_ = MetadataType.factory()
obj_.build(child_)
self.Metadata.append(obj_)
# end class UnNamedDomainType
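
# --- Illustrative usage (not generated code): a hedged sketch of an unnamed
# domain that accepts any value and carries a default. AnyValue and ValueType
# are defined later in this module (resolved at call time); the default value
# shown is a placeholder.
def _example_unnamed_domain():
    return UnNamedDomainType(
        AnyValue=AnyValue(),
        DefaultValue=ValueType(valueOf_='0'),
    )
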
class AnyValue(GeneratedsSuper):
"""Specifies that any value is allowed for this parameter."""
subclass = None
superclass = None
def __init__(self, valueOf_=None):
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if AnyValue.subclass:
return AnyValue.subclass(*args_, **kwargs_)
else:
return AnyValue(*args_, **kwargs_)
factory = staticmethod(factory)
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='AnyValue', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='AnyValue')
if self.hasContent_():
outfile.write('>')
outfile.write(self.valueOf_.encode(ExternalEncoding))
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AnyValue'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='AnyValue', fromsubclass_=False):
pass
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AnyValue'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class AnyValue
class NoValues(GeneratedsSuper):
"""Specifies that no values are allowed for this parameter or quantity."""
subclass = None
superclass = None
def __init__(self, valueOf_=None):
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if NoValues.subclass:
return NoValues.subclass(*args_, **kwargs_)
else:
return NoValues(*args_, **kwargs_)
factory = staticmethod(factory)
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='NoValues', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='NoValues')
if self.hasContent_():
outfile.write('>')
outfile.write(self.valueOf_.encode(ExternalEncoding))
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='NoValues'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='NoValues', fromsubclass_=False):
pass
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='NoValues'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class NoValues
class ValuesReference(GeneratedsSuper):
"""Reference to externally specified list of all the valid values
and/or ranges of values for this quantity. (Informative: This
element was simplified from the metaDataProperty element in GML
3.0.) Human-readable name of the list of values provided by the
referenced document. Can be empty string when this list has no
name."""
subclass = None
superclass = None
def __init__(self, reference=None, valueOf_=None):
self.reference = _cast(None, reference)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if ValuesReference.subclass:
return ValuesReference.subclass(*args_, **kwargs_)
else:
return ValuesReference(*args_, **kwargs_)
factory = staticmethod(factory)
def get_reference(self): return self.reference
def set_reference(self, reference): self.reference = reference
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='ValuesReference', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='ValuesReference')
if self.hasContent_():
outfile.write('>')
outfile.write(self.valueOf_.encode(ExternalEncoding))
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ValuesReference'):
if self.reference is not None and 'reference' not in already_processed:
already_processed.append('reference')
outfile.write(' reference=%s' % (self.gds_format_string(quote_attrib(self.reference).encode(ExternalEncoding), input_name='reference'), ))
def exportChildren(self, outfile, level, namespace_='', name_='ValuesReference', fromsubclass_=False):
pass
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='ValuesReference'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.reference is not None and 'reference' not in already_processed:
already_processed.append('reference')
showIndent(outfile, level)
outfile.write('reference = "%s",\n' % (self.reference,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('reference', node)
if value is not None and 'reference' not in already_processed:
already_processed.append('reference')
self.reference = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class ValuesReference
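
# --- Illustrative usage (not generated code): a hedged sketch of a
# ValuesReference whose text content names the referenced list. The reference
# URL and list name are placeholders.
def _example_values_reference():
    return ValuesReference(
        reference='http://example.com/valuelists/epsg-codes',
        valueOf_='EPSG code list',
    )
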
class AllowedValues(GeneratedsSuper):
"""List of all the valid values and/or ranges of values for this
quantity. For numeric quantities, signed values should be
ordered from negative infinity to positive infinity."""
subclass = None
superclass = None
def __init__(self, Value=None, Range=None):
if Value is None:
self.Value = []
else:
self.Value = Value
if Range is None:
self.Range = []
else:
self.Range = Range
def factory(*args_, **kwargs_):
if AllowedValues.subclass:
return AllowedValues.subclass(*args_, **kwargs_)
else:
return AllowedValues(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Value(self): return self.Value
def set_Value(self, Value): self.Value = Value
def add_Value(self, value): self.Value.append(value)
def insert_Value(self, index, value): self.Value[index] = value
def get_Range(self): return self.Range
def set_Range(self, Range): self.Range = Range
def add_Range(self, value): self.Range.append(value)
def insert_Range(self, index, value): self.Range[index] = value
def export(self, outfile, level, namespace_='', name_='AllowedValues', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='AllowedValues')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AllowedValues'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='AllowedValues', fromsubclass_=False):
for Value_ in self.Value:
Value_.export(outfile, level, namespace_, name_='Value')
for Range_ in self.Range:
Range_.export(outfile, level, namespace_, name_='Range')
def hasContent_(self):
if (
self.Value or
self.Range
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AllowedValues'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('Value=[\n')
level += 1
for Value_ in self.Value:
showIndent(outfile, level)
outfile.write('model_.Value(\n')
Value_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('Range=[\n')
level += 1
for Range_ in self.Range:
showIndent(outfile, level)
outfile.write('model_.Range(\n')
Range_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Value':
obj_ = ValueType.factory()
obj_.build(child_)
self.Value.append(obj_)
elif nodeName_ == 'Range':
obj_ = RangeType.factory()
obj_.build(child_)
self.Range.append(obj_)
# end class AllowedValues
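
# --- Illustrative usage (not generated code): a hedged sketch of an
# AllowedValues list mixing discrete values and one closed range. ValueType
# and RangeType are defined later in this module (resolved at call time); the
# numbers are placeholders.
def _example_allowed_values():
    allowed = AllowedValues()
    allowed.add_Value(ValueType(valueOf_='256'))
    allowed.add_Value(ValueType(valueOf_='512'))
    allowed.add_Range(RangeType(
        rangeClosure='closed',
        MinimumValue=ValueType(valueOf_='1'),
        MaximumValue=ValueType(valueOf_='1024'),
    ))
    return allowed
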
class ValueType(GeneratedsSuper):
"""A single value, encoded as a string. This type can be used for one
value, for a spacing between allowed values, or for the default
value of a parameter."""
subclass = None
superclass = None
def __init__(self, valueOf_=None):
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if ValueType.subclass:
return ValueType.subclass(*args_, **kwargs_)
else:
return ValueType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='ValueType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='ValueType')
if self.hasContent_():
outfile.write('>')
outfile.write(self.valueOf_.encode(ExternalEncoding))
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ValueType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='ValueType', fromsubclass_=False):
pass
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='ValueType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class ValueType
class RangeType(GeneratedsSuper):
"""A range of values of a numeric parameter. This range can be
continuous or discrete, defined by a fixed spacing between
adjacent valid values. If the MinimumValue or MaximumValue is
not included, there is no value limit in that direction.
Inclusion of the specified minimum and maximum values in the
range shall be defined by the rangeClosure. Shall be included
unless the default value applies."""
subclass = None
superclass = None
def __init__(self, rangeClosure=None, MinimumValue=None, MaximumValue=None, Spacing=None):
self.rangeClosure = _cast(None, rangeClosure)
self.MinimumValue = MinimumValue
self.MaximumValue = MaximumValue
self.Spacing = Spacing
def factory(*args_, **kwargs_):
if RangeType.subclass:
return RangeType.subclass(*args_, **kwargs_)
else:
return RangeType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_MinimumValue(self): return self.MinimumValue
def set_MinimumValue(self, MinimumValue): self.MinimumValue = MinimumValue
def get_MaximumValue(self): return self.MaximumValue
def set_MaximumValue(self, MaximumValue): self.MaximumValue = MaximumValue
def get_Spacing(self): return self.Spacing
def set_Spacing(self, Spacing): self.Spacing = Spacing
def get_rangeClosure(self): return self.rangeClosure
def set_rangeClosure(self, rangeClosure): self.rangeClosure = rangeClosure
def export(self, outfile, level, namespace_='', name_='RangeType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='RangeType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RangeType'):
if self.rangeClosure is not None and 'rangeClosure' not in already_processed:
already_processed.append('rangeClosure')
outfile.write(' rangeClosure=%s' % (self.gds_format_string(quote_attrib(self.rangeClosure).encode(ExternalEncoding), input_name='rangeClosure'), ))
def exportChildren(self, outfile, level, namespace_='', name_='RangeType', fromsubclass_=False):
if self.MinimumValue:
self.MinimumValue.export(outfile, level, namespace_, name_='MinimumValue')
if self.MaximumValue:
self.MaximumValue.export(outfile, level, namespace_, name_='MaximumValue')
if self.Spacing:
self.Spacing.export(outfile, level, namespace_, name_='Spacing')
def hasContent_(self):
if (
self.MinimumValue is not None or
self.MaximumValue is not None or
self.Spacing is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='RangeType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.rangeClosure is not None and 'rangeClosure' not in already_processed:
already_processed.append('rangeClosure')
showIndent(outfile, level)
outfile.write('rangeClosure = "%s",\n' % (self.rangeClosure,))
def exportLiteralChildren(self, outfile, level, name_):
if self.MinimumValue is not None:
showIndent(outfile, level)
outfile.write('MinimumValue=model_.MinimumValue(\n')
self.MinimumValue.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.MaximumValue is not None:
showIndent(outfile, level)
outfile.write('MaximumValue=model_.MaximumValue(\n')
self.MaximumValue.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.Spacing is not None:
showIndent(outfile, level)
outfile.write('Spacing=model_.Spacing(\n')
self.Spacing.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('rangeClosure', node)
if value is not None and 'rangeClosure' not in already_processed:
already_processed.append('rangeClosure')
self.rangeClosure = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'MinimumValue':
obj_ = ValueType.factory()
obj_.build(child_)
self.set_MinimumValue(obj_)
elif nodeName_ == 'MaximumValue':
obj_ = ValueType.factory()
obj_.build(child_)
self.set_MaximumValue(obj_)
elif nodeName_ == 'Spacing':
obj_ = ValueType.factory()
obj_.build(child_)
self.set_Spacing(obj_)
# end class RangeType
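
# --- Illustrative usage (not generated code): a hedged sketch of serializing a
# Range element, showing the rangeClosure attribute and the Spacing child. The
# StringIO import assumes the Python 2 environment this generated module
# appears to target; the numbers are placeholders.
def _example_export_range():
    from StringIO import StringIO  # Python 2 standard library
    buf = StringIO()
    rng = RangeType(
        rangeClosure='closed',
        MinimumValue=ValueType(valueOf_='0'),
        MaximumValue=ValueType(valueOf_='100'),
        Spacing=ValueType(valueOf_='10'),
    )
    rng.export(buf, 0, namespace_='ows:', name_='Range')
    return buf.getvalue()
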
class DomainMetadataType(GeneratedsSuper):
"""References metadata about a quantity, and provides a name for this
metadata. (Informative: This element was simplified from the
metaDataProperty element in GML 3.0.) Human-readable name of the
metadata described by associated referenced document."""
subclass = None
superclass = None
def __init__(self, reference=None, valueOf_=None):
self.reference = _cast(None, reference)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if DomainMetadataType.subclass:
return DomainMetadataType.subclass(*args_, **kwargs_)
else:
return DomainMetadataType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_reference(self): return self.reference
def set_reference(self, reference): self.reference = reference
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='DomainMetadataType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='DomainMetadataType')
if self.hasContent_():
outfile.write('>')
outfile.write(self.valueOf_.encode(ExternalEncoding))
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='DomainMetadataType'):
if self.reference is not None and 'reference' not in already_processed:
already_processed.append('reference')
outfile.write(' reference=%s' % (self.gds_format_string(quote_attrib(self.reference).encode(ExternalEncoding), input_name='reference'), ))
def exportChildren(self, outfile, level, namespace_='', name_='DomainMetadataType', fromsubclass_=False):
pass
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='DomainMetadataType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.reference is not None and 'reference' not in already_processed:
already_processed.append('reference')
showIndent(outfile, level)
outfile.write('reference = "%s",\n' % (self.reference,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('reference', node)
if value is not None and 'reference' not in already_processed:
already_processed.append('reference')
self.reference = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class DomainMetadataType
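
# --- Illustrative usage (not generated code): a hedged sketch of the small
# name-plus-reference pattern used for DataType/UOM/Meaning/ReferenceSystem
# metadata. The reference URL and names are placeholders.
def _example_domain_metadata():
    data_type = DomainMetadataType(
        reference='http://www.w3.org/TR/xmlschema-2/#integer',
        valueOf_='integer',
    )
    uom = DomainMetadataType(valueOf_='metre')
    return data_type, uom
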
class ExceptionReport(GeneratedsSuper):
"""Report message returned to the client that requested any OWS
operation when the server detects an error while processing that
operation request. Specification version for OWS operation. The
string value shall contain one x.y.z "version" value (e.g.,
"2.1.3"). A version number shall contain three non-negative
integers separated by decimal points, in the form "x.y.z". The
integers y and z shall not exceed 99. Each version shall be for
the Implementation Specification (document) and the associated
XML Schemas to which requested operations will conform. An
Implementation Specification version normally specifies XML
Schemas against which an XML encoded operation response must
conform and should be validated. See Version negotiation
subclause for more information. Identifier of the language used
by all included exception text values. These language
identifiers shall be as specified in IETF RFC 4646. When this
attribute is omitted, the language used is not identified."""
subclass = None
superclass = None
def __init__(self, lang=None, version=None, Exception=None):
self.lang = _cast(None, lang)
self.version = _cast(None, version)
if Exception is None:
self.Exception = []
else:
self.Exception = Exception
def factory(*args_, **kwargs_):
if ExceptionReport.subclass:
return ExceptionReport.subclass(*args_, **kwargs_)
else:
return ExceptionReport(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Exception(self): return self.Exception
def set_Exception(self, Exception): self.Exception = Exception
def add_Exception(self, value): self.Exception.append(value)
def insert_Exception(self, index, value): self.Exception[index] = value
def get_lang(self): return self.lang
def set_lang(self, lang): self.lang = lang
def get_version(self): return self.version
def set_version(self, version): self.version = version
def export(self, outfile, level, namespace_='', name_='ExceptionReport', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='ExceptionReport')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ExceptionReport'):
if self.lang is not None and 'lang' not in already_processed:
already_processed.append('lang')
outfile.write(' lang=%s' % (self.gds_format_string(quote_attrib(self.lang).encode(ExternalEncoding), input_name='lang'), ))
if self.version is not None and 'version' not in already_processed:
already_processed.append('version')
outfile.write(' version=%s' % (self.gds_format_string(quote_attrib(self.version).encode(ExternalEncoding), input_name='version'), ))
def exportChildren(self, outfile, level, namespace_='', name_='ExceptionReport', fromsubclass_=False):
for Exception_ in self.Exception:
Exception_.export(outfile, level, namespace_, name_='Exception')
def hasContent_(self):
if (
self.Exception
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='ExceptionReport'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.lang is not None and 'lang' not in already_processed:
already_processed.append('lang')
showIndent(outfile, level)
outfile.write('lang = "%s",\n' % (self.lang,))
if self.version is not None and 'version' not in already_processed:
already_processed.append('version')
showIndent(outfile, level)
outfile.write('version = "%s",\n' % (self.version,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('Exception=[\n')
level += 1
for Exception_ in self.Exception:
showIndent(outfile, level)
outfile.write('model_.Exception(\n')
Exception_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('lang', node)
if value is not None and 'lang' not in already_processed:
already_processed.append('lang')
self.lang = value
value = find_attr_value_('version', node)
if value is not None and 'version' not in already_processed:
already_processed.append('version')
self.version = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Exception':
obj_ = ExceptionType.factory()
obj_.build(child_)
self.Exception.append(obj_)
# end class ExceptionReport
class ExceptionType(GeneratedsSuper):
"""An Exception element describes one detected error that a server
chooses to convey to the client. A code representing the type of
this exception, which shall be selected from a set of
exceptionCode values specified for the specific service
operation and server. When included, this locator shall indicate
to the client where an exception was encountered in servicing
the client's operation request. This locator should be included
whenever meaningful information can be provided by the server.
The contents of this locator will depend on the specific
exceptionCode and OWS service, and shall be specified in the OWS
Implementation Specification."""
subclass = None
superclass = None
def __init__(self, locator=None, exceptionCode=None, ExceptionText=None):
self.locator = _cast(None, locator)
self.exceptionCode = _cast(None, exceptionCode)
if ExceptionText is None:
self.ExceptionText = []
else:
self.ExceptionText = ExceptionText
def factory(*args_, **kwargs_):
if ExceptionType.subclass:
return ExceptionType.subclass(*args_, **kwargs_)
else:
return ExceptionType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ExceptionText(self): return self.ExceptionText
def set_ExceptionText(self, ExceptionText): self.ExceptionText = ExceptionText
def add_ExceptionText(self, value): self.ExceptionText.append(value)
def insert_ExceptionText(self, index, value): self.ExceptionText[index] = value
def get_locator(self): return self.locator
def set_locator(self, locator): self.locator = locator
def get_exceptionCode(self): return self.exceptionCode
def set_exceptionCode(self, exceptionCode): self.exceptionCode = exceptionCode
def export(self, outfile, level, namespace_='', name_='ExceptionType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='ExceptionType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ExceptionType'):
if self.locator is not None and 'locator' not in already_processed:
already_processed.append('locator')
outfile.write(' locator=%s' % (self.gds_format_string(quote_attrib(self.locator).encode(ExternalEncoding), input_name='locator'), ))
if self.exceptionCode is not None and 'exceptionCode' not in already_processed:
already_processed.append('exceptionCode')
outfile.write(' exceptionCode=%s' % (self.gds_format_string(quote_attrib(self.exceptionCode).encode(ExternalEncoding), input_name='exceptionCode'), ))
def exportChildren(self, outfile, level, namespace_='', name_='ExceptionType', fromsubclass_=False):
for ExceptionText_ in self.ExceptionText:
showIndent(outfile, level)
outfile.write('<%sExceptionText>%s</%sExceptionText>\n' % (namespace_, self.gds_format_string(quote_xml(ExceptionText_).encode(ExternalEncoding), input_name='ExceptionText'), namespace_))
def hasContent_(self):
if (
self.ExceptionText
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='ExceptionType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.locator is not None and 'locator' not in already_processed:
already_processed.append('locator')
showIndent(outfile, level)
outfile.write('locator = "%s",\n' % (self.locator,))
if self.exceptionCode is not None and 'exceptionCode' not in already_processed:
already_processed.append('exceptionCode')
showIndent(outfile, level)
outfile.write('exceptionCode = "%s",\n' % (self.exceptionCode,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('ExceptionText=[\n')
level += 1
for ExceptionText_ in self.ExceptionText:
showIndent(outfile, level)
outfile.write('%s,\n' % quote_python(ExceptionText_).encode(ExternalEncoding))
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('locator', node)
if value is not None and 'locator' not in already_processed:
already_processed.append('locator')
self.locator = value
value = find_attr_value_('exceptionCode', node)
if value is not None and 'exceptionCode' not in already_processed:
already_processed.append('exceptionCode')
self.exceptionCode = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'ExceptionText':
ExceptionText_ = child_.text
ExceptionText_ = self.gds_validate_string(ExceptionText_, node, 'ExceptionText')
self.ExceptionText.append(ExceptionText_)
# end class ExceptionType
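
# --- Illustrative usage (not generated code): a hedged sketch of building and
# serializing an ows:ExceptionReport with one Exception. The version, language,
# exception code, locator and text are placeholders, and the StringIO import
# assumes the Python 2 environment this generated module appears to target.
def _example_exception_report():
    from StringIO import StringIO  # Python 2 standard library
    report = ExceptionReport(lang='en', version='1.1.0')
    report.add_Exception(ExceptionType(
        exceptionCode='InvalidParameterValue',
        locator='format',
        ExceptionText=['Unsupported output format'],
    ))
    buf = StringIO()
    report.export(buf, 0, namespace_='ows:')
    return buf.getvalue()
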
class ContentsBaseType(GeneratedsSuper):
"""Contents of typical Contents section of an OWS service metadata
(Capabilities) document. This type shall be extended and/or
restricted if needed for specific OWS use to include the
specific metadata needed."""
subclass = None
superclass = None
def __init__(self, DatasetDescriptionSummary=None, OtherSource=None):
if DatasetDescriptionSummary is None:
self.DatasetDescriptionSummary = []
else:
self.DatasetDescriptionSummary = DatasetDescriptionSummary
if OtherSource is None:
self.OtherSource = []
else:
self.OtherSource = OtherSource
def factory(*args_, **kwargs_):
if ContentsBaseType.subclass:
return ContentsBaseType.subclass(*args_, **kwargs_)
else:
return ContentsBaseType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_DatasetDescriptionSummary(self): return self.DatasetDescriptionSummary
def set_DatasetDescriptionSummary(self, DatasetDescriptionSummary): self.DatasetDescriptionSummary = DatasetDescriptionSummary
def add_DatasetDescriptionSummary(self, value): self.DatasetDescriptionSummary.append(value)
def insert_DatasetDescriptionSummary(self, index, value): self.DatasetDescriptionSummary[index] = value
def get_OtherSource(self): return self.OtherSource
def set_OtherSource(self, OtherSource): self.OtherSource = OtherSource
def add_OtherSource(self, value): self.OtherSource.append(value)
def insert_OtherSource(self, index, value): self.OtherSource[index] = value
def export(self, outfile, level, namespace_='', name_='ContentsBaseType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='ContentsBaseType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ContentsBaseType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='ContentsBaseType', fromsubclass_=False):
for DatasetDescriptionSummary_ in self.DatasetDescriptionSummary:
DatasetDescriptionSummary_.export(outfile, level, namespace_, name_='Layer')
for OtherSource_ in self.OtherSource:
OtherSource_.export(outfile, level, namespace_, name_='OtherSource')
def hasContent_(self):
if (
self.DatasetDescriptionSummary or
self.OtherSource
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='ContentsBaseType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('DatasetDescriptionSummary=[\n')
level += 1
for DatasetDescriptionSummary_ in self.DatasetDescriptionSummary:
showIndent(outfile, level)
outfile.write('model_.DatasetDescriptionSummary(\n')
DatasetDescriptionSummary_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('OtherSource=[\n')
level += 1
for OtherSource_ in self.OtherSource:
showIndent(outfile, level)
outfile.write('model_.OtherSource(\n')
OtherSource_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'DatasetDescriptionSummary':
obj_ = DatasetDescriptionSummaryBaseType.factory()
obj_.build(child_)
self.DatasetDescriptionSummary.append(obj_)
elif nodeName_ == 'OtherSource':
obj_ = MetadataType.factory()
obj_.build(child_)
self.OtherSource.append(obj_)
# end class ContentsBaseType
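# Illustrative usage sketch (not part of the generated bindings): parsing a
# minimal, hypothetical Contents fragment into a ContentsBaseType.  It relies
# only on helpers generated earlier in this module (Tag_pattern_, MetadataType,
# DatasetDescriptionSummaryBaseType); real capabilities documents normally use
# richer children (e.g. a Layer substituting for DatasetDescriptionSummary)
# handled by subclasses.
def _example_parse_contents():
    from xml.etree import ElementTree as etree_
    # placeholder document for illustration only
    xml_text = ('<Contents>'
                '<DatasetDescriptionSummary/>'
                '<OtherSource/>'
                '</Contents>')
    node = etree_.fromstring(xml_text)
    contents = ContentsBaseType.factory()
    contents.build(node)
    return (len(contents.get_DatasetDescriptionSummary()),
            len(contents.get_OtherSource()))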
class DatasetDescriptionSummaryBaseType(DescriptionType):
"""Typical dataset metadata in typical Contents section of an OWS
service metadata (Capabilities) document. This type shall be
extended and/or restricted if needed for specific OWS use, to
include the specific Dataset description metadata needed."""
subclass = None
superclass = DescriptionType
def __init__(self, Title=None, Abstract=None, Keywords=None, WGS84BoundingBox=None, Identifier=None, BoundingBox=None, Metadata=None, DatasetDescriptionSummary=None):
super(DatasetDescriptionSummaryBaseType, self).__init__(Title, Abstract, Keywords, )
if WGS84BoundingBox is None:
self.WGS84BoundingBox = []
else:
self.WGS84BoundingBox = WGS84BoundingBox
self.Identifier = Identifier
if BoundingBox is None:
self.BoundingBox = []
else:
self.BoundingBox = BoundingBox
if Metadata is None:
self.Metadata = []
else:
self.Metadata = Metadata
if DatasetDescriptionSummary is None:
self.DatasetDescriptionSummary = []
else:
self.DatasetDescriptionSummary = DatasetDescriptionSummary
def factory(*args_, **kwargs_):
if DatasetDescriptionSummaryBaseType.subclass:
return DatasetDescriptionSummaryBaseType.subclass(*args_, **kwargs_)
else:
return DatasetDescriptionSummaryBaseType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_WGS84BoundingBox(self): return self.WGS84BoundingBox
def set_WGS84BoundingBox(self, WGS84BoundingBox): self.WGS84BoundingBox = WGS84BoundingBox
def add_WGS84BoundingBox(self, value): self.WGS84BoundingBox.append(value)
def insert_WGS84BoundingBox(self, index, value): self.WGS84BoundingBox[index] = value
def get_Identifier(self): return self.Identifier
def set_Identifier(self, Identifier): self.Identifier = Identifier
def get_BoundingBox(self): return self.BoundingBox
def set_BoundingBox(self, BoundingBox): self.BoundingBox = BoundingBox
def add_BoundingBox(self, value): self.BoundingBox.append(value)
def insert_BoundingBox(self, index, value): self.BoundingBox[index] = value
def get_Metadata(self): return self.Metadata
def set_Metadata(self, Metadata): self.Metadata = Metadata
def add_Metadata(self, value): self.Metadata.append(value)
def insert_Metadata(self, index, value): self.Metadata[index] = value
def get_DatasetDescriptionSummary(self): return self.DatasetDescriptionSummary
def set_DatasetDescriptionSummary(self, DatasetDescriptionSummary): self.DatasetDescriptionSummary = DatasetDescriptionSummary
def add_DatasetDescriptionSummary(self, value): self.DatasetDescriptionSummary.append(value)
def insert_DatasetDescriptionSummary(self, index, value): self.DatasetDescriptionSummary[index] = value
def export(self, outfile, level, namespace_='', name_='DatasetDescriptionSummaryBaseType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='DatasetDescriptionSummaryBaseType')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
# outfile.write(' xsi:type="DatasetDescriptionSummaryBaseType"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='DatasetDescriptionSummaryBaseType'):
super(DatasetDescriptionSummaryBaseType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='DatasetDescriptionSummaryBaseType')
def exportChildren(self, outfile, level, namespace_='', name_='DatasetDescriptionSummaryBaseType', fromsubclass_=False):
super(DatasetDescriptionSummaryBaseType, self).exportChildren(outfile, level, namespace_, name_, True)
for WGS84BoundingBox_ in self.WGS84BoundingBox:
WGS84BoundingBox_.export(outfile, level, namespace_, name_='WGS84BoundingBox')
if self.Identifier:
self.Identifier.export(outfile, level, namespace_, name_='Identifier', )
for BoundingBox_ in self.BoundingBox:
BoundingBox_.export(outfile, level, namespace_, name_='BoundingBox')
for Metadata_ in self.Metadata:
Metadata_.export(outfile, level, namespace_, name_='Metadata')
for DatasetDescriptionSummary_ in self.DatasetDescriptionSummary:
DatasetDescriptionSummary_.export(outfile, level, namespace_, name_='DatasetDescriptionSummary')
def hasContent_(self):
if (
self.WGS84BoundingBox or
self.Identifier is not None or
self.BoundingBox or
self.Metadata or
self.DatasetDescriptionSummary or
super(DatasetDescriptionSummaryBaseType, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='DatasetDescriptionSummaryBaseType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(DatasetDescriptionSummaryBaseType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(DatasetDescriptionSummaryBaseType, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('WGS84BoundingBox=[\n')
level += 1
for WGS84BoundingBox_ in self.WGS84BoundingBox:
showIndent(outfile, level)
outfile.write('model_.WGS84BoundingBox(\n')
WGS84BoundingBox_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.Identifier is not None:
showIndent(outfile, level)
outfile.write('Identifier=model_.CodeType(\n')
self.Identifier.exportLiteral(outfile, level, name_='Identifier')
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
outfile.write('BoundingBox=[\n')
level += 1
for BoundingBox_ in self.BoundingBox:
showIndent(outfile, level)
outfile.write('model_.BoundingBox(\n')
BoundingBox_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('Metadata=[\n')
level += 1
for Metadata_ in self.Metadata:
showIndent(outfile, level)
outfile.write('model_.Metadata(\n')
Metadata_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('DatasetDescriptionSummary=[\n')
level += 1
for DatasetDescriptionSummary_ in self.DatasetDescriptionSummary:
showIndent(outfile, level)
outfile.write('model_.DatasetDescriptionSummary(\n')
DatasetDescriptionSummary_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(DatasetDescriptionSummaryBaseType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'WGS84BoundingBox':
obj_ = WGS84BoundingBoxType.factory()
obj_.build(child_)
self.WGS84BoundingBox.append(obj_)
elif nodeName_ == 'Identifier':
obj_ = CodeType.factory()
obj_.build(child_)
self.set_Identifier(obj_)
elif nodeName_ == 'BoundingBox':
obj_ = BoundingBoxType.factory()
obj_.build(child_)
self.BoundingBox.append(obj_)
elif nodeName_ == 'Metadata':
obj_ = MetadataType.factory()
obj_.build(child_)
self.Metadata.append(obj_)
elif nodeName_ == 'DatasetDescriptionSummary':
obj_ = DatasetDescriptionSummaryBaseType.factory()
obj_.build(child_)
self.DatasetDescriptionSummary.append(obj_)
super(DatasetDescriptionSummaryBaseType, self).buildChildren(child_, node, nodeName_, True)
# end class DatasetDescriptionSummaryBaseType
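# Illustrative sketch (not part of the generated bindings):
# DatasetDescriptionSummaryBaseType instances can nest recursively through
# their DatasetDescriptionSummary list, and export() serialises that nesting
# to any object with a write() method.  The element name used here is only
# the type's conventional element name, not something the schema mandates.
def _example_nested_dataset_summary(outfile=None):
    import sys
    outfile = outfile or sys.stdout
    child = DatasetDescriptionSummaryBaseType()
    parent = DatasetDescriptionSummaryBaseType(DatasetDescriptionSummary=[child])
    parent.export(outfile, 0, name_='DatasetDescriptionSummary')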
class AbstractReferenceBaseType(GeneratedsSuper):
"""Base for a reference to a remote or local resource. This type
contains only a restricted and annotated set of the attributes
from the xlink:simpleLink attributeGroup. Reference to a remote
resource or local payload. A remote resource is typically
addressed by a URL. For a local payload (such as a multipart
mime message), the xlink:href must start with the prefix cid:.
Reference to a resource that describes the role of this
reference. When no value is supplied, no particular role value
is to be inferred. Although allowed, this attribute is not
expected to be useful in this application of xlink:simpleLink.
Describes the meaning of the referenced resource in a human-
readable fashion. Although allowed, this attribute is not
expected to be useful in this application of xlink:simpleLink.
Although allowed, this attribute is not expected to be useful in
this application of xlink:simpleLink."""
subclass = None
superclass = None
def __init__(self, show=None, title=None, actuate=None, href=None, role=None, arcrole=None, type_=None, valueOf_=None):
self.show = _cast(None, show)
self.title = _cast(None, title)
self.actuate = _cast(None, actuate)
self.href = _cast(None, href)
self.role = _cast(None, role)
self.arcrole = _cast(None, arcrole)
self.type_ = _cast(None, type_)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if AbstractReferenceBaseType.subclass:
return AbstractReferenceBaseType.subclass(*args_, **kwargs_)
else:
return AbstractReferenceBaseType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_show(self): return self.show
def set_show(self, show): self.show = show
def get_title(self): return self.title
def set_title(self, title): self.title = title
def get_actuate(self): return self.actuate
def set_actuate(self, actuate): self.actuate = actuate
def get_href(self): return self.href
def set_href(self, href): self.href = href
def get_role(self): return self.role
def set_role(self, role): self.role = role
def get_arcrole(self): return self.arcrole
def set_arcrole(self, arcrole): self.arcrole = arcrole
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='AbstractReferenceBaseType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='AbstractReferenceBaseType')
if self.hasContent_():
outfile.write('>')
outfile.write(self.valueOf_.encode(ExternalEncoding))
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AbstractReferenceBaseType'):
if self.show is not None and 'show' not in already_processed:
already_processed.append('show')
outfile.write(' show=%s' % (self.gds_format_string(quote_attrib(self.show).encode(ExternalEncoding), input_name='show'), ))
if self.title is not None and 'title' not in already_processed:
already_processed.append('title')
outfile.write(' title=%s' % (self.gds_format_string(quote_attrib(self.title).encode(ExternalEncoding), input_name='title'), ))
if self.actuate is not None and 'actuate' not in already_processed:
already_processed.append('actuate')
outfile.write(' actuate=%s' % (self.gds_format_string(quote_attrib(self.actuate).encode(ExternalEncoding), input_name='actuate'), ))
if self.href is not None and 'href' not in already_processed:
already_processed.append('href')
outfile.write(' href=%s' % (self.gds_format_string(quote_attrib(self.href).encode(ExternalEncoding), input_name='href'), ))
if self.role is not None and 'role' not in already_processed:
already_processed.append('role')
outfile.write(' role=%s' % (self.gds_format_string(quote_attrib(self.role).encode(ExternalEncoding), input_name='role'), ))
if self.arcrole is not None and 'arcrole' not in already_processed:
already_processed.append('arcrole')
outfile.write(' arcrole=%s' % (self.gds_format_string(quote_attrib(self.arcrole).encode(ExternalEncoding), input_name='arcrole'), ))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.append('type_')
outfile.write(' type=%s' % (self.gds_format_string(quote_attrib(self.type_).encode(ExternalEncoding), input_name='type'), ))
def exportChildren(self, outfile, level, namespace_='', name_='AbstractReferenceBaseType', fromsubclass_=False):
pass
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AbstractReferenceBaseType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.show is not None and 'show' not in already_processed:
already_processed.append('show')
showIndent(outfile, level)
outfile.write('show = "%s",\n' % (self.show,))
if self.title is not None and 'title' not in already_processed:
already_processed.append('title')
showIndent(outfile, level)
outfile.write('title = "%s",\n' % (self.title,))
if self.actuate is not None and 'actuate' not in already_processed:
already_processed.append('actuate')
showIndent(outfile, level)
outfile.write('actuate = "%s",\n' % (self.actuate,))
if self.href is not None and 'href' not in already_processed:
already_processed.append('href')
showIndent(outfile, level)
outfile.write('href = "%s",\n' % (self.href,))
if self.role is not None and 'role' not in already_processed:
already_processed.append('role')
showIndent(outfile, level)
outfile.write('role = "%s",\n' % (self.role,))
if self.arcrole is not None and 'arcrole' not in already_processed:
already_processed.append('arcrole')
showIndent(outfile, level)
outfile.write('arcrole = "%s",\n' % (self.arcrole,))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.append('type_')
showIndent(outfile, level)
outfile.write('type_ = "%s",\n' % (self.type_,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('show', node)
if value is not None and 'show' not in already_processed:
already_processed.append('show')
self.show = value
value = find_attr_value_('title', node)
if value is not None and 'title' not in already_processed:
already_processed.append('title')
self.title = value
value = find_attr_value_('actuate', node)
if value is not None and 'actuate' not in already_processed:
already_processed.append('actuate')
self.actuate = value
value = find_attr_value_('href', node)
if value is not None and 'href' not in already_processed:
already_processed.append('href')
self.href = value
value = find_attr_value_('role', node)
if value is not None and 'role' not in already_processed:
already_processed.append('role')
self.role = value
value = find_attr_value_('arcrole', node)
if value is not None and 'arcrole' not in already_processed:
already_processed.append('arcrole')
self.arcrole = value
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.append('type')
self.type_ = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class AbstractReferenceBaseType
class ReferenceType(AbstractReferenceBaseType):
"""Complete reference to a remote or local resource, allowing including
metadata about that resource."""
subclass = None
superclass = AbstractReferenceBaseType
def __init__(self, show=None, title=None, actuate=None, href=None, role=None, arcrole=None, type_=None, Identifier=None, Abstract=None, Format=None, Metadata=None):
super(ReferenceType, self).__init__(show, title, actuate, href, role, arcrole, type_, )
self.Identifier = Identifier
if Abstract is None:
self.Abstract = []
else:
self.Abstract = Abstract
self.Format = Format
if Metadata is None:
self.Metadata = []
else:
self.Metadata = Metadata
def factory(*args_, **kwargs_):
if ReferenceType.subclass:
return ReferenceType.subclass(*args_, **kwargs_)
else:
return ReferenceType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Identifier(self): return self.Identifier
def set_Identifier(self, Identifier): self.Identifier = Identifier
def get_Abstract(self): return self.Abstract
def set_Abstract(self, Abstract): self.Abstract = Abstract
def add_Abstract(self, value): self.Abstract.append(value)
def insert_Abstract(self, index, value): self.Abstract[index] = value
def get_Format(self): return self.Format
def set_Format(self, Format): self.Format = Format
def validate_MimeType(self, value):
# Validate type MimeType, a restriction on string.
pass
def get_Metadata(self): return self.Metadata
def set_Metadata(self, Metadata): self.Metadata = Metadata
def add_Metadata(self, value): self.Metadata.append(value)
def insert_Metadata(self, index, value): self.Metadata[index] = value
def export(self, outfile, level, namespace_='', name_='ReferenceType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='ReferenceType')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
# outfile.write(' xsi:type="ReferenceType"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ReferenceType'):
super(ReferenceType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ReferenceType')
def exportChildren(self, outfile, level, namespace_='', name_='ReferenceType', fromsubclass_=False):
super(ReferenceType, self).exportChildren(outfile, level, namespace_, name_, True)
if self.Identifier:
self.Identifier.export(outfile, level, namespace_, name_='Identifier')
for Abstract_ in self.Abstract:
Abstract_.export(outfile, level, namespace_, name_='Abstract')
if self.Format is not None:
showIndent(outfile, level)
outfile.write('<%sFormat>%s</%sFormat>\n' % (namespace_, self.gds_format_string(quote_xml(self.Format).encode(ExternalEncoding), input_name='Format'), namespace_))
for Metadata_ in self.Metadata:
Metadata_.export(outfile, level, namespace_, name_='Metadata')
def hasContent_(self):
if (
self.Identifier is not None or
self.Abstract or
self.Format is not None or
self.Metadata or
super(ReferenceType, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='ReferenceType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(ReferenceType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(ReferenceType, self).exportLiteralChildren(outfile, level, name_)
if self.Identifier is not None:
showIndent(outfile, level)
outfile.write('Identifier=model_.Identifier(\n')
self.Identifier.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
outfile.write('Abstract=[\n')
level += 1
for Abstract_ in self.Abstract:
showIndent(outfile, level)
outfile.write('model_.Abstract(\n')
Abstract_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.Format is not None:
showIndent(outfile, level)
outfile.write('Format=%s,\n' % quote_python(self.Format).encode(ExternalEncoding))
showIndent(outfile, level)
outfile.write('Metadata=[\n')
level += 1
for Metadata_ in self.Metadata:
showIndent(outfile, level)
outfile.write('model_.Metadata(\n')
Metadata_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(ReferenceType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Identifier':
obj_ = CodeType.factory()
obj_.build(child_)
self.set_Identifier(obj_)
elif nodeName_ == 'Abstract':
obj_ = LanguageStringType.factory()
obj_.build(child_)
self.Abstract.append(obj_)
elif nodeName_ == 'Format':
Format_ = child_.text
Format_ = self.gds_validate_string(Format_, node, 'Format')
self.Format = Format_
self.validate_MimeType(self.Format) # validate type MimeType
elif nodeName_ == 'Metadata':
obj_ = MetadataType.factory()
obj_.build(child_)
self.Metadata.append(obj_)
super(ReferenceType, self).buildChildren(child_, node, nodeName_, True)
# end class ReferenceType
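# Illustrative sketch (not part of the generated bindings): a ReferenceType
# pointing at a remote resource through the inherited xlink attributes plus a
# Format child element.  The URL and MIME type are placeholders, not values
# required by the schema.
def _example_reference(outfile=None):
    import sys
    outfile = outfile or sys.stdout
    ref = ReferenceType(href='http://example.org/data/roads.gml',  # placeholder
                        type_='simple',
                        Format='text/xml; subtype=gml/3.1.1')      # placeholder
    ref.export(outfile, 0, name_='Reference')
    return ref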
class ReferenceGroupType(BasicIdentificationType):
"""Logical group of one or more references to remote and/or local
resources, allowing including metadata about that group. A Group
can be used instead of a Manifest that can only contain one
group."""
subclass = None
superclass = BasicIdentificationType
def __init__(self, Title=None, Abstract=None, Keywords=None, Identifier=None, Metadata=None, AbstractReferenceBase=None):
super(ReferenceGroupType, self).__init__(Title, Abstract, Keywords, Identifier, Metadata, )
if AbstractReferenceBase is None:
self.AbstractReferenceBase = []
else:
self.AbstractReferenceBase = AbstractReferenceBase
def factory(*args_, **kwargs_):
if ReferenceGroupType.subclass:
return ReferenceGroupType.subclass(*args_, **kwargs_)
else:
return ReferenceGroupType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_AbstractReferenceBase(self): return self.AbstractReferenceBase
def set_AbstractReferenceBase(self, AbstractReferenceBase): self.AbstractReferenceBase = AbstractReferenceBase
def add_AbstractReferenceBase(self, value): self.AbstractReferenceBase.append(value)
def insert_AbstractReferenceBase(self, index, value): self.AbstractReferenceBase[index] = value
def export(self, outfile, level, namespace_='', name_='ReferenceGroupType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='ReferenceGroupType')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
# outfile.write(' xsi:type="ReferenceGroupType"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ReferenceGroupType'):
super(ReferenceGroupType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ReferenceGroupType')
def exportChildren(self, outfile, level, namespace_='', name_='ReferenceGroupType', fromsubclass_=False):
super(ReferenceGroupType, self).exportChildren(outfile, level, namespace_, name_, True)
for AbstractReferenceBase_ in self.get_AbstractReferenceBase():
AbstractReferenceBase_.export(outfile, level, namespace_, name_='AbstractReferenceBase')
def hasContent_(self):
if (
self.AbstractReferenceBase or
super(ReferenceGroupType, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='ReferenceGroupType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(ReferenceGroupType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(ReferenceGroupType, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('AbstractReferenceBase=[\n')
level += 1
for AbstractReferenceBase_ in self.AbstractReferenceBase:
showIndent(outfile, level)
outfile.write('model_.AbstractReferenceBase(\n')
AbstractReferenceBase_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(ReferenceGroupType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'AbstractReferenceBase':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <AbstractReferenceBase> element')
self.AbstractReferenceBase.append(obj_)
super(ReferenceGroupType, self).buildChildren(child_, node, nodeName_, True)
# end class ReferenceGroupType
class ManifestType(BasicIdentificationType):
"""Unordered list of one or more groups of references to remote and/or
local resources."""
subclass = None
superclass = BasicIdentificationType
def __init__(self, Title=None, Abstract=None, Keywords=None, Identifier=None, Metadata=None, ReferenceGroup=None):
super(ManifestType, self).__init__(Title, Abstract, Keywords, Identifier, Metadata, )
if ReferenceGroup is None:
self.ReferenceGroup = []
else:
self.ReferenceGroup = ReferenceGroup
def factory(*args_, **kwargs_):
if ManifestType.subclass:
return ManifestType.subclass(*args_, **kwargs_)
else:
return ManifestType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ReferenceGroup(self): return self.ReferenceGroup
def set_ReferenceGroup(self, ReferenceGroup): self.ReferenceGroup = ReferenceGroup
def add_ReferenceGroup(self, value): self.ReferenceGroup.append(value)
def insert_ReferenceGroup(self, index, value): self.ReferenceGroup[index] = value
def export(self, outfile, level, namespace_='', name_='ManifestType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='ManifestType')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
# outfile.write(' xsi:type="ManifestType"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ManifestType'):
super(ManifestType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ManifestType')
def exportChildren(self, outfile, level, namespace_='', name_='ManifestType', fromsubclass_=False):
super(ManifestType, self).exportChildren(outfile, level, namespace_, name_, True)
for ReferenceGroup_ in self.ReferenceGroup:
ReferenceGroup_.export(outfile, level, namespace_, name_='ReferenceGroup')
def hasContent_(self):
if (
self.ReferenceGroup or
super(ManifestType, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='ManifestType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(ManifestType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(ManifestType, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('ReferenceGroup=[\n')
level += 1
for ReferenceGroup_ in self.ReferenceGroup:
showIndent(outfile, level)
outfile.write('model_.ReferenceGroup(\n')
ReferenceGroup_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(ManifestType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'ReferenceGroup':
obj_ = ReferenceGroupType.factory()
obj_.build(child_)
self.ReferenceGroup.append(obj_)
super(ManifestType, self).buildChildren(child_, node, nodeName_, True)
# end class ManifestType
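# Illustrative sketch (not part of the generated bindings): a ManifestType
# wrapping one ReferenceGroupType, which in turn carries a single
# ReferenceType in its AbstractReferenceBase list.  The href is a
# placeholder; identifiers and metadata are omitted to keep the sketch
# minimal.
def _example_manifest(outfile=None):
    import sys
    outfile = outfile or sys.stdout
    group = ReferenceGroupType()
    group.add_AbstractReferenceBase(
        ReferenceType(href='http://example.org/data/roads.gml',  # placeholder
                      type_='simple'))
    manifest = ManifestType(ReferenceGroup=[group])
    manifest.export(outfile, 0, name_='Manifest')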
class ServiceReferenceType(ReferenceType):
"""Complete reference to a remote resource that needs to be retrieved
from an OWS using an XML-encoded operation request. This element
shall be used, within an InputData or Manifest element that is
used for input data, when that input data needs to be retrieved
    from another web service using an XML-encoded OWS operation
request. This element shall not be used for local payload input
data or for requesting the resource from a web server using HTTP
Get."""
subclass = None
superclass = ReferenceType
def __init__(self, show=None, title=None, actuate=None, href=None, role=None, arcrole=None, type_=None, Identifier=None, Abstract=None, Format=None, Metadata=None, RequestMessage=None, RequestMessageReference=None):
super(ServiceReferenceType, self).__init__(show, title, actuate, href, role, arcrole, type_, Identifier, Abstract, Format, Metadata, )
self.RequestMessage = RequestMessage
self.RequestMessageReference = RequestMessageReference
def factory(*args_, **kwargs_):
if ServiceReferenceType.subclass:
return ServiceReferenceType.subclass(*args_, **kwargs_)
else:
return ServiceReferenceType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_RequestMessage(self): return self.RequestMessage
def set_RequestMessage(self, RequestMessage): self.RequestMessage = RequestMessage
def get_RequestMessageReference(self): return self.RequestMessageReference
def set_RequestMessageReference(self, RequestMessageReference): self.RequestMessageReference = RequestMessageReference
def export(self, outfile, level, namespace_='', name_='ServiceReferenceType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='ServiceReferenceType')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
# outfile.write(' xsi:type="ServiceReferenceType"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ServiceReferenceType'):
super(ServiceReferenceType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ServiceReferenceType')
def exportChildren(self, outfile, level, namespace_='', name_='ServiceReferenceType', fromsubclass_=False):
super(ServiceReferenceType, self).exportChildren(outfile, level, namespace_, name_, True)
if self.RequestMessage is not None:
showIndent(outfile, level)
outfile.write('<%sRequestMessage>%s</%sRequestMessage>\n' % (namespace_, self.gds_format_string(quote_xml(self.RequestMessage).encode(ExternalEncoding), input_name='RequestMessage'), namespace_))
if self.RequestMessageReference is not None:
showIndent(outfile, level)
outfile.write('<%sRequestMessageReference>%s</%sRequestMessageReference>\n' % (namespace_, self.gds_format_string(quote_xml(self.RequestMessageReference).encode(ExternalEncoding), input_name='RequestMessageReference'), namespace_))
def hasContent_(self):
if (
self.RequestMessage is not None or
self.RequestMessageReference is not None or
super(ServiceReferenceType, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='ServiceReferenceType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(ServiceReferenceType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(ServiceReferenceType, self).exportLiteralChildren(outfile, level, name_)
if self.RequestMessage is not None:
showIndent(outfile, level)
outfile.write('RequestMessage=%s,\n' % quote_python(self.RequestMessage).encode(ExternalEncoding))
if self.RequestMessageReference is not None:
showIndent(outfile, level)
outfile.write('RequestMessageReference=%s,\n' % quote_python(self.RequestMessageReference).encode(ExternalEncoding))
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(ServiceReferenceType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'RequestMessage':
RequestMessage_ = child_.text
RequestMessage_ = self.gds_validate_string(RequestMessage_, node, 'RequestMessage')
self.RequestMessage = RequestMessage_
elif nodeName_ == 'RequestMessageReference':
RequestMessageReference_ = child_.text
RequestMessageReference_ = self.gds_validate_string(RequestMessageReference_, node, 'RequestMessageReference')
self.RequestMessageReference = RequestMessageReference_
super(ServiceReferenceType, self).buildChildren(child_, node, nodeName_, True)
# end class ServiceReferenceType
class DomainType(UnNamedDomainType):
"""Valid domain (or allowed set of values) of one quantity, with its
name or identifier. Name or identifier of this quantity."""
subclass = None
superclass = UnNamedDomainType
def __init__(self, AllowedValues=None, AnyValue=None, NoValues=None, ValuesReference=None, DefaultValue=None, Meaning=None, DataType=None, UOM=None, ReferenceSystem=None, Metadata=None, name=None):
super(DomainType, self).__init__(AllowedValues, AnyValue, NoValues, ValuesReference, DefaultValue, Meaning, DataType, UOM, ReferenceSystem, Metadata, )
self.name = _cast(None, name)
pass
def factory(*args_, **kwargs_):
if DomainType.subclass:
return DomainType.subclass(*args_, **kwargs_)
else:
return DomainType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_name(self): return self.name
def set_name(self, name): self.name = name
def export(self, outfile, level, namespace_='', name_='DomainType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='DomainType')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
# outfile.write(' xsi:type="DomainType"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='DomainType'):
super(DomainType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='DomainType')
if self.name is not None and 'name' not in already_processed:
already_processed.append('name')
outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
def exportChildren(self, outfile, level, namespace_='', name_='DomainType', fromsubclass_=False):
super(DomainType, self).exportChildren(outfile, level, namespace_, name_, True)
def hasContent_(self):
if (
super(DomainType, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='DomainType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.name is not None and 'name' not in already_processed:
already_processed.append('name')
showIndent(outfile, level)
outfile.write('name = "%s",\n' % (self.name,))
super(DomainType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(DomainType, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('name', node)
if value is not None and 'name' not in already_processed:
already_processed.append('name')
self.name = value
super(DomainType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(DomainType, self).buildChildren(child_, node, nodeName_, True)
pass
# end class DomainType
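# Illustrative sketch (not part of the generated bindings): DomainType only
# adds the name attribute on top of UnNamedDomainType, so a bare named domain
# typically exports as a self-closing element (assuming the inherited content
# checks find nothing to write).  The name and element name are placeholders.
def _example_named_domain(outfile=None):
    import sys
    outfile = outfile or sys.stdout
    domain = DomainType(name='GetCapabilities')  # placeholder name
    domain.export(outfile, 0, name_='Constraint')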
class Theme(DescriptionType):
subclass = None
superclass = DescriptionType
def __init__(self, Title=None, Abstract=None, Keywords=None, Identifier=None, Theme=None, LayerRef=None):
super(Theme, self).__init__(Title, Abstract, Keywords, )
self.Identifier = Identifier
if Theme is None:
self.Theme = []
else:
self.Theme = Theme
if LayerRef is None:
self.LayerRef = []
else:
self.LayerRef = LayerRef
def factory(*args_, **kwargs_):
if Theme.subclass:
return Theme.subclass(*args_, **kwargs_)
else:
return Theme(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Identifier(self): return self.Identifier
def set_Identifier(self, Identifier): self.Identifier = Identifier
def get_Theme(self): return self.Theme
def set_Theme(self, Theme): self.Theme = Theme
def add_Theme(self, value): self.Theme.append(value)
def insert_Theme(self, index, value): self.Theme[index] = value
def get_LayerRef(self): return self.LayerRef
def set_LayerRef(self, LayerRef): self.LayerRef = LayerRef
def add_LayerRef(self, value): self.LayerRef.append(value)
def insert_LayerRef(self, index, value): self.LayerRef[index] = value
def export(self, outfile, level, namespace_='', name_='Theme', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='Theme')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
# outfile.write(' xsi:type="Theme"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Theme'):
super(Theme, self).exportAttributes(outfile, level, already_processed, namespace_, name_='Theme')
def exportChildren(self, outfile, level, namespace_='', name_='Theme', fromsubclass_=False):
super(Theme, self).exportChildren(outfile, level, namespace_, name_, True)
if self.Identifier:
self.Identifier.export(outfile, level, namespace_, name_='Identifier', )
for Theme_ in self.Theme:
Theme_.export(outfile, level, namespace_, name_='Theme')
for LayerRef_ in self.LayerRef:
showIndent(outfile, level)
outfile.write('<%sLayerRef>%s</%sLayerRef>\n' % (namespace_, self.gds_format_string(quote_xml(LayerRef_).encode(ExternalEncoding), input_name='LayerRef'), namespace_))
def hasContent_(self):
if (
self.Identifier is not None or
self.Theme or
self.LayerRef or
super(Theme, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='Theme'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(Theme, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(Theme, self).exportLiteralChildren(outfile, level, name_)
if self.Identifier is not None:
showIndent(outfile, level)
outfile.write('Identifier=model_.Identifier(\n')
self.Identifier.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
outfile.write('Theme=[\n')
level += 1
for Theme_ in self.Theme:
showIndent(outfile, level)
outfile.write('model_.Theme(\n')
Theme_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('LayerRef=[\n')
level += 1
for LayerRef_ in self.LayerRef:
showIndent(outfile, level)
outfile.write('%s,\n' % quote_python(LayerRef_).encode(ExternalEncoding))
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(Theme, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Identifier':
obj_ = CodeType.factory()
obj_.build(child_)
self.set_Identifier(obj_)
elif nodeName_ == 'Theme':
obj_ = Theme.factory()
obj_.build(child_)
self.Theme.append(obj_)
elif nodeName_ == 'LayerRef':
LayerRef_ = child_.text
LayerRef_ = self.gds_validate_string(LayerRef_, node, 'LayerRef')
self.LayerRef.append(LayerRef_)
super(Theme, self).buildChildren(child_, node, nodeName_, True)
# end class Theme
class TileMatrix(DescriptionType):
"""Describes a particular tile matrix."""
subclass = None
superclass = DescriptionType
def __init__(self, Title=None, Abstract=None, Keywords=None, Identifier=None, ScaleDenominator=None, TopLeftCorner=None, TileWidth=None, TileHeight=None, MatrixWidth=None, MatrixHeight=None):
super(TileMatrix, self).__init__(Title, Abstract, Keywords, )
self.Identifier = Identifier
self.ScaleDenominator = ScaleDenominator
self.TopLeftCorner = TopLeftCorner
self.TileWidth = TileWidth
self.TileHeight = TileHeight
self.MatrixWidth = MatrixWidth
self.MatrixHeight = MatrixHeight
def factory(*args_, **kwargs_):
if TileMatrix.subclass:
return TileMatrix.subclass(*args_, **kwargs_)
else:
return TileMatrix(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Identifier(self): return self.Identifier
def set_Identifier(self, Identifier): self.Identifier = Identifier
def get_ScaleDenominator(self): return self.ScaleDenominator
def set_ScaleDenominator(self, ScaleDenominator): self.ScaleDenominator = ScaleDenominator
def get_TopLeftCorner(self): return self.TopLeftCorner
def set_TopLeftCorner(self, TopLeftCorner): self.TopLeftCorner = TopLeftCorner
def validate_PositionType(self, value):
# Validate type PositionType, a restriction on double.
pass
def get_TileWidth(self): return self.TileWidth
def set_TileWidth(self, TileWidth): self.TileWidth = TileWidth
def get_TileHeight(self): return self.TileHeight
def set_TileHeight(self, TileHeight): self.TileHeight = TileHeight
def get_MatrixWidth(self): return self.MatrixWidth
def set_MatrixWidth(self, MatrixWidth): self.MatrixWidth = MatrixWidth
def get_MatrixHeight(self): return self.MatrixHeight
def set_MatrixHeight(self, MatrixHeight): self.MatrixHeight = MatrixHeight
def export(self, outfile, level, namespace_='', name_='TileMatrix', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='TileMatrix')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
# outfile.write(' xsi:type="TileMatrix"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='TileMatrix'):
super(TileMatrix, self).exportAttributes(outfile, level, already_processed, namespace_, name_='TileMatrix')
def exportChildren(self, outfile, level, namespace_='', name_='TileMatrix', fromsubclass_=False):
super(TileMatrix, self).exportChildren(outfile, level, namespace_, name_, True)
if self.Identifier:
self.Identifier.export(outfile, level, namespace_, name_='Identifier', )
if self.ScaleDenominator is not None:
showIndent(outfile, level)
outfile.write('<%sScaleDenominator>%s</%sScaleDenominator>\n' % (namespace_, self.gds_format_double(self.ScaleDenominator, input_name='ScaleDenominator'), namespace_))
if self.TopLeftCorner is not None:
showIndent(outfile, level)
outfile.write('<%sTopLeftCorner>%s</%sTopLeftCorner>\n' % (namespace_, self.gds_format_double_list(self.TopLeftCorner, input_name='TopLeftCorner'), namespace_))
if self.TileWidth is not None:
showIndent(outfile, level)
outfile.write('<%sTileWidth>%s</%sTileWidth>\n' % (namespace_, self.gds_format_integer(self.TileWidth, input_name='TileWidth'), namespace_))
if self.TileHeight is not None:
showIndent(outfile, level)
outfile.write('<%sTileHeight>%s</%sTileHeight>\n' % (namespace_, self.gds_format_integer(self.TileHeight, input_name='TileHeight'), namespace_))
if self.MatrixWidth is not None:
showIndent(outfile, level)
outfile.write('<%sMatrixWidth>%s</%sMatrixWidth>\n' % (namespace_, self.gds_format_integer(self.MatrixWidth, input_name='MatrixWidth'), namespace_))
if self.MatrixHeight is not None:
showIndent(outfile, level)
outfile.write('<%sMatrixHeight>%s</%sMatrixHeight>\n' % (namespace_, self.gds_format_integer(self.MatrixHeight, input_name='MatrixHeight'), namespace_))
def hasContent_(self):
if (
self.Identifier is not None or
self.ScaleDenominator is not None or
self.TopLeftCorner is not None or
self.TileWidth is not None or
self.TileHeight is not None or
self.MatrixWidth is not None or
self.MatrixHeight is not None or
super(TileMatrix, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='TileMatrix'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(TileMatrix, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(TileMatrix, self).exportLiteralChildren(outfile, level, name_)
if self.Identifier is not None:
showIndent(outfile, level)
outfile.write('Identifier=model_.Identifier(\n')
self.Identifier.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.ScaleDenominator is not None:
showIndent(outfile, level)
outfile.write('ScaleDenominator=%e,\n' % self.ScaleDenominator)
if self.TopLeftCorner is not None:
showIndent(outfile, level)
            # TopLeftCorner may be a list (build() splits the parsed string), so format with %s
            outfile.write('TopLeftCorner=%s,\n' % (self.TopLeftCorner,))
if self.TileWidth is not None:
showIndent(outfile, level)
outfile.write('TileWidth=%d,\n' % self.TileWidth)
if self.TileHeight is not None:
showIndent(outfile, level)
outfile.write('TileHeight=%d,\n' % self.TileHeight)
if self.MatrixWidth is not None:
showIndent(outfile, level)
outfile.write('MatrixWidth=%d,\n' % self.MatrixWidth)
if self.MatrixHeight is not None:
showIndent(outfile, level)
outfile.write('MatrixHeight=%d,\n' % self.MatrixHeight)
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(TileMatrix, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Identifier':
obj_ = CodeType.factory()
obj_.build(child_)
self.set_Identifier(obj_)
elif nodeName_ == 'ScaleDenominator':
sval_ = child_.text
try:
fval_ = float(sval_)
            except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'ScaleDenominator')
self.ScaleDenominator = fval_
elif nodeName_ == 'TopLeftCorner':
TopLeftCorner_ = child_.text
TopLeftCorner_ = self.gds_validate_double_list(TopLeftCorner_, node, 'TopLeftCorner')
self.TopLeftCorner = TopLeftCorner_
self.TopLeftCorner = self.TopLeftCorner.split()
self.validate_PositionType(self.TopLeftCorner) # validate type PositionType
elif nodeName_ == 'TileWidth':
sval_ = child_.text
try:
ival_ = int(sval_)
            except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
if ival_ <= 0:
raise_parse_error(child_, 'requires positiveInteger')
ival_ = self.gds_validate_integer(ival_, node, 'TileWidth')
self.TileWidth = ival_
elif nodeName_ == 'TileHeight':
sval_ = child_.text
try:
ival_ = int(sval_)
            except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
if ival_ <= 0:
raise_parse_error(child_, 'requires positiveInteger')
ival_ = self.gds_validate_integer(ival_, node, 'TileHeight')
self.TileHeight = ival_
elif nodeName_ == 'MatrixWidth':
sval_ = child_.text
try:
ival_ = int(sval_)
            except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
if ival_ <= 0:
raise_parse_error(child_, 'requires positiveInteger')
ival_ = self.gds_validate_integer(ival_, node, 'MatrixWidth')
self.MatrixWidth = ival_
elif nodeName_ == 'MatrixHeight':
sval_ = child_.text
try:
ival_ = int(sval_)
            except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
if ival_ <= 0:
raise_parse_error(child_, 'requires positiveInteger')
ival_ = self.gds_validate_integer(ival_, node, 'MatrixHeight')
self.MatrixHeight = ival_
super(TileMatrix, self).buildChildren(child_, node, nodeName_, True)
# end class TileMatrix
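# Illustrative sketch (not part of the generated bindings): a single
# tile-matrix level with the scalar fields that buildChildren() above parses
# from XML.  The numbers are placeholders; Identifier is left unset to avoid
# assuming CodeType's constructor signature, and TopLeftCorner is given as a
# space-separated string whose exact rendering depends on
# gds_format_double_list() defined earlier in this module.
def _example_tile_matrix(outfile=None):
    import sys
    outfile = outfile or sys.stdout
    tm = TileMatrix(ScaleDenominator=559082264.029,   # placeholder values
                    TopLeftCorner='-180.0 90.0',
                    TileWidth=256, TileHeight=256,
                    MatrixWidth=1, MatrixHeight=1)
    tm.export(outfile, 0, name_='TileMatrix')
    return tm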
class TileMatrixSet(DescriptionType):
"""Describes a particular set of tile matrices."""
subclass = None
superclass = DescriptionType
def __init__(self, Title=None, Abstract=None, Keywords=None, Identifier=None, BoundingBox=None, SupportedCRS=None, WellKnownScaleSet=None, TileMatrix=None):
super(TileMatrixSet, self).__init__(Title, Abstract, Keywords, )
self.Identifier = Identifier
self.BoundingBox = BoundingBox
self.SupportedCRS = SupportedCRS
self.WellKnownScaleSet = WellKnownScaleSet
if TileMatrix is None:
self.TileMatrix = []
else:
self.TileMatrix = TileMatrix
def factory(*args_, **kwargs_):
if TileMatrixSet.subclass:
return TileMatrixSet.subclass(*args_, **kwargs_)
else:
return TileMatrixSet(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Identifier(self): return self.Identifier
def set_Identifier(self, Identifier): self.Identifier = Identifier
def get_BoundingBox(self): return self.BoundingBox
def set_BoundingBox(self, BoundingBox): self.BoundingBox = BoundingBox
def get_SupportedCRS(self): return self.SupportedCRS
def set_SupportedCRS(self, SupportedCRS): self.SupportedCRS = SupportedCRS
def get_WellKnownScaleSet(self): return self.WellKnownScaleSet
def set_WellKnownScaleSet(self, WellKnownScaleSet): self.WellKnownScaleSet = WellKnownScaleSet
def get_TileMatrix(self): return self.TileMatrix
def set_TileMatrix(self, TileMatrix): self.TileMatrix = TileMatrix
def add_TileMatrix(self, value): self.TileMatrix.append(value)
def insert_TileMatrix(self, index, value): self.TileMatrix[index] = value
def export(self, outfile, level, namespace_='', name_='TileMatrixSet', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='TileMatrixSet')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
# outfile.write(' xsi:type="TileMatrixSet"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='TileMatrixSet'):
super(TileMatrixSet, self).exportAttributes(outfile, level, already_processed, namespace_, name_='TileMatrixSet')
def exportChildren(self, outfile, level, namespace_='', name_='TileMatrixSet', fromsubclass_=False):
super(TileMatrixSet, self).exportChildren(outfile, level, namespace_, name_, True)
if self.Identifier:
self.Identifier.export(outfile, level, namespace_, name_='Identifier', )
if self.BoundingBox:
self.BoundingBox.export(outfile, level, namespace_, name_='BoundingBox')
if self.SupportedCRS is not None:
showIndent(outfile, level)
outfile.write('<%sows:SupportedCRS>%s</%sows:SupportedCRS>\n' % (namespace_, self.gds_format_string(quote_xml(self.SupportedCRS).encode(ExternalEncoding), input_name='SupportedCRS'), namespace_))
if self.WellKnownScaleSet is not None:
showIndent(outfile, level)
outfile.write('<%sWellKnownScaleSet>%s</%sWellKnownScaleSet>\n' % (namespace_, self.gds_format_string(quote_xml(self.WellKnownScaleSet).encode(ExternalEncoding), input_name='WellKnownScaleSet'), namespace_))
for TileMatrix_ in self.TileMatrix:
TileMatrix_.export(outfile, level, namespace_, name_='TileMatrix')
def hasContent_(self):
if (
self.Identifier is not None or
self.BoundingBox is not None or
self.SupportedCRS is not None or
self.WellKnownScaleSet is not None or
self.TileMatrix or
super(TileMatrixSet, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='TileMatrixSet'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(TileMatrixSet, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(TileMatrixSet, self).exportLiteralChildren(outfile, level, name_)
if self.Identifier is not None:
showIndent(outfile, level)
outfile.write('Identifier=model_.Identifier(\n')
self.Identifier.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.BoundingBox is not None:
showIndent(outfile, level)
outfile.write('BoundingBox=model_.BoundingBox(\n')
self.BoundingBox.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.SupportedCRS is not None:
showIndent(outfile, level)
outfile.write('SupportedCRS=%s,\n' % quote_python(self.SupportedCRS).encode(ExternalEncoding))
if self.WellKnownScaleSet is not None:
showIndent(outfile, level)
outfile.write('WellKnownScaleSet=%s,\n' % quote_python(self.WellKnownScaleSet).encode(ExternalEncoding))
showIndent(outfile, level)
outfile.write('TileMatrix=[\n')
level += 1
for TileMatrix_ in self.TileMatrix:
showIndent(outfile, level)
outfile.write('model_.TileMatrix(\n')
TileMatrix_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(TileMatrixSet, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Identifier':
obj_ = CodeType.factory()
obj_.build(child_)
self.set_Identifier(obj_)
elif nodeName_ == 'BoundingBox':
obj_ = BoundingBoxType.factory()
obj_.build(child_)
self.set_BoundingBox(obj_)
elif nodeName_ == 'SupportedCRS':
SupportedCRS_ = child_.text
SupportedCRS_ = self.gds_validate_string(SupportedCRS_, node, 'SupportedCRS')
self.SupportedCRS = SupportedCRS_
elif nodeName_ == 'WellKnownScaleSet':
WellKnownScaleSet_ = child_.text
WellKnownScaleSet_ = self.gds_validate_string(WellKnownScaleSet_, node, 'WellKnownScaleSet')
self.WellKnownScaleSet = WellKnownScaleSet_
elif nodeName_ == 'TileMatrix':
obj_ = TileMatrix.factory()
obj_.build(child_)
self.TileMatrix.append(obj_)
super(TileMatrixSet, self).buildChildren(child_, node, nodeName_, True)
# end class TileMatrixSet
class Dimension(DescriptionType):
"""Metadata about a particular dimension that the tiles of a layer are
available."""
subclass = None
superclass = DescriptionType
def __init__(self, Title=None, Abstract=None, Keywords=None, Identifier=None, UOM=None, UnitSymbol=None, Default=None, Current=None, Value=None):
super(Dimension, self).__init__(Title, Abstract, Keywords, )
self.Identifier = Identifier
self.UOM = UOM
self.UnitSymbol = UnitSymbol
self.Default = Default
self.Current = Current
if Value is None:
self.Value = []
else:
self.Value = Value
def factory(*args_, **kwargs_):
if Dimension.subclass:
return Dimension.subclass(*args_, **kwargs_)
else:
return Dimension(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Identifier(self): return self.Identifier
def set_Identifier(self, Identifier): self.Identifier = Identifier
def get_UOM(self): return self.UOM
def set_UOM(self, UOM): self.UOM = UOM
def get_UnitSymbol(self): return self.UnitSymbol
def set_UnitSymbol(self, UnitSymbol): self.UnitSymbol = UnitSymbol
def get_Default(self): return self.Default
def set_Default(self, Default): self.Default = Default
def get_Current(self): return self.Current
def set_Current(self, Current): self.Current = Current
def get_Value(self): return self.Value
def set_Value(self, Value): self.Value = Value
def add_Value(self, value): self.Value.append(value)
def insert_Value(self, index, value): self.Value[index] = value
def export(self, outfile, level, namespace_='', name_='Dimension', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='Dimension')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
# outfile.write(' xsi:type="Dimension"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Dimension'):
super(Dimension, self).exportAttributes(outfile, level, already_processed, namespace_, name_='Dimension')
def exportChildren(self, outfile, level, namespace_='', name_='Dimension', fromsubclass_=False):
super(Dimension, self).exportChildren(outfile, level, namespace_, name_, True)
if self.Identifier:
self.Identifier.export(outfile, level, namespace_, name_='Identifier', )
if self.UOM:
self.UOM.export(outfile, level, namespace_, name_='UOM')
if self.UnitSymbol is not None:
showIndent(outfile, level)
outfile.write('<%sUnitSymbol>%s</%sUnitSymbol>\n' % (namespace_, self.gds_format_string(quote_xml(self.UnitSymbol).encode(ExternalEncoding), input_name='UnitSymbol'), namespace_))
if self.Default is not None:
showIndent(outfile, level)
outfile.write('<%sDefault>%s</%sDefault>\n' % (namespace_, self.gds_format_string(quote_xml(self.Default).encode(ExternalEncoding), input_name='Default'), namespace_))
if self.Current is not None:
showIndent(outfile, level)
outfile.write('<%sCurrent>%s</%sCurrent>\n' % (namespace_, self.gds_format_boolean(self.gds_str_lower(str(self.Current)), input_name='Current'), namespace_))
for Value_ in self.Value:
showIndent(outfile, level)
outfile.write('<%sValue>%s</%sValue>\n' % (namespace_, self.gds_format_string(quote_xml(Value_).encode(ExternalEncoding), input_name='Value'), namespace_))
def hasContent_(self):
if (
self.Identifier is not None or
self.UOM is not None or
self.UnitSymbol is not None or
self.Default is not None or
self.Current is not None or
self.Value or
super(Dimension, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='Dimension'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(Dimension, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(Dimension, self).exportLiteralChildren(outfile, level, name_)
if self.Identifier is not None:
showIndent(outfile, level)
outfile.write('Identifier=model_.Identifier(\n')
self.Identifier.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.UOM is not None:
showIndent(outfile, level)
outfile.write('UOM=model_.UOM(\n')
self.UOM.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.UnitSymbol is not None:
showIndent(outfile, level)
outfile.write('UnitSymbol=%s,\n' % quote_python(self.UnitSymbol).encode(ExternalEncoding))
if self.Default is not None:
showIndent(outfile, level)
outfile.write('Default=%s,\n' % quote_python(self.Default).encode(ExternalEncoding))
if self.Current is not None:
showIndent(outfile, level)
outfile.write('Current=%s,\n' % self.Current)
showIndent(outfile, level)
outfile.write('Value=[\n')
level += 1
for Value_ in self.Value:
showIndent(outfile, level)
outfile.write('%s,\n' % quote_python(Value_).encode(ExternalEncoding))
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(Dimension, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Identifier':
obj_ = CodeType.factory()
obj_.build(child_)
self.set_Identifier(obj_)
elif nodeName_ == 'UOM':
obj_ = DomainMetadataType.factory()
obj_.build(child_)
self.set_UOM(obj_)
elif nodeName_ == 'UnitSymbol':
UnitSymbol_ = child_.text
UnitSymbol_ = self.gds_validate_string(UnitSymbol_, node, 'UnitSymbol')
self.UnitSymbol = UnitSymbol_
elif nodeName_ == 'Default':
Default_ = child_.text
Default_ = self.gds_validate_string(Default_, node, 'Default')
self.Default = Default_
elif nodeName_ == 'Current':
sval_ = child_.text
if sval_ in ('true', '1'):
ival_ = True
elif sval_ in ('false', '0'):
ival_ = False
else:
raise_parse_error(child_, 'requires boolean')
ival_ = self.gds_validate_boolean(ival_, node, 'Current')
self.Current = ival_
elif nodeName_ == 'Value':
Value_ = child_.text
Value_ = self.gds_validate_string(Value_, node, 'Value')
self.Value.append(Value_)
super(Dimension, self).buildChildren(child_, node, nodeName_, True)
# end class Dimension
class LegendURL(OnlineResourceType):
"""Zero or more LegendURL elements may be provided, providing an
image(s) of a legend relevant to each Style of a Layer. The
Format element indicates the MIME type of the legend.
minScaleDenominator and maxScaleDenominator attributes may be
provided to indicate to the client which scale(s) (inclusive)
the legend image is appropriate for. (If provided, these values
must exactly match the scale denominators of available
TileMatrixes.) width and height attributes may be provided to
assist client applications in laying out space to display the
    legend. The individual fields are documented as follows: href is the
    URL from which the legend image can be retrieved; format is a supported
    output format for the legend image; minScaleDenominator is the
    denominator of the minimum scale (inclusive) for which this legend
    image is valid; maxScaleDenominator is the denominator of the maximum
    scale (exclusive) for which this legend image is valid; width is the
    width (in pixels) of the legend image; height is the height (in pixels)
    of the legend image."""
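    # Illustrative element (added): attribute names follow the members of this
    # class and of OnlineResourceType; the values (and URL) are hypothetical.
    #
    #   <LegendURL format="image/png" width="72" height="72"
    #              minScaleDenominator="2e6" maxScaleDenominator="6e6"
    #              xlink:href="http://example.com/legend.png"/>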
subclass = None
superclass = OnlineResourceType
def __init__(self, title=None, arcrole=None, actuate=None, href=None, role=None, show=None, type_=None, height=None, minScaleDenominator=None, maxScaleDenominator=None, width=None, format=None, valueOf_=None):
super(LegendURL, self).__init__(title, arcrole, actuate, href, role, show, type_, valueOf_, )
self.height = _cast(int, height)
self.minScaleDenominator = _cast(float, minScaleDenominator)
self.maxScaleDenominator = _cast(float, maxScaleDenominator)
self.width = _cast(int, width)
self.format = _cast(None, format)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if LegendURL.subclass:
return LegendURL.subclass(*args_, **kwargs_)
else:
return LegendURL(*args_, **kwargs_)
factory = staticmethod(factory)
def get_height(self): return self.height
def set_height(self, height): self.height = height
def get_minScaleDenominator(self): return self.minScaleDenominator
def set_minScaleDenominator(self, minScaleDenominator): self.minScaleDenominator = minScaleDenominator
def get_maxScaleDenominator(self): return self.maxScaleDenominator
def set_maxScaleDenominator(self, maxScaleDenominator): self.maxScaleDenominator = maxScaleDenominator
def get_width(self): return self.width
def set_width(self, width): self.width = width
def get_format(self): return self.format
def set_format(self, format): self.format = format
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='LegendURL', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='LegendURL')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
# outfile.write(' xsi:type="LegendURL"')
if self.hasContent_():
outfile.write('>')
outfile.write(self.valueOf_.encode(ExternalEncoding))
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='LegendURL'):
super(LegendURL, self).exportAttributes(outfile, level, already_processed, namespace_, name_='LegendURL')
if self.height is not None and 'height' not in already_processed:
already_processed.append('height')
outfile.write(' height="%s"' % self.gds_format_integer(self.height, input_name='height'))
if self.minScaleDenominator is not None and 'minScaleDenominator' not in already_processed:
already_processed.append('minScaleDenominator')
outfile.write(' minScaleDenominator="%s"' % self.gds_format_double(self.minScaleDenominator, input_name='minScaleDenominator'))
if self.maxScaleDenominator is not None and 'maxScaleDenominator' not in already_processed:
already_processed.append('maxScaleDenominator')
outfile.write(' maxScaleDenominator="%s"' % self.gds_format_double(self.maxScaleDenominator, input_name='maxScaleDenominator'))
if self.width is not None and 'width' not in already_processed:
already_processed.append('width')
outfile.write(' width="%s"' % self.gds_format_integer(self.width, input_name='width'))
if self.format is not None and 'format' not in already_processed:
already_processed.append('format')
outfile.write(' format=%s' % (quote_attrib(self.format), ))
def exportChildren(self, outfile, level, namespace_='', name_='LegendURL', fromsubclass_=False):
super(LegendURL, self).exportChildren(outfile, level, namespace_, name_, True)
pass
def hasContent_(self):
if (
self.valueOf_ or
super(LegendURL, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='LegendURL'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.height is not None and 'height' not in already_processed:
already_processed.append('height')
showIndent(outfile, level)
outfile.write('height = %d,\n' % (self.height,))
if self.minScaleDenominator is not None and 'minScaleDenominator' not in already_processed:
already_processed.append('minScaleDenominator')
showIndent(outfile, level)
outfile.write('minScaleDenominator = %e,\n' % (self.minScaleDenominator,))
if self.maxScaleDenominator is not None and 'maxScaleDenominator' not in already_processed:
already_processed.append('maxScaleDenominator')
showIndent(outfile, level)
outfile.write('maxScaleDenominator = %e,\n' % (self.maxScaleDenominator,))
if self.width is not None and 'width' not in already_processed:
already_processed.append('width')
showIndent(outfile, level)
outfile.write('width = %d,\n' % (self.width,))
if self.format is not None and 'format' not in already_processed:
already_processed.append('format')
showIndent(outfile, level)
outfile.write('format = %s,\n' % (self.format,))
super(LegendURL, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(LegendURL, self).exportLiteralChildren(outfile, level, name_)
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('height', node)
if value is not None and 'height' not in already_processed:
already_processed.append('height')
try:
self.height = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.height <= 0:
raise_parse_error(node, 'Invalid PositiveInteger')
value = find_attr_value_('minScaleDenominator', node)
if value is not None and 'minScaleDenominator' not in already_processed:
already_processed.append('minScaleDenominator')
try:
self.minScaleDenominator = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (minScaleDenominator): %s' % exp)
value = find_attr_value_('maxScaleDenominator', node)
if value is not None and 'maxScaleDenominator' not in already_processed:
already_processed.append('maxScaleDenominator')
try:
self.maxScaleDenominator = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (maxScaleDenominator): %s' % exp)
value = find_attr_value_('width', node)
if value is not None and 'width' not in already_processed:
already_processed.append('width')
try:
self.width = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.width <= 0:
raise_parse_error(node, 'Invalid PositiveInteger')
value = find_attr_value_('format', node)
if value is not None and 'format' not in already_processed:
already_processed.append('format')
self.format = value
super(LegendURL, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(LegendURL, self).buildChildren(child_, node, nodeName_, True)
pass
# end class LegendURL
class Style(DescriptionType):
"""This style is used when no style is specified"""
subclass = None
superclass = DescriptionType
def __init__(self, Title=None, Abstract=None, Keywords=None, isDefault=None, Identifier=None, LegendURL=None):
super(Style, self).__init__(Title, Abstract, Keywords, )
self.isDefault = _cast(bool, isDefault)
self.Identifier = Identifier
if LegendURL is None:
self.LegendURL = []
else:
self.LegendURL = LegendURL
def factory(*args_, **kwargs_):
if Style.subclass:
return Style.subclass(*args_, **kwargs_)
else:
return Style(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Identifier(self): return self.Identifier
def set_Identifier(self, Identifier): self.Identifier = Identifier
def get_LegendURL(self): return self.LegendURL
def set_LegendURL(self, LegendURL): self.LegendURL = LegendURL
def add_LegendURL(self, value): self.LegendURL.append(value)
def insert_LegendURL(self, index, value): self.LegendURL[index] = value
def get_isDefault(self): return self.isDefault
def set_isDefault(self, isDefault): self.isDefault = isDefault
def export(self, outfile, level, namespace_='', name_='Style', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='Style')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
# outfile.write(' xsi:type="Style"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Style'):
super(Style, self).exportAttributes(outfile, level, already_processed, namespace_, name_='Style')
if self.isDefault is not None and 'isDefault' not in already_processed:
already_processed.append('isDefault')
outfile.write(' isDefault="%s"' % self.gds_format_boolean(self.gds_str_lower(str(self.isDefault)), input_name='isDefault'))
def exportChildren(self, outfile, level, namespace_='', name_='Style', fromsubclass_=False):
super(Style, self).exportChildren(outfile, level, namespace_, name_, True)
if self.Identifier:
self.Identifier.export(outfile, level, namespace_, name_='Identifier', )
for LegendURL_ in self.LegendURL:
LegendURL_.export(outfile, level, namespace_, name_='LegendURL')
def hasContent_(self):
if (
self.Identifier is not None or
self.LegendURL or
super(Style, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='Style'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.isDefault is not None and 'isDefault' not in already_processed:
already_processed.append('isDefault')
showIndent(outfile, level)
outfile.write('isDefault = %s,\n' % (self.isDefault,))
super(Style, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(Style, self).exportLiteralChildren(outfile, level, name_)
if self.Identifier is not None:
showIndent(outfile, level)
outfile.write('Identifier=model_.Identifier(\n')
self.Identifier.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
outfile.write('LegendURL=[\n')
level += 1
for LegendURL_ in self.LegendURL:
showIndent(outfile, level)
outfile.write('model_.LegendURL(\n')
LegendURL_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('isDefault', node)
if value is not None and 'isDefault' not in already_processed:
already_processed.append('isDefault')
if value in ('true', '1'):
self.isDefault = True
elif value in ('false', '0'):
self.isDefault = False
else:
raise_parse_error(node, 'Bad boolean attribute')
super(Style, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Identifier':
obj_ = CodeType.factory()
obj_.build(child_)
self.set_Identifier(obj_)
elif nodeName_ == 'LegendURL':
obj_ = LegendURL.factory()
obj_.build(child_)
self.LegendURL.append(obj_)
super(Style, self).buildChildren(child_, node, nodeName_, True)
# end class Style
class LayerType(DatasetDescriptionSummaryBaseType):
subclass = None
superclass = DatasetDescriptionSummaryBaseType
def __init__(self, Title=None, Abstract=None, Keywords=None, WGS84BoundingBox=None, Identifier=None, BoundingBox=None, Metadata=None, DatasetDescriptionSummary=None, Style=None, Format=None, InfoFormat=None, Dimension=None, TileMatrixSetLink=None, ResourceURL=None):
super(LayerType, self).__init__(Title, Abstract, Keywords, WGS84BoundingBox, Identifier, BoundingBox, Metadata, DatasetDescriptionSummary, )
if Style is None:
self.Style = []
else:
self.Style = Style
if Format is None:
self.Format = []
else:
self.Format = Format
if InfoFormat is None:
self.InfoFormat = []
else:
self.InfoFormat = InfoFormat
if Dimension is None:
self.Dimension = []
else:
self.Dimension = Dimension
if TileMatrixSetLink is None:
self.TileMatrixSetLink = []
else:
self.TileMatrixSetLink = TileMatrixSetLink
if ResourceURL is None:
self.ResourceURL = []
else:
self.ResourceURL = ResourceURL
def factory(*args_, **kwargs_):
if LayerType.subclass:
return LayerType.subclass(*args_, **kwargs_)
else:
return LayerType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Style(self): return self.Style
def set_Style(self, Style): self.Style = Style
def add_Style(self, value): self.Style.append(value)
def insert_Style(self, index, value): self.Style[index] = value
def get_Format(self): return self.Format
def set_Format(self, Format): self.Format = Format
def add_Format(self, value): self.Format.append(value)
def insert_Format(self, index, value): self.Format[index] = value
def validate_MimeType(self, value):
# Validate type MimeType, a restriction on string.
pass
def get_InfoFormat(self): return self.InfoFormat
def set_InfoFormat(self, InfoFormat): self.InfoFormat = InfoFormat
def add_InfoFormat(self, value): self.InfoFormat.append(value)
def insert_InfoFormat(self, index, value): self.InfoFormat[index] = value
def get_Dimension(self): return self.Dimension
def set_Dimension(self, Dimension): self.Dimension = Dimension
def add_Dimension(self, value): self.Dimension.append(value)
def insert_Dimension(self, index, value): self.Dimension[index] = value
def get_TileMatrixSetLink(self): return self.TileMatrixSetLink
def set_TileMatrixSetLink(self, TileMatrixSetLink): self.TileMatrixSetLink = TileMatrixSetLink
def add_TileMatrixSetLink(self, value): self.TileMatrixSetLink.append(value)
def insert_TileMatrixSetLink(self, index, value): self.TileMatrixSetLink[index] = value
def get_ResourceURL(self): return self.ResourceURL
def set_ResourceURL(self, ResourceURL): self.ResourceURL = ResourceURL
def add_ResourceURL(self, value): self.ResourceURL.append(value)
def insert_ResourceURL(self, index, value): self.ResourceURL[index] = value
def export(self, outfile, level, namespace_='', name_='LayerType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='LayerType')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
# outfile.write(' xsi:type="LayerType"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='LayerType'):
super(LayerType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='LayerType')
def exportChildren(self, outfile, level, namespace_='', name_='LayerType', fromsubclass_=False):
super(LayerType, self).exportChildren(outfile, level, namespace_, name_, True)
for Style_ in self.Style:
Style_.export(outfile, level, namespace_, name_='Style')
for Format_ in self.Format:
showIndent(outfile, level)
outfile.write('<%sFormat>%s</%sFormat>\n' % (namespace_, self.gds_format_string(quote_xml(Format_).encode(ExternalEncoding), input_name='Format'), namespace_))
for InfoFormat_ in self.InfoFormat:
showIndent(outfile, level)
outfile.write('<%sInfoFormat>%s</%sInfoFormat>\n' % (namespace_, self.gds_format_string(quote_xml(InfoFormat_).encode(ExternalEncoding), input_name='InfoFormat'), namespace_))
for Dimension_ in self.Dimension:
Dimension_.export(outfile, level, namespace_, name_='Dimension')
for TileMatrixSetLink_ in self.TileMatrixSetLink:
TileMatrixSetLink_.export(outfile, level, namespace_, name_='TileMatrixSetLink')
for ResourceURL_ in self.ResourceURL:
ResourceURL_.export(outfile, level, namespace_, name_='ResourceURL')
def hasContent_(self):
if (
self.Style or
self.Format or
self.InfoFormat or
self.Dimension or
self.TileMatrixSetLink or
self.ResourceURL or
super(LayerType, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='LayerType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(LayerType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(LayerType, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('Style=[\n')
level += 1
for Style_ in self.Style:
showIndent(outfile, level)
outfile.write('model_.Style(\n')
Style_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('Format=[\n')
level += 1
for Format_ in self.Format:
showIndent(outfile, level)
outfile.write('%s,\n' % quote_python(Format_).encode(ExternalEncoding))
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('InfoFormat=[\n')
level += 1
for InfoFormat_ in self.InfoFormat:
showIndent(outfile, level)
outfile.write('%s,\n' % quote_python(InfoFormat_).encode(ExternalEncoding))
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('Dimension=[\n')
level += 1
for Dimension_ in self.Dimension:
showIndent(outfile, level)
outfile.write('model_.Dimension(\n')
Dimension_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('TileMatrixSetLink=[\n')
level += 1
for TileMatrixSetLink_ in self.TileMatrixSetLink:
showIndent(outfile, level)
outfile.write('model_.TileMatrixSetLink(\n')
TileMatrixSetLink_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('ResourceURL=[\n')
level += 1
for ResourceURL_ in self.ResourceURL:
showIndent(outfile, level)
outfile.write('model_.URLTemplateType(\n')
ResourceURL_.exportLiteral(outfile, level, name_='URLTemplateType')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(LayerType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Style':
obj_ = Style.factory()
obj_.build(child_)
self.Style.append(obj_)
elif nodeName_ == 'Format':
Format_ = child_.text
Format_ = self.gds_validate_string(Format_, node, 'Format')
self.Format.append(Format_)
self.validate_MimeType(self.Format) # validate type MimeType
elif nodeName_ == 'InfoFormat':
InfoFormat_ = child_.text
InfoFormat_ = self.gds_validate_string(InfoFormat_, node, 'InfoFormat')
self.InfoFormat.append(InfoFormat_)
self.validate_MimeType(self.InfoFormat) # validate type MimeType
elif nodeName_ == 'Dimension':
obj_ = Dimension.factory()
obj_.build(child_)
self.Dimension.append(obj_)
elif nodeName_ == 'TileMatrixSetLink':
obj_ = TileMatrixSetLink.factory()
obj_.build(child_)
self.TileMatrixSetLink.append(obj_)
elif nodeName_ == 'ResourceURL':
obj_ = URLTemplateType.factory()
obj_.build(child_)
self.ResourceURL.append(obj_)
super(LayerType, self).buildChildren(child_, node, nodeName_, True)
# end class LayerType
class ContentsType(ContentsBaseType):
subclass = None
superclass = ContentsBaseType
def __init__(self, DatasetDescriptionSummary=None, OtherSource=None, TileMatrixSet=None):
super(ContentsType, self).__init__(DatasetDescriptionSummary, OtherSource, )
if TileMatrixSet is None:
self.TileMatrixSet = []
else:
self.TileMatrixSet = TileMatrixSet
def factory(*args_, **kwargs_):
if ContentsType.subclass:
return ContentsType.subclass(*args_, **kwargs_)
else:
return ContentsType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_TileMatrixSet(self): return self.TileMatrixSet
def set_TileMatrixSet(self, TileMatrixSet): self.TileMatrixSet = TileMatrixSet
def add_TileMatrixSet(self, value): self.TileMatrixSet.append(value)
def insert_TileMatrixSet(self, index, value): self.TileMatrixSet[index] = value
def export(self, outfile, level, namespace_='', name_='ContentsType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='ContentsType')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
# outfile.write(' xsi:type="ContentsType"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ContentsType'):
super(ContentsType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ContentsType')
def exportChildren(self, outfile, level, namespace_='', name_='ContentsType', fromsubclass_=False):
super(ContentsType, self).exportChildren(outfile, level, namespace_, name_, True)
for TileMatrixSet_ in self.TileMatrixSet:
TileMatrixSet_.export(outfile, level, namespace_, name_='TileMatrixSet')
def hasContent_(self):
if (
self.TileMatrixSet or
super(ContentsType, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='ContentsType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(ContentsType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(ContentsType, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('TileMatrixSet=[\n')
level += 1
for TileMatrixSet_ in self.TileMatrixSet:
showIndent(outfile, level)
outfile.write('model_.TileMatrixSet(\n')
TileMatrixSet_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(ContentsType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'TileMatrixSet':
obj_ = TileMatrixSet.factory()
obj_.build(child_)
self.TileMatrixSet.append(obj_)
super(ContentsType, self).buildChildren(child_, node, nodeName_, True)
# end class ContentsType
class Capabilities(CapabilitiesBaseType):
"""XML defines the WMTS GetCapabilities operation response.
ServiceMetadata document provides clients with service metadata
about a specific service instance, including metadata about the
tightly-coupled data served. If the server does not implement
the updateSequence parameter, the server SHALL always return the
complete Capabilities document, without the updateSequence
parameter. When the server implements the updateSequence
parameter and the GetCapabilities operation request included the
updateSequence parameter with the current value, the server
SHALL return this element with only the "version" and
"updateSequence" attributes. Otherwise, all optional elements
SHALL be included or not depending on the actual value of the
Contents parameter in the GetCapabilities operation request."""
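    # Illustrative minimal response (added) for the updateSequence case
    # described above; the attribute values are hypothetical:
    #
    #   <Capabilities version="1.0.0" updateSequence="42"/>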
subclass = None
superclass = CapabilitiesBaseType
def __init__(self, updateSequence=None, version=None, ServiceIdentification=None, ServiceProvider=None, OperationsMetadata=None, Contents=None, Themes=None, WSDL=None, ServiceMetadataURL=None):
super(Capabilities, self).__init__(updateSequence, version, ServiceIdentification, ServiceProvider, OperationsMetadata, )
self.Contents = Contents
if Themes is None:
self.Themes = []
else:
self.Themes = Themes
if WSDL is None:
self.WSDL = []
else:
self.WSDL = WSDL
if ServiceMetadataURL is None:
self.ServiceMetadataURL = []
else:
self.ServiceMetadataURL = ServiceMetadataURL
def factory(*args_, **kwargs_):
if Capabilities.subclass:
return Capabilities.subclass(*args_, **kwargs_)
else:
return Capabilities(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Contents(self): return self.Contents
def set_Contents(self, Contents): self.Contents = Contents
def get_Themes(self): return self.Themes
def set_Themes(self, Themes): self.Themes = Themes
def add_Themes(self, value): self.Themes.append(value)
def insert_Themes(self, index, value): self.Themes[index] = value
def get_WSDL(self): return self.WSDL
def set_WSDL(self, WSDL): self.WSDL = WSDL
def add_WSDL(self, value): self.WSDL.append(value)
def insert_WSDL(self, index, value): self.WSDL[index] = value
def get_ServiceMetadataURL(self): return self.ServiceMetadataURL
def set_ServiceMetadataURL(self, ServiceMetadataURL): self.ServiceMetadataURL = ServiceMetadataURL
def add_ServiceMetadataURL(self, value): self.ServiceMetadataURL.append(value)
def insert_ServiceMetadataURL(self, index, value): self.ServiceMetadataURL[index] = value
def export(self, outfile, level, namespace_='', name_='Capabilities', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
self.exportAttributes(outfile, level, [], namespace_, name_='Capabilities')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xmlns="http://www.opengis.net/wmts/1.0"')
outfile.write(' xmlns:ows="http://www.opengis.net/ows/1.1"')
outfile.write(' xmlns:xlink="http://www.w3.org/1999/xlink"')
outfile.write(' xmlns:gml="http://www.opengis.net/gml"')
outfile.write(' xsi:schemaLocation="http://www.opengis.net/wmts/1.0 http://schemas.opengis.net/wmts/1.0/wmtsGetCapabilities_response.xsd"')
# outfile.write(' xsi:type="Capabilities"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Capabilities'):
super(Capabilities, self).exportAttributes(outfile, level, already_processed, namespace_, name_='Capabilities')
def exportChildren(self, outfile, level, namespace_='', name_='Capabilities', fromsubclass_=False):
super(Capabilities, self).exportChildren(outfile, level, namespace_, name_, True)
if self.Contents:
self.Contents.export(outfile, level, namespace_, name_='Contents')
for Themes_ in self.Themes:
Themes_.export(outfile, level, namespace_, name_='Themes')
for WSDL_ in self.WSDL:
WSDL_.export(outfile, level, namespace_, name_='WSDL')
for ServiceMetadataURL_ in self.ServiceMetadataURL:
ServiceMetadataURL_.export(outfile, level, namespace_, name_='ServiceMetadataURL')
def hasContent_(self):
if (
self.Contents is not None or
self.Themes or
self.WSDL or
self.ServiceMetadataURL or
super(Capabilities, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='Capabilities'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(Capabilities, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(Capabilities, self).exportLiteralChildren(outfile, level, name_)
if self.Contents is not None:
showIndent(outfile, level)
outfile.write('Contents=model_.ContentsType(\n')
self.Contents.exportLiteral(outfile, level, name_='Contents')
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
outfile.write('Themes=[\n')
level += 1
for Themes_ in self.Themes:
showIndent(outfile, level)
outfile.write('model_.Themes(\n')
Themes_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('WSDL=[\n')
level += 1
for WSDL_ in self.WSDL:
showIndent(outfile, level)
outfile.write('model_.OnlineResourceType(\n')
WSDL_.exportLiteral(outfile, level, name_='OnlineResourceType')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('ServiceMetadataURL=[\n')
level += 1
for ServiceMetadataURL_ in self.ServiceMetadataURL:
showIndent(outfile, level)
outfile.write('model_.OnlineResourceType(\n')
ServiceMetadataURL_.exportLiteral(outfile, level, name_='OnlineResourceType')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(Capabilities, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Contents':
obj_ = ContentsType.factory()
obj_.build(child_)
self.set_Contents(obj_)
elif nodeName_ == 'Themes':
obj_ = Themes.factory()
obj_.build(child_)
self.Themes.append(obj_)
elif nodeName_ == 'WSDL':
obj_ = OnlineResourceType.factory()
obj_.build(child_)
self.WSDL.append(obj_)
elif nodeName_ == 'ServiceMetadataURL':
obj_ = OnlineResourceType.factory()
obj_.build(child_)
self.ServiceMetadataURL.append(obj_)
super(Capabilities, self).buildChildren(child_, node, nodeName_, True)
# end class Capabilities
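# Construction sketch (added, illustrative only): the names below are the
# generated classes defined above and the field values are hypothetical.
#
#   caps = Capabilities(version='1.0.0', updateSequence='0')
#   caps.set_Contents(ContentsType())
#   caps.export(sys.stdout, 0, name_='Capabilities')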
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
print USAGE_TEXT
sys.exit(1)
def get_root_tag(node):
tag = Tag_pattern_.match(node.tag).groups()[-1]
rootClass = globals().get(tag)
return tag, rootClass
def parse(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'Capabilities'
rootClass = Capabilities
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(sys.stdout, 0, name_=rootTag,
namespacedef_='')
return rootObj
def parseString(inString):
from StringIO import StringIO
doc = parsexml_(StringIO(inString))
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'Capabilities'
rootClass = Capabilities
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(sys.stdout, 0, name_="Capabilities",
namespacedef_='')
return rootObj
def parseLiteral(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'Capabilities'
rootClass = Capabilities
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('#from capabilities import *\n\n')
sys.stdout.write('import capabilities as model_\n\n')
sys.stdout.write('rootObj = model_.rootTag(\n')
rootObj.exportLiteral(sys.stdout, 0, name_=rootTag)
sys.stdout.write(')\n')
return rootObj
def main():
args = sys.argv[1:]
if len(args) == 1:
parse(args[0])
else:
usage()
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
__all__ = [
"AbstractMetaData",
"AbstractReferenceBaseType",
"AcceptFormatsType",
"AcceptVersionsType",
"AddressType",
"AllowedValues",
"AnyValue",
"BasicIdentificationType",
"BoundingBoxType",
"Capabilities",
"CapabilitiesBaseType",
"CodeType",
"ContactType",
"ContentsBaseType",
"ContentsType",
"DCP",
"DatasetDescriptionSummaryBaseType",
"DescriptionType",
"Dimension",
"DomainMetadataType",
"DomainType",
"ExceptionReport",
"ExceptionType",
"GetCapabilitiesType",
"GetResourceByIdType",
"HTTP",
"IdentificationType",
"KeywordsType",
"LanguageStringType",
"LayerType",
"LegendURL",
"ManifestType",
"MetadataType",
"NoValues",
"OnlineResourceType",
"Operation",
"OperationsMetadata",
"RangeType",
"ReferenceGroupType",
"ReferenceType",
"RequestMethodType",
"Resource",
"ResponsiblePartySubsetType",
"ResponsiblePartyType",
"SectionsType",
"ServiceIdentification",
"ServiceProvider",
"ServiceReferenceType",
"Style",
"TelephoneType",
"Theme",
"Themes",
"TileMatrix",
"TileMatrixLimits",
"TileMatrixSet",
"TileMatrixSetLimits",
"TileMatrixSetLink",
"URLTemplateType",
"UnNamedDomainType",
"ValueType",
"ValuesReference",
"WGS84BoundingBoxType"
]
| apache-2.0 |
schets/scikit-learn | examples/cluster/plot_digits_linkage.py | 366 | 2959 | """
=============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage options for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the "rich get richer" behavior of
agglomerative clustering, which tends to create uneven cluster sizes.
This behavior is especially pronounced for the average linkage strategy,
which ends up with a couple of singleton clusters.
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
digits = datasets.load_digits(n_class=10)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
np.random.seed(0)
def nudge_images(X, y):
# Having a larger dataset shows more clearly the behavior of the
# methods, but we multiply the size of the dataset only by 2, as the
# cost of the hierarchical clustering methods are strongly
# super-linear in n_samples
shift = lambda x: ndimage.shift(x.reshape((8, 8)),
.3 * np.random.normal(size=2),
mode='constant',
).ravel()
X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])
Y = np.concatenate([y, y], axis=0)
return X, Y
X, y = nudge_images(X, y)
#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, X, labels, title=None):
x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
X_red = (X_red - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
for i in range(X_red.shape[0]):
plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
color=plt.cm.spectral(labels[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([])
plt.yticks([])
if title is not None:
plt.title(title, size=17)
plt.axis('off')
plt.tight_layout()
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")
from sklearn.cluster import AgglomerativeClustering
for linkage in ('ward', 'average', 'complete'):
clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
t0 = time()
clustering.fit(X_red)
print("%s : %.2fs" % (linkage, time() - t0))
plot_clustering(X_red, X, clustering.labels_, "%s linkage" % linkage)
plt.show()
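# Added note (not part of the original example): the uneven cluster sizes
# described in the docstring can be checked directly, e.g. for the last
# fitted model:
#
#   print(np.bincount(clustering.labels_))
#
# Average linkage typically yields a few dominant clusters plus several
# singletons, whereas Ward produces more balanced counts.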
| bsd-3-clause |
roberttk01/TensorFlowTutorial | TensorFlowTutorial/pt35_handling_non-numerical_data.py | 1 | 1098 | import matplotlib.pyplot as plt
from matplotlib import style
style.use('ggplot')
import numpy as np
from sklearn.cluster import KMeans
from sklearn import preprocessing, cross_validation
import pandas as pd
def handle_non_numerical_data(df):
columns = df.columns.values
for column in columns:
text_digit_vals = {}
def convert_to_int(val):
return text_digit_vals[val]
if df[column].dtype != np.int64 and df[column].dtype != np.float64:
column_contents = df[column].values.tolist()
unique_elements = set(column_contents)
x = 0
for unique in unique_elements:
if unique not in text_digit_vals:
text_digit_vals[unique] = x
x += 1
df[column] = list(map(convert_to_int, df[column]))
return df
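# Note on the helper above (added): for a hypothetical text column such as
# 'sex' with the values {'male', 'female'}, handle_non_numerical_data replaces
# the strings with small integer ids, e.g. 'male' -> 0 and 'female' -> 1.
# The exact mapping is arbitrary because Python sets are unordered, so the
# ids may differ between runs.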
df = pd.read_excel('./etc/titanic.xls')
# print(df.head())
df.drop(['body', 'name'], 1, inplace=True)
df = df.convert_objects(convert_numeric=True)  # assign the result: convert_objects returns a new DataFrame
df.fillna(0, inplace=True)
# print(df.head())
df = handle_non_numerical_data(df)
print(df.head()) | mit |
moonbury/notebooks | github/MasteringMLWithScikit-learn/8365OS_10_Codes/learn-xor.py | 4 | 1615 | from pybrain.datasets import SupervisedDataSet
from pybrain.structure import SigmoidLayer, LinearLayer, FullConnection, FeedForwardNetwork, RecurrentNetwork, Network
from pybrain.supervised.trainers import BackpropTrainer
network = Network()
input_layer = LinearLayer(2)
hidden_layer = SigmoidLayer(5)
output_layer = LinearLayer(1)
network.addInputModule(input_layer)
network.addModule(hidden_layer)
network.addOutputModule(output_layer)
input_to_hidden = FullConnection(input_layer, hidden_layer)
hidden_to_output = FullConnection(hidden_layer, output_layer)
network.addConnection(input_to_hidden)
network.addConnection(hidden_to_output)
network.sortModules()
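# Added note: sortModules() finalizes the network topology (topological sort
# of the modules and initialization of the connection parameters) and is
# required before the network can be activated or trained.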
xor_dataset = SupervisedDataSet(2,1)
xor_dataset.addSample((0, 0), (0, ))
xor_dataset.addSample((0, 1), (1, ))
xor_dataset.addSample((1, 0), (1, ))
xor_dataset.addSample((1, 1), (0, ))
trainer = BackpropTrainer(module=network, dataset=xor_dataset, verbose=True,
momentum=0.00,
learningrate=0.10,
weightdecay=0.0,
lrdecay=1.0)
error = 1
epochsToTrain = 0
while error > 0.0001:
epochsToTrain += 1
error = trainer.train()
print ''
print 'Trained after', epochsToTrain, 'epochs'
# The network has been trained, now test it against our original data.
# Consider any number above 0.5 to be evaluated as 1, and below to be 0
print ''
print 'Final Results'
print '--------------'
results = network.activateOnDataset(xor_dataset)
for i in range(len(results)):
print xor_dataset['input'][i], ' => ', (results[i] > 0.5), ' (',results[i],')'
| gpl-3.0 |
rflamary/POT | examples/plot_compute_emd.py | 2 | 2405 | # -*- coding: utf-8 -*-
"""
=================
Plot multiple EMD
=================
Shows how to compute multiple EMD and Sinkhorn distances with two different
ground metrics and plot their values for different distributions.
"""
# Author: Remi Flamary <remi.flamary@unice.fr>
#
# License: MIT License
import numpy as np
import matplotlib.pylab as pl
import ot
from ot.datasets import make_1D_gauss as gauss
##############################################################################
# Generate data
# -------------
#%% parameters
n = 100 # nb bins
n_target = 50 # nb target distributions
# bin positions
x = np.arange(n, dtype=np.float64)
lst_m = np.linspace(20, 90, n_target)
# Gaussian distributions
a = gauss(n, m=20, s=5) # m= mean, s= std
B = np.zeros((n, n_target))
for i, m in enumerate(lst_m):
B[:, i] = gauss(n, m=m, s=5)
# loss matrix and normalization
M = ot.dist(x.reshape((n, 1)), x.reshape((n, 1)), 'euclidean')
M /= M.max()
M2 = ot.dist(x.reshape((n, 1)), x.reshape((n, 1)), 'sqeuclidean')
M2 /= M2.max()
##############################################################################
# Plot data
# ---------
#%% plot the distributions
pl.figure(1)
pl.subplot(2, 1, 1)
pl.plot(x, a, 'b', label='Source distribution')
pl.title('Source distribution')
pl.subplot(2, 1, 2)
pl.plot(x, B, label='Target distributions')
pl.title('Target distributions')
pl.tight_layout()
##############################################################################
# Compute EMD for the different losses
# ------------------------------------
#%% Compute and plot distributions and loss matrix
d_emd = ot.emd2(a, B, M) # direct computation of EMD
d_emd2 = ot.emd2(a, B, M2) # direct computation of EMD with loss M2
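# Added note: because B stacks the target histograms as columns, ot.emd2 is
# expected to return one transport cost per column, i.e. one EMD value per
# target distribution (and similarly for ot.sinkhorn2 below).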
pl.figure(2)
pl.plot(d_emd, label='Euclidean EMD')
pl.plot(d_emd2, label='Squared Euclidean EMD')
pl.title('EMD distances')
pl.legend()
##############################################################################
# Compute Sinkhorn for the different losses
# -----------------------------------------
#%%
reg = 1e-2
d_sinkhorn = ot.sinkhorn2(a, B, M, reg)
d_sinkhorn2 = ot.sinkhorn2(a, B, M2, reg)
pl.figure(2)
pl.clf()
pl.plot(d_emd, label='Euclidean EMD')
pl.plot(d_emd2, label='Squared Euclidean EMD')
pl.plot(d_sinkhorn, '+', label='Euclidean Sinkhorn')
pl.plot(d_sinkhorn2, '+', label='Squared Euclidean Sinkhorn')
pl.title('EMD distances')
pl.legend()
pl.show()
| mit |
uglyboxer/learn_nums | depricated/learn_nums.py | 1 | 1384 | from sklearn import datasets
from check_new import check_new
from learn_loop import learn_loop
""" A rudimentary implementation of a perceptron
Takes in a data set with desired outputs & new unmatched data set
Outputs guesses against new data
"""
def learn_nums(digits, answers):
""" Takes in a data set and the appropriate answers
Returns the appropriate weight set
"""
weight_set_of = [learn_loop(digits, answers, x) for x in range(10)]
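    # Added note (one-vs-rest scheme): learn_loop is expected to return one
    # weight vector per digit class, so weight_set_of[d] holds the weights
    # used to recognize digit d (d = 0..9).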
return weight_set_of
def run_blind_data(digits, answers, weights):
""" Brings in a pack of untested data and learned weights
Returns guesses and success ratio
"""
successes = 0
for temp in range(len(digits)):
guess = check_new(digits[temp], weights)
actual = answers[temp]
print("Computer's guess: {} Actual #: {}".format(guess, actual))
if guess == actual:
successes += 1
success_ratio = successes/len(digits)
return successes, success_ratio
if __name__ == '__main__':
digits = datasets.load_digits()
answers, answers_to_test = digits.target[:1000], digits.target[1000:]
sliced_digits, unlearned_digits = digits.data[:1000], digits.data[1000:]
weights = learn_nums(sliced_digits, answers)
results = run_blind_data(unlearned_digits, answers_to_test, weights)
print("Computer was right {} times out of {}".format(results[0], len(digits.data)-1000))
print("For a correct percentage of {}%".format(results[1])) | unlicense |
nhuntwalker/astroML | book_figures/chapter6/fig_great_wall.py | 4 | 4280 | """
Great Wall Density
------------------
Figure 6.4
Density estimation for galaxies within the SDSS "Great Wall." The upper-left
panel shows points that are galaxies, projected by their spatial locations
onto the equatorial plane (declination ~ 0 degrees). The remaining panels
show estimates of the density of these points using kernel density estimation
(with a Gaussian kernel with width 5Mpc), a K-nearest-neighbor estimator
(eq. 6.15) optimized for a small-scale structure (with K = 5), and a
K-nearest-neighbor estimator optimized for a large-scale structure
(with K = 40).
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import LogNorm
from scipy.spatial import cKDTree
from astroML.datasets import fetch_great_wall
from astroML.density_estimation import KDE, KNeighborsDensity
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Fetch the great wall data
X = fetch_great_wall()
#------------------------------------------------------------
# Create the grid on which to evaluate the results
Nx = 50
Ny = 125
xmin, xmax = (-375, -175)
ymin, ymax = (-300, 200)
#------------------------------------------------------------
# Evaluate for several models
Xgrid = np.vstack(map(np.ravel, np.meshgrid(np.linspace(xmin, xmax, Nx),
np.linspace(ymin, ymax, Ny)))).T
kde = KDE(metric='gaussian', h=5)
dens_KDE = kde.fit(X).eval(Xgrid).reshape((Ny, Nx))
knn5 = KNeighborsDensity('bayesian', 5)
dens_k5 = knn5.fit(X).eval(Xgrid).reshape((Ny, Nx))
knn40 = KNeighborsDensity('bayesian', 40)
dens_k40 = knn40.fit(X).eval(Xgrid).reshape((Ny, Nx))
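# Added note: the three estimates above correspond to the last three panels
# described in the docstring -- a Gaussian KDE with a 5 Mpc kernel and two
# Bayesian K-nearest-neighbor estimators tuned for small-scale (K=5) and
# large-scale (K=40) structure.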
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 2.2))
fig.subplots_adjust(left=0.12, right=0.95, bottom=0.2, top=0.9,
hspace=0.01, wspace=0.01)
# First plot: scatter the points
ax1 = plt.subplot(221, aspect='equal')
ax1.scatter(X[:, 1], X[:, 0], s=1, lw=0, c='k')
ax1.text(0.95, 0.9, "input", ha='right', va='top',
transform=ax1.transAxes,
bbox=dict(boxstyle='round', ec='k', fc='w'))
# Second plot: KDE
ax2 = plt.subplot(222, aspect='equal')
ax2.imshow(dens_KDE.T, origin='lower', norm=LogNorm(),
extent=(ymin, ymax, xmin, xmax), cmap=plt.cm.binary)
ax2.text(0.95, 0.9, "KDE: Gaussian $(h=5)$", ha='right', va='top',
transform=ax2.transAxes,
bbox=dict(boxstyle='round', ec='k', fc='w'))
# Third plot: KNN, k=5
ax3 = plt.subplot(223, aspect='equal')
ax3.imshow(dens_k5.T, origin='lower', norm=LogNorm(),
extent=(ymin, ymax, xmin, xmax), cmap=plt.cm.binary)
ax3.text(0.95, 0.9, "$k$-neighbors $(k=5)$", ha='right', va='top',
transform=ax3.transAxes,
bbox=dict(boxstyle='round', ec='k', fc='w'))
# Fourth plot: KNN, k=40
ax4 = plt.subplot(224, aspect='equal')
ax4.imshow(dens_k40.T, origin='lower', norm=LogNorm(),
extent=(ymin, ymax, xmin, xmax), cmap=plt.cm.binary)
ax4.text(0.95, 0.9, "$k$-neighbors $(k=40)$", ha='right', va='top',
transform=ax4.transAxes,
bbox=dict(boxstyle='round', ec='k', fc='w'))
for ax in [ax1, ax2, ax3, ax4]:
ax.set_xlim(ymin, ymax - 0.01)
ax.set_ylim(xmin, xmax)
for ax in [ax1, ax2]:
ax.xaxis.set_major_formatter(plt.NullFormatter())
for ax in [ax3, ax4]:
ax.set_xlabel('$y$ (Mpc)')
for ax in [ax2, ax4]:
ax.yaxis.set_major_formatter(plt.NullFormatter())
for ax in [ax1, ax3]:
ax.set_ylabel('$x$ (Mpc)')
plt.show()
| bsd-2-clause |
GabrielRubin/TC2 | Client/MLTests.py | 1 | 2724 | import numpy as np
import pandas as pd
import scipy.stats as stats
import matplotlib.pyplot as plt
import sklearn
import csv
import seaborn as sns
from matplotlib import rcParams
from sklearn.datasets import load_boston
from sklearn.linear_model import LinearRegression
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.svm import SVR
from Tkinter import Tk
from tkFileDialog import askopenfilename
import cPickle
class MyDataFile():
def __init__(self, data, target, featureNames):
self.data = data
self.target = target
self.featureNames = featureNames
def LoadDataset(path, fileName):
with open("{0}/{1}.csv".format(path, fileName)) as csv_file:
data_file = csv.reader(csv_file)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for i, sample in enumerate(data_file):
data[i] = np.asarray(sample[:-1], dtype=np.float64)
target[i] = np.asarray(sample[-1], dtype=np.int)
return MyDataFile(data=data, target=target, featureNames=temp[2:len(temp)-1])
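# Illustrative counterpart to LoadDataset (not part of the original script): the CSV it
# expects starts with a header row "n_samples, n_features, <feature names...>, <target name>"
# and then holds one row per sample with the feature values followed by the integer target.
def SaveDatasetExample(path, fileName, data, target, featureNames, targetName="TargetVP"):
    with open("{0}/{1}.csv".format(path, fileName), "wb") as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow([len(data), len(featureNames)] + list(featureNames) + [targetName])
        for row, label in zip(data, target):
            writer.writerow(list(row) + [label])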
sns.set_style("whitegrid")
sns.set_context("poster")
gameData = LoadDataset("GameData", "allCSVData")
dataFrame = pd.DataFrame(gameData.data)
dataFrame.columns = gameData.featureNames
dataFrame['TargetVP'] = gameData.target
X = dataFrame.drop('TargetVP', axis = 1)
Y = dataFrame['TargetVP']
print(len(X))
X_train, X_test, Y_train, Y_test = sklearn.model_selection.train_test_split(X, Y, test_size = 0.33, random_state = 5)
isNew = True
lm = None
if isNew:
lm = MLPRegressor()
lm.fit(X_train, Y_train)
else:
Tk().withdraw()
filename = askopenfilename(filetypes=(("Model files", "*.mod"),
("All files", "*.*")))
with open('{0}'.format(filename), 'rb') as handle:
lm = cPickle.load(handle)
if lm is not None:
lm.fit(X_train, Y_train)
if lm is not None:
Y_pred = lm.predict(X_test)
    pointsSize = [1 for n in range(len(Y_test))]
plt.scatter(Y_test, Y_pred, s=pointsSize)
plt.xlabel("Total Victory Points (player - all) $Y_i$")
plt.ylabel("Predicted Total VP $\hat{Y}_i$")
plt.title("Total VP vs Predicted Total VP: $Y_i$ vs $\hat{Y}_i$")
plt.show()
mse = sklearn.metrics.mean_squared_error(Y_test, Y_pred)
print(mse)
yey = sklearn.metrics.r2_score(Y_test, Y_pred)
print(yey)
with open("Models/mainModel.mod", 'wb') as handle:
cPickle.dump(lm, handle, protocol=cPickle.HIGHEST_PROTOCOL)
else:
print("Error! lm is None!") | gpl-3.0 |
pravsripad/mne-python | tutorials/preprocessing/59_head_positions.py | 13 | 3722 | # -*- coding: utf-8 -*-
"""
.. _tut-head-pos:
================================================
Extracting and visualizing subject head movement
================================================
Continuous head movement can be encoded during MEG recordings by use of
HPI coils that continuously emit sinusoidal signals. These signals can then be
extracted from the recording and used to estimate head position as a function
of time. Here we show an example of how to do this, and how to visualize
the result.
HPI frequencies
---------------
First let's load a short bit of raw data where the subject intentionally moved
their head during the recording. Its power spectral density shows five peaks
(most clearly visible in the gradiometers) corresponding to the HPI coil
frequencies, plus other peaks related to power line interference (60 Hz and
harmonics).
"""
# Authors: Eric Larson <larson.eric.d@gmail.com>
# Richard Höchenberger <richard.hoechenberger@gmail.com>
# Daniel McCloy <dan@mccloy.info>
#
# License: BSD-3-Clause
# %%
from os import path as op
import mne
data_path = op.join(mne.datasets.testing.data_path(verbose=True), 'SSS')
fname_raw = op.join(data_path, 'test_move_anon_raw.fif')
raw = mne.io.read_raw_fif(fname_raw, allow_maxshield='yes').load_data()
raw.plot_psd()
# %%
# We can use `mne.chpi.get_chpi_info` to retrieve the coil frequencies,
# the index of the channel indicating when which coil was switched on, and the
# respective "event codes" associated with each coil's activity.
chpi_freqs, ch_idx, chpi_codes = mne.chpi.get_chpi_info(info=raw.info)
print(f'cHPI coil frequencies extracted from raw: {chpi_freqs} Hz')
# %%
# Estimating continuous head position
# -----------------------------------
#
# First, let's extract the HPI coil amplitudes as a function of time:
chpi_amplitudes = mne.chpi.compute_chpi_amplitudes(raw)
# %%
# Second, let's compute time-varying HPI coil locations from these:
chpi_locs = mne.chpi.compute_chpi_locs(raw.info, chpi_amplitudes)
# %%
# Lastly, compute head positions from the coil locations:
head_pos = mne.chpi.compute_head_pos(raw.info, chpi_locs, verbose=True)
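# Illustrative extra step (not in the original tutorial; the file name is made up):
# the positions can be written out and read back, as the next note mentions.
mne.chpi.write_head_pos('test_move_anon_raw.pos', head_pos)
head_pos = mne.chpi.read_head_pos('test_move_anon_raw.pos')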
# %%
# Note that these can then be written to disk or read from disk with
# :func:`mne.chpi.write_head_pos` and :func:`mne.chpi.read_head_pos`,
# respectively.
#
# Visualizing continuous head position
# ------------------------------------
#
# We can plot as traces, which is especially useful for long recordings:
# sphinx_gallery_thumbnail_number = 2
mne.viz.plot_head_positions(head_pos, mode='traces')
# %%
# Or we can visualize them as a continuous field (with the vectors pointing
# in the head-upward direction):
mne.viz.plot_head_positions(head_pos, mode='field')
# %%
# These head positions can then be used with
# :func:`mne.preprocessing.maxwell_filter` to compensate for movement,
# or with :func:`mne.preprocessing.annotate_movement` to mark segments as
# bad that deviate too much from the average head position.
#
#
# Computing SNR of the HPI signal
# -------------------------------
#
# It is also possible to compute the SNR of the continuous HPI measurements.
# This can be a useful proxy for head position along the vertical dimension,
# i.e., it can indicate the distance between the HPI coils and the MEG sensors.
# Using `~mne.chpi.compute_chpi_snr`, the HPI power and SNR are computed
# separately for each MEG sensor type and each HPI coil (frequency), along with
# the residual power for each sensor type. The results can then be visualized
# with `~mne.viz.plot_chpi_snr`. Here we'll just show a few seconds, for speed:
raw.crop(tmin=5, tmax=10)
snr_dict = mne.chpi.compute_chpi_snr(raw)
fig = mne.viz.plot_chpi_snr(snr_dict)
| bsd-3-clause |
google/paxml | paxml/base_experiment.py | 1 | 4624 | # coding=utf-8
# Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of a ML experiment.
Specifically, BaseExperiment encapsulates all the hyperparameters related
to a specific ML experiment.
"""
import abc
from typing import Dict, List, Type, TypeVar
from paxml import automl
from paxml import base_task
from praxis import base_input
_BaseExperimentT = TypeVar('_BaseExperimentT', bound='BaseExperiment')
BaseExperimentT = Type[_BaseExperimentT]
class BaseExperiment(metaclass=abc.ABCMeta):
"""Encapsulates the hyperparameters of an experiment."""
# p.is_training on each input param is used to determine whether
# the dataset is used for training or eval.
# All training and eval datasets must have unique names.
@abc.abstractmethod
def datasets(self) -> List[base_input.BaseInput.HParams]:
"""Returns the list of dataset parameters."""
def training_dataset(self) -> base_input.BaseInput.HParams:
"""Returns the tentatively unique training split.
Raises a ValueError exception if there is no training split or there are
multiple of them.
"""
training_splits = [s for s in self.datasets() if s.is_training]
if not training_splits:
raise ValueError(
'Could not find any training split dataset in this experiment '
f'config (`{self.datasets()}`).')
if len(training_splits) > 1:
raise ValueError(
'Found multiple training split datasets in this experiment '
f'config (`{self.datasets()}`).')
return training_splits[0]
# Optional. Returns a list of datasets to be decoded.
# When specified, all decoder datasets must have unique names.
def decoder_datasets(self) -> List[base_input.BaseInput.HParams]:
"""Returns the list of dataset parameters for decoder."""
return []
@abc.abstractmethod
def task(self) -> base_task.BaseTask.HParams:
"""Returns the task parameters."""
def get_input_specs_provider_params(
self) -> base_input.BaseInputSpecsProvider.HParams:
"""Returns the hparams of the input specs provider.
By default, it retrieves the input specs from the training input pipeline
(hence, required to exist). But the method can be overridden in derived
classes to return a different input specs provider, which directly
returns the specs.
Returns:
An InputSpecsProvider instance.
Raises:
A ValueError if there is no training set. In this case, the user must
override this method to provide the input specs for model weight
initialization.
"""
# TODO(b/236417790): Make this method fully abstract and enforce users to
# provide input specs.
input_p = self.training_dataset()
return base_input.DatasetInputSpecsProvider.HParams(input_p=input_p)
def validate(self) -> None:
"""Validates the experiment config but raises if misconfigured."""
return
def search(self) -> automl.SearchHParams:
"""Returns the parameters for AutoML search."""
raise NotImplementedError(
'Please implement `search` method for your experiment for tuning.')
def sub_experiments(self) -> Dict[str, Type['BaseExperiment']]:
"""Creates sub-experiments for joint tuning.
A PAX experiment can have multiple sub-experiments during tuning, which
will be included in a single trial and run in sequence. Each sub-experiment
is described by an ID (str) and a `BaseExperiment` subclass, therefore,
PAX users can include multiple PAX experiments in the same tuning task and
use their metrics to compute tuning rewards. Please note that when a PAX
    experiment class is included as a sub-experiment of another experiment, its
    own sub-experiments will not be included. Users can also programmatically
    create new classes based on the current class, by overriding class
    attributes or methods.
Returns:
A dict of sub-experiment ID to sub-experiment class.
"""
return {'': self.__class__}
def __init_subclass__(cls):
"""Modifications to the subclasses."""
automl.enable_class_level_hyper_primitives(cls)
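# A minimal sketch (not part of paxml) of a concrete experiment that overrides
# `sub_experiments` to tune two programmatically derived variants in one trial.
# The class names, the LEARNING_RATE attribute and the trivial datasets()/task()
# bodies are illustrative assumptions, not paxml APIs.
class _SketchExperiment(BaseExperiment):
  LEARNING_RATE = 1e-3
  def datasets(self) -> List[base_input.BaseInput.HParams]:
    return []  # a real experiment would return its input pipeline configs here
  def task(self) -> base_task.BaseTask.HParams:
    raise NotImplementedError('illustrative sketch only')
  def sub_experiments(self) -> Dict[str, Type['BaseExperiment']]:
    # Derive a second variant by overriding a class attribute.
    high_lr = type('_SketchExperimentHighLR', (type(self),),
                   dict(LEARNING_RATE=10 * self.LEARNING_RATE))
    return {'base': type(self), 'high_lr': high_lr}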
| apache-2.0 |
nhuntwalker/astroML | book_figures/chapter7/fig_spec_decompositions.py | 4 | 4673 | """
SDSS spectra Decompositions
---------------------------
Figure 7.4
A comparison of the decomposition of SDSS spectra using PCA (left panel -
see Section 7.3.1), ICA (middle panel - see Section 7.6) and NMF (right panel
- see Section 7.4). The rank of the component increases from top to bottom. For
the ICA and PCA the first component is the mean spectrum (NMF does not require
mean subtraction). All of these techniques isolate a common set of spectral
features (identifying features associated with the continuum and line
emission). The ordering of the spectral components is technique dependent.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from sklearn.decomposition import NMF
from sklearn.decomposition import FastICA
from sklearn.decomposition import RandomizedPCA
from astroML.datasets import sdss_corrected_spectra
from astroML.decorators import pickle_results
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Download data
data = sdss_corrected_spectra.fetch_sdss_corrected_spectra()
spectra = sdss_corrected_spectra.reconstruct_spectra(data)
wavelengths = sdss_corrected_spectra.compute_wavelengths(data)
#----------------------------------------------------------------------
# Compute PCA, ICA, and NMF components
# we'll save the results so that they can be re-used
@pickle_results('spec_decompositions.pkl')
def compute_PCA_ICA_NMF(n_components=5):
spec_mean = spectra.mean(0)
# PCA: use randomized PCA for speed
pca = RandomizedPCA(n_components - 1, random_state=0)
pca.fit(spectra)
pca_comp = np.vstack([spec_mean,
pca.components_])
# ICA treats sequential observations as related. Because of this, we need
# to fit with the transpose of the spectra
ica = FastICA(n_components - 1, random_state=0)
ica.fit(spectra.T)
ica_comp = np.vstack([spec_mean,
ica.transform(spectra.T).T])
# NMF requires all elements of the input to be greater than zero
spectra[spectra < 0] = 0
nmf = NMF(n_components, random_state=0)
nmf.fit(spectra)
nmf_comp = nmf.components_
return pca_comp, ica_comp, nmf_comp
n_components = 5
decompositions = compute_PCA_ICA_NMF(n_components)
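# Aside (not part of the original script): RandomizedPCA was later folded into
# sklearn.decomposition.PCA. With a modern scikit-learn, the equivalent estimator
# would be built roughly as below; it is kept as a comment because this script
# otherwise targets the older API.
# from sklearn.decomposition import PCA
# pca_modern = PCA(n_components - 1, svd_solver='randomized', random_state=0)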
#----------------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 4))
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05,
bottom=0.1, top=0.95, hspace=0.05)
titles = ['PCA components', 'ICA components', 'NMF components']
for i, comp in enumerate(decompositions):
for j in range(n_components):
ax = fig.add_subplot(n_components, 3, 3 * j + 1 + i)
ax.yaxis.set_major_formatter(plt.NullFormatter())
ax.xaxis.set_major_locator(plt.MultipleLocator(1000))
if j < n_components - 1:
ax.xaxis.set_major_formatter(plt.NullFormatter())
else:
ax.xaxis.set_major_locator(
plt.FixedLocator(list(range(3000, 7999, 1000))))
ax.set_xlabel(r'wavelength ${\rm (\AA)}$')
ax.plot(wavelengths, comp[j], '-k', lw=1)
# plot zero line
xlim = [3000, 8000]
ax.plot(xlim, [0, 0], '-', c='gray', lw=1)
if j == 0:
ax.set_title(titles[i])
if titles[i].startswith('PCA') or titles[i].startswith('ICA'):
if j == 0:
label = 'mean'
else:
label = 'component %i' % j
else:
label = 'component %i' % (j + 1)
ax.text(0.03, 0.94, label, transform=ax.transAxes,
ha='left', va='top')
for l in ax.get_xticklines() + ax.get_yticklines():
l.set_markersize(2)
# adjust y limits
ylim = plt.ylim()
dy = 0.05 * (ylim[1] - ylim[0])
ax.set_ylim(ylim[0] - dy, ylim[1] + 4 * dy)
ax.set_xlim(xlim)
plt.show()
| bsd-2-clause |
xiaolonw/fast-rcnn-normal | lib/datasets/imdb.py | 4 | 7114 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import os
import os.path as osp
import PIL
from utils.cython_bbox import bbox_overlaps
import numpy as np
import scipy.sparse
import datasets
class imdb(object):
"""Image database."""
def __init__(self, name):
self._name = name
self._num_classes = 0
self._classes = []
self._image_index = []
self._obj_proposer = 'selective_search'
self._roidb = None
self._roidb_handler = self.default_roidb
# Use this dict for storing dataset specific config options
self.config = {}
@property
def name(self):
return self._name
@property
def num_classes(self):
return len(self._classes)
@property
def classes(self):
return self._classes
@property
def image_index(self):
return self._image_index
@property
def roidb_handler(self):
return self._roidb_handler
@roidb_handler.setter
def roidb_handler(self, val):
self._roidb_handler = val
@property
def roidb(self):
# A roidb is a list of dictionaries, each with the following keys:
# boxes
# gt_overlaps
# gt_classes
# flipped
if self._roidb is not None:
return self._roidb
self._roidb = self.roidb_handler()
return self._roidb
@property
def cache_path(self):
cache_path = osp.abspath(osp.join(datasets.ROOT_DIR, 'data', 'cache'))
if not os.path.exists(cache_path):
os.makedirs(cache_path)
return cache_path
@property
def num_images(self):
return len(self.image_index)
def image_path_at(self, i):
raise NotImplementedError
def default_roidb(self):
raise NotImplementedError
def evaluate_detections(self, all_boxes, output_dir=None):
"""
all_boxes is a list of length number-of-classes.
Each list element is a list of length number-of-images.
Each of those list elements is either an empty list []
or a numpy array of detection.
all_boxes[class][image] = [] or np.array of shape #dets x 5
"""
raise NotImplementedError
def append_flipped_images(self):
num_images = self.num_images
        # Open each image to read its width (PIL only loads the header here,
        # so this is cheap even for many images)
widths = [PIL.Image.open(self.image_path_at(i)[0]).size[0]
for i in xrange(num_images)]
for i in xrange(num_images):
boxes = self.roidb[i]['boxes'].copy()
oldx1 = boxes[:, 0].copy()
oldx2 = boxes[:, 2].copy()
boxes[:, 0] = widths[i] - oldx2 - 1
boxes[:, 2] = widths[i] - oldx1 - 1
assert (boxes[:, 2] >= boxes[:, 0]).all()
entry = {'boxes' : boxes,
'gt_overlaps' : self.roidb[i]['gt_overlaps'],
'gt_classes' : self.roidb[i]['gt_classes'],
'flipped' : True}
self.roidb.append(entry)
self._image_index = self._image_index * 2
def evaluate_recall(self, candidate_boxes, ar_thresh=0.5):
# Record max overlap value for each gt box
# Return vector of overlap values
gt_overlaps = np.zeros(0)
for i in xrange(self.num_images):
gt_inds = np.where(self.roidb[i]['gt_classes'] > 0)[0]
gt_boxes = self.roidb[i]['boxes'][gt_inds, :]
boxes = candidate_boxes[i]
if boxes.shape[0] == 0:
continue
overlaps = bbox_overlaps(boxes.astype(np.float),
gt_boxes.astype(np.float))
# gt_overlaps = np.hstack((gt_overlaps, overlaps.max(axis=0)))
_gt_overlaps = np.zeros((gt_boxes.shape[0]))
for j in xrange(gt_boxes.shape[0]):
argmax_overlaps = overlaps.argmax(axis=0)
max_overlaps = overlaps.max(axis=0)
gt_ind = max_overlaps.argmax()
gt_ovr = max_overlaps.max()
assert(gt_ovr >= 0)
box_ind = argmax_overlaps[gt_ind]
_gt_overlaps[j] = overlaps[box_ind, gt_ind]
assert(_gt_overlaps[j] == gt_ovr)
overlaps[box_ind, :] = -1
overlaps[:, gt_ind] = -1
gt_overlaps = np.hstack((gt_overlaps, _gt_overlaps))
num_pos = gt_overlaps.size
gt_overlaps = np.sort(gt_overlaps)
step = 0.001
thresholds = np.minimum(np.arange(0.5, 1.0 + step, step), 1.0)
recalls = np.zeros_like(thresholds)
for i, t in enumerate(thresholds):
recalls[i] = (gt_overlaps >= t).sum() / float(num_pos)
ar = 2 * np.trapz(recalls, thresholds)
return ar, gt_overlaps, recalls, thresholds
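    @staticmethod
    def _bbox_overlaps_numpy(boxes, gt_boxes):
        """Illustrative pure-numpy sketch (not in the original class) of the IoU
        matrix computed by utils.cython_bbox.bbox_overlaps; boxes are rows of
        [x1, y1, x2, y2] in the inclusive-pixel convention used above."""
        x1 = np.maximum(boxes[:, None, 0], gt_boxes[None, :, 0])
        y1 = np.maximum(boxes[:, None, 1], gt_boxes[None, :, 1])
        x2 = np.minimum(boxes[:, None, 2], gt_boxes[None, :, 2])
        y2 = np.minimum(boxes[:, None, 3], gt_boxes[None, :, 3])
        inter = np.maximum(0, x2 - x1 + 1) * np.maximum(0, y2 - y1 + 1)
        area = (boxes[:, 2] - boxes[:, 0] + 1) * (boxes[:, 3] - boxes[:, 1] + 1)
        gt_area = (gt_boxes[:, 2] - gt_boxes[:, 0] + 1) * \
                  (gt_boxes[:, 3] - gt_boxes[:, 1] + 1)
        union = area[:, None] + gt_area[None, :] - inter
        return inter.astype(float) / union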
def create_roidb_from_box_list(self, box_list, gt_roidb):
assert len(box_list) == self.num_images, \
'Number of boxes must match number of ground-truth images'
roidb = []
for i in xrange(self.num_images):
boxes = box_list[i]
num_boxes = boxes.shape[0]
overlaps = np.zeros((num_boxes, self.num_classes), dtype=np.float32)
if gt_roidb is not None:
gt_boxes = gt_roidb[i]['boxes']
gt_classes = gt_roidb[i]['gt_classes']
gt_overlaps = bbox_overlaps(boxes.astype(np.float),
gt_boxes.astype(np.float))
if gt_overlaps.shape[1] > 0:
argmaxes = gt_overlaps.argmax(axis=1)
maxes = gt_overlaps.max(axis=1)
I = np.where(maxes > 0)[0]
overlaps[I, gt_classes[argmaxes[I]]] = maxes[I]
else:
overlaps = np.zeros((num_boxes, self.num_classes), dtype=np.float32)
overlaps = scipy.sparse.csr_matrix(overlaps)
roidb.append({'boxes' : boxes,
'gt_classes' : np.zeros((num_boxes,),
dtype=np.int32),
'gt_overlaps' : overlaps,
'flipped' : False})
return roidb
@staticmethod
def merge_roidbs(a, b):
assert len(a) == len(b)
for i in xrange(len(a)):
a[i]['boxes'] = np.vstack((a[i]['boxes'], b[i]['boxes']))
a[i]['gt_classes'] = np.hstack((a[i]['gt_classes'], b[i]['gt_classes']))
if(a[i]['gt_overlaps'].shape[0] == 0):
a[i]['gt_overlaps'] = b[i]['gt_overlaps']
else:
a[i]['gt_overlaps'] = scipy.sparse.vstack([a[i]['gt_overlaps'], b[i]['gt_overlaps']])
return a
def competition_mode(self, on):
"""Turn competition mode on or off."""
pass
| mit |
rhiever/sklearn-benchmarks | model_code/random_search_preprocessing/DecisionTreeClassifier.py | 1 | 2499 | import sys
import pandas as pd
import numpy as np
from sklearn.preprocessing import Binarizer, MaxAbsScaler, MinMaxScaler
from sklearn.preprocessing import Normalizer, PolynomialFeatures, RobustScaler, StandardScaler
from sklearn.decomposition import FastICA, PCA
from sklearn.kernel_approximation import RBFSampler, Nystroem
from sklearn.cluster import FeatureAgglomeration
from sklearn.feature_selection import SelectFwe, SelectPercentile, VarianceThreshold
from sklearn.feature_selection import SelectFromModel, RFE
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.tree import DecisionTreeClassifier
from evaluate_model import evaluate_model
dataset = sys.argv[1]
num_param_combinations = int(sys.argv[2])
random_seed = int(sys.argv[3])
preprocessor_num = int(sys.argv[4])
np.random.seed(random_seed)
preprocessor_list = [Binarizer, MaxAbsScaler, MinMaxScaler, Normalizer,
PolynomialFeatures, RobustScaler, StandardScaler,
FastICA, PCA, RBFSampler, Nystroem, FeatureAgglomeration,
SelectFwe, SelectPercentile, VarianceThreshold,
SelectFromModel, RFE]
chosen_preprocessor = preprocessor_list[preprocessor_num]
pipeline_components = [chosen_preprocessor, DecisionTreeClassifier]
pipeline_parameters = {}
min_impurity_decrease_values = np.random.exponential(scale=0.01, size=num_param_combinations)
max_features_values = np.random.choice(list(np.arange(0.01, 1., 0.01)) + ['sqrt', 'log2', None], size=num_param_combinations)
criterion_values = np.random.choice(['gini', 'entropy'], size=num_param_combinations)
max_depth_values = np.random.choice(list(range(1, 51)) + [None], size=num_param_combinations)
all_param_combinations = zip(min_impurity_decrease_values, max_features_values, criterion_values, max_depth_values)
pipeline_parameters[DecisionTreeClassifier] = \
[{'min_impurity_decrease': min_impurity_decrease, 'max_features': max_features, 'criterion': criterion, 'max_depth': max_depth, 'random_state': 324089}
for (min_impurity_decrease, max_features, criterion, max_depth) in all_param_combinations]
if chosen_preprocessor is SelectFromModel:
pipeline_parameters[SelectFromModel] = [{'estimator': ExtraTreesClassifier(n_estimators=100, random_state=324089)}]
elif chosen_preprocessor is RFE:
pipeline_parameters[RFE] = [{'estimator': ExtraTreesClassifier(n_estimators=100, random_state=324089)}]
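# Illustrative only (not what this script executes): evaluate_model presumably sweeps
# the parameter combinations above; a single combination could be assembled by hand
# roughly like this.
from sklearn.pipeline import make_pipeline
example_preprocessor_params = pipeline_parameters.get(chosen_preprocessor, [{}])[0]
example_pipeline = make_pipeline(
    chosen_preprocessor(**example_preprocessor_params),
    DecisionTreeClassifier(**pipeline_parameters[DecisionTreeClassifier][0]))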
evaluate_model(dataset, pipeline_components, pipeline_parameters)
| mit |
pravsripad/mne-python | examples/decoding/decoding_spatio_temporal_source.py | 11 | 4405 | # -*- coding: utf-8 -*-
"""
.. _ex-dec-st-source:
==========================
Decoding source space data
==========================
Decoding applied to MEG data in source space, restricted to the left cortical
surface. Here univariate feature selection is employed for speed, to confine
the classification to a small number of potentially relevant features. The
classifier is then trained on the selected features of the epochs in source
space.
"""
# sphinx_gallery_thumbnail_number = 2
# Author: Denis A. Engemann <denis.engemann@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Jean-Remi King <jeanremi.king@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD-3-Clause
# %%
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.linear_model import LogisticRegression
import mne
from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator
from mne.decoding import (cross_val_multiscore, LinearModel, SlidingEstimator,
get_coef)
print(__doc__)
data_path = mne.datasets.sample.data_path()
meg_path = data_path / 'MEG' / 'sample'
fname_fwd = meg_path / 'sample_audvis-meg-oct-6-fwd.fif'
fname_evoked = meg_path / 'sample_audvis-ave.fif'
subjects_dir = data_path / 'subjects'
# %%
# Set parameters
raw_fname = meg_path / 'sample_audvis_filt-0-40_raw.fif'
event_fname = meg_path / 'sample_audvis_filt-0-40_raw-eve.fif'
fname_cov = meg_path / 'sample_audvis-cov.fif'
fname_inv = meg_path / 'sample_audvis-meg-oct-6-meg-inv.fif'
tmin, tmax = -0.2, 0.8
event_id = dict(aud_r=2, vis_r=4) # load contra-lateral conditions
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname, preload=True)
raw.filter(None, 10., fir_design='firwin')
events = mne.read_events(event_fname)
# Set up pick list: MEG - bad channels (modify to your needs)
raw.info['bads'] += ['MEG 2443'] # mark bads
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
exclude='bads')
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, eog=150e-6),
decim=5) # decimate to save memory and increase speed
# %%
# Compute inverse solution
snr = 3.0
noise_cov = mne.read_cov(fname_cov)
inverse_operator = read_inverse_operator(fname_inv)
stcs = apply_inverse_epochs(epochs, inverse_operator,
lambda2=1.0 / snr ** 2, verbose=False,
method="dSPM", pick_ori="normal")
# %%
# Decoding in sensor space using a logistic regression
# Retrieve source space data into an array
X = np.array([stc.lh_data for stc in stcs]) # only keep left hemisphere
y = epochs.events[:, 2]
# prepare a series of classifier applied at each time sample
clf = make_pipeline(StandardScaler(), # z-score normalization
SelectKBest(f_classif, k=500), # select features for speed
LinearModel(LogisticRegression(C=1, solver='liblinear')))
time_decod = SlidingEstimator(clf, scoring='roc_auc')
# Run cross-validated decoding analyses:
scores = cross_val_multiscore(time_decod, X, y, cv=5, n_jobs=None)
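# Quick numeric summary (illustrative addition): peak cross-validated AUC and its latency.
mean_scores = scores.mean(axis=0)
print(f'Peak ROC AUC: {mean_scores.max():.3f} at '
      f't = {epochs.times[mean_scores.argmax()]:.3f} s')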
# Plot average decoding scores of 5 splits
fig, ax = plt.subplots(1)
ax.plot(epochs.times, scores.mean(0), label='score')
ax.axhline(.5, color='k', linestyle='--', label='chance')
ax.axvline(0, color='k')
plt.legend()
# %%
# To investigate weights, we need to retrieve the patterns of a fitted model
# The fitting needs not be cross validated because the weights are based on
# the training sets
time_decod.fit(X, y)
# Retrieve patterns after inversing the z-score normalization step:
patterns = get_coef(time_decod, 'patterns_', inverse_transform=True)
stc = stcs[0] # for convenience, lookup parameters from first stc
vertices = [stc.lh_vertno, np.array([], int)] # empty array for right hemi
stc_feat = mne.SourceEstimate(np.abs(patterns), vertices=vertices,
tmin=stc.tmin, tstep=stc.tstep, subject='sample')
brain = stc_feat.plot(views=['lat'], transparent=True,
initial_time=0.1, time_unit='s',
subjects_dir=subjects_dir)
| bsd-3-clause |
pravsripad/mne-python | examples/decoding/decoding_csp_eeg.py | 11 | 4808 | # -*- coding: utf-8 -*-
"""
.. _ex-decoding-csp-eeg:
===========================================================================
Motor imagery decoding from EEG data using the Common Spatial Pattern (CSP)
===========================================================================
Decoding of motor imagery applied to EEG data decomposed using CSP. A
classifier is then applied to features extracted from the CSP-filtered signals.
See https://en.wikipedia.org/wiki/Common_spatial_pattern and
:footcite:`Koles1991`. The EEGBCI dataset is documented in
:footcite:`SchalkEtAl2004` and is available at PhysioNet
:footcite:`GoldbergerEtAl2000`.
"""
# Authors: Martin Billinger <martin.billinger@tugraz.at>
#
# License: BSD-3-Clause
# %%
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import ShuffleSplit, cross_val_score
from mne import Epochs, pick_types, events_from_annotations
from mne.channels import make_standard_montage
from mne.io import concatenate_raws, read_raw_edf
from mne.datasets import eegbci
from mne.decoding import CSP
print(__doc__)
###############################################################################
# Set parameters and read data
# avoid classification of evoked responses by using epochs that start 1s after
# cue onset.
tmin, tmax = -1., 4.
event_id = dict(hands=2, feet=3)
subject = 1
runs = [6, 10, 14] # motor imagery: hands vs feet
raw_fnames = eegbci.load_data(subject, runs)
raw = concatenate_raws([read_raw_edf(f, preload=True) for f in raw_fnames])
eegbci.standardize(raw) # set channel names
montage = make_standard_montage('standard_1005')
raw.set_montage(montage)
# Apply band-pass filter
raw.filter(7., 30., fir_design='firwin', skip_by_annotation='edge')
events, _ = events_from_annotations(raw, event_id=dict(T1=2, T2=3))
picks = pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,
exclude='bads')
# Read epochs (train will be done only between 1 and 2s)
# Testing will be done with a running classifier
epochs = Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
baseline=None, preload=True)
epochs_train = epochs.copy().crop(tmin=1., tmax=2.)
labels = epochs.events[:, -1] - 2
# %%
# Classification with linear discrimant analysis
# Define a monte-carlo cross-validation generator (reduce variance):
scores = []
epochs_data = epochs.get_data()
epochs_data_train = epochs_train.get_data()
cv = ShuffleSplit(10, test_size=0.2, random_state=42)
cv_split = cv.split(epochs_data_train)
# Assemble a classifier
lda = LinearDiscriminantAnalysis()
csp = CSP(n_components=4, reg=None, log=True, norm_trace=False)
# Use scikit-learn Pipeline with cross_val_score function
clf = Pipeline([('CSP', csp), ('LDA', lda)])
scores = cross_val_score(clf, epochs_data_train, labels, cv=cv, n_jobs=None)
# Printing the results
class_balance = np.mean(labels == labels[0])
class_balance = max(class_balance, 1. - class_balance)
print("Classification accuracy: %f / Chance level: %f" % (np.mean(scores),
class_balance))
# plot CSP patterns estimated on full data for visualization
csp.fit_transform(epochs_data, labels)
csp.plot_patterns(epochs.info, ch_type='eeg', units='Patterns (AU)', size=1.5)
# %%
# Look at performance over time
sfreq = raw.info['sfreq']
w_length = int(sfreq * 0.5) # running classifier: window length
w_step = int(sfreq * 0.1) # running classifier: window step size
w_start = np.arange(0, epochs_data.shape[2] - w_length, w_step)
scores_windows = []
for train_idx, test_idx in cv_split:
y_train, y_test = labels[train_idx], labels[test_idx]
X_train = csp.fit_transform(epochs_data_train[train_idx], y_train)
X_test = csp.transform(epochs_data_train[test_idx])
# fit classifier
lda.fit(X_train, y_train)
# running classifier: test classifier on sliding window
score_this_window = []
for n in w_start:
X_test = csp.transform(epochs_data[test_idx][:, :, n:(n + w_length)])
score_this_window.append(lda.score(X_test, y_test))
scores_windows.append(score_this_window)
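# Illustrative summary (not in the original example): overall windowed accuracy across folds.
scores_windows = np.array(scores_windows)
print('Mean windowed accuracy: %0.3f (+/- %0.3f)'
      % (scores_windows.mean(), scores_windows.std()))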
# Plot scores over time
w_times = (w_start + w_length / 2.) / sfreq + epochs.tmin
plt.figure()
plt.plot(w_times, np.mean(scores_windows, 0), label='Score')
plt.axvline(0, linestyle='--', color='k', label='Onset')
plt.axhline(0.5, linestyle='-', color='k', label='Chance')
plt.xlabel('time (s)')
plt.ylabel('classification accuracy')
plt.title('Classification score over time')
plt.legend(loc='lower right')
plt.show()
##############################################################################
# References
# ----------
# .. footbibliography::
| bsd-3-clause |
herilalaina/scikit-learn | sklearn/linear_model/coordinate_descent.py | 7 | 84720 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Gael Varoquaux <gael.varoquaux@inria.fr>
#
# License: BSD 3 clause
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import _preprocess_data
from ..utils import check_array, check_X_y
from ..utils.validation import check_random_state
from ..model_selection import check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils.validation import column_or_1d
from ..exceptions import ConvergenceWarning
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape (n_samples,)
Target values
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float
The elastic net mixing parameter, with ``0 < l1_ratio <= 1``.
        For ``l1_ratio = 0`` the penalty is an L2 penalty (currently not
        supported). For ``l1_ratio = 1`` it is an L1 penalty. For
        ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2.
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean, default True
Whether to fit an intercept or not
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
"""
if l1_ratio == 0:
raise ValueError("Automatic alpha grid generation is not supported for"
" l1_ratio=0. Please supply a grid by providing "
"your estimator with the appropriate `alphas=` "
"argument.")
n_samples = len(y)
sparse_center = False
if Xy is None:
X_sparse = sparse.isspmatrix(X)
sparse_center = X_sparse and (fit_intercept or normalize)
X = check_array(X, 'csc',
copy=(copy_X and fit_intercept and not X_sparse))
if not X_sparse:
# X can be touched inplace thanks to the above line
X, y, _, _, _ = _preprocess_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
if sparse_center:
# Workaround to find alpha_max for sparse matrices.
# since we should not destroy the sparsity of such matrices.
_, _, X_offset, _, X_scale = _preprocess_data(X, y, fit_intercept,
normalize,
return_mean=True)
mean_dot = X_offset * np.sum(y)
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if sparse_center:
if fit_intercept:
Xy -= mean_dot[:, np.newaxis]
if normalize:
Xy /= X_scale[:, np.newaxis]
alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
(n_samples * l1_ratio))
if alpha_max <= np.finfo(float).resolution:
alphas = np.empty(n_alphas)
alphas.fill(np.finfo(float).resolution)
return alphas
return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
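# Illustrative helper (not part of the public API): for dense, already-centered
# data the largest useful alpha computed above reduces to a simple closed form.
def _alpha_max_dense(X, y, l1_ratio=1.0):
    """max_j |x_j^T y| / (n_samples * l1_ratio); matches _alpha_grid with
    fit_intercept=False and normalize=False on dense 1-D targets."""
    return np.abs(np.dot(X.T, y)).max() / (X.shape[0] * l1_ratio)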
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute Lasso path with coordinate descent
The Lasso optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,), or (n_samples, n_outputs)
Target values
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
return_n_iter : bool
whether to return the number of iterations or not.
positive : bool, default False
If set to True, forces coefficients to be positive.
(Only allowed when ``y.ndim == 1``).
**params : kwargs
keyword arguments passed to the coordinate descent solver.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
Notes
-----
For an example, see
:ref:`examples/linear_model/plot_lasso_coordinate_descent_path.py
<sphx_glr_auto_examples_linear_model_plot_lasso_coordinate_descent_path.py>`.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
Note that in certain cases, the Lars solver may be significantly
faster to implement this functionality. In particular, linear
interpolation can be used to retrieve model coefficients between the
values output by lars_path
Examples
---------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
>>> print(coef_path)
[[ 0. 0. 0.46874778]
[ 0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
... coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[ 0. 0. 0.46915237]
[ 0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
copy_X=copy_X, coef_init=coef_init, verbose=verbose,
positive=positive, return_n_iter=return_n_iter, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False,
check_input=True, **params):
"""Compute elastic net path with coordinate descent
The elastic net optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
Target values
l1_ratio : float, optional
float between 0 and 1 passed to elastic net (scaling between
l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
eps : float
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
return_n_iter : bool
whether to return the number of iterations or not.
positive : bool, default False
If set to True, forces coefficients to be positive.
(Only allowed when ``y.ndim == 1``).
check_input : bool, default True
        If set to False, input validation checks (including on the Gram matrix,
        when provided) are skipped, assuming they are handled by the caller.
**params : kwargs
keyword arguments passed to the coordinate descent solver.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
(Is returned when ``return_n_iter`` is set to True).
Notes
-----
For an example, see
:ref:`examples/linear_model/plot_lasso_coordinate_descent_path.py
<sphx_glr_auto_examples_linear_model_plot_lasso_coordinate_descent_path.py>`.
See also
--------
MultiTaskElasticNet
MultiTaskElasticNetCV
ElasticNet
ElasticNetCV
"""
# We expect X and y to be already Fortran ordered when bypassing
# checks
if check_input:
X = check_array(X, 'csc', dtype=[np.float64, np.float32],
order='F', copy=copy_X)
y = check_array(y, 'csc', dtype=X.dtype.type, order='F', copy=False,
ensure_2d=False)
if Xy is not None:
# Xy should be a 1d contiguous array or a 2D C ordered array
Xy = check_array(Xy, dtype=X.dtype.type, order='C', copy=False,
ensure_2d=False)
n_samples, n_features = X.shape
multi_output = False
if y.ndim != 1:
multi_output = True
_, n_outputs = y.shape
if multi_output and positive:
raise ValueError('positive=True is not allowed for multi-output'
' (y.ndim != 1)')
# MultiTaskElasticNet does not support sparse matrices
if not multi_output and sparse.isspmatrix(X):
if 'X_offset' in params:
# As sparse matrices are not actually centered we need this
# to be passed to the CD solver.
X_sparse_scaling = params['X_offset'] / params['X_scale']
X_sparse_scaling = np.asarray(X_sparse_scaling, dtype=X.dtype)
else:
X_sparse_scaling = np.zeros(n_features, dtype=X.dtype)
# X should be normalized and fit already if function is called
# from ElasticNet.fit
if check_input:
X, y, X_offset, y_offset, X_scale, precompute, Xy = \
_pre_fit(X, y, Xy, precompute, normalize=False,
fit_intercept=False, copy=False)
if alphas is None:
        # No need to normalize or fit_intercept: it has been done
        # above
alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
fit_intercept=False, eps=eps, n_alphas=n_alphas,
normalize=False, copy_X=False)
else:
alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered
n_alphas = len(alphas)
tol = params.get('tol', 1e-4)
max_iter = params.get('max_iter', 1000)
dual_gaps = np.empty(n_alphas)
n_iters = []
rng = check_random_state(params.get('random_state', None))
selection = params.get('selection', 'cyclic')
if selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (selection == 'random')
if not multi_output:
coefs = np.empty((n_features, n_alphas), dtype=X.dtype)
else:
coefs = np.empty((n_outputs, n_features, n_alphas),
dtype=X.dtype)
if coef_init is None:
coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1], dtype=X.dtype))
else:
coef_ = np.asfortranarray(coef_init, dtype=X.dtype)
for i, alpha in enumerate(alphas):
l1_reg = alpha * l1_ratio * n_samples
l2_reg = alpha * (1.0 - l1_ratio) * n_samples
if not multi_output and sparse.isspmatrix(X):
model = cd_fast.sparse_enet_coordinate_descent(
coef_, l1_reg, l2_reg, X.data, X.indices,
X.indptr, y, X_sparse_scaling,
max_iter, tol, rng, random, positive)
elif multi_output:
model = cd_fast.enet_coordinate_descent_multi_task(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
elif isinstance(precompute, np.ndarray):
            # We expect precompute to be already C ordered when bypassing
            # checks (check_array below enforces order='C')
if check_input:
precompute = check_array(precompute, dtype=X.dtype.type,
order='C')
model = cd_fast.enet_coordinate_descent_gram(
coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
tol, rng, random, positive)
elif precompute is False:
model = cd_fast.enet_coordinate_descent(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
positive)
else:
raise ValueError("Precompute should be one of True, False, "
"'auto' or array-like. Got %r" % precompute)
coef_, dual_gap_, eps_, n_iter_ = model
coefs[..., i] = coef_
dual_gaps[i] = dual_gap_
n_iters.append(n_iter_)
if dual_gap_ > eps_:
warnings.warn('Objective did not converge.' +
' You might want' +
' to increase the number of iterations.' +
' Fitting data with very small alpha' +
' may cause precision problems.',
ConvergenceWarning)
if verbose:
if verbose > 2:
print(model)
elif verbose > 1:
print('Path: %03i out of %03i' % (i, n_alphas))
else:
sys.stderr.write('.')
if return_n_iter:
return alphas, coefs, dual_gaps, n_iters
return alphas, coefs, dual_gaps
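# Minimal usage sketch for enet_path (kept as a comment so nothing runs at import
# time; the data mirror the lasso_path docstring above and the call is illustrative):
#
#     X = np.array([[1., 2., 3.1], [2.3, 5.4, 4.3]]).T
#     y = np.array([1., 2., 3.1])
#     alphas, coefs, gaps = enet_path(X, y, l1_ratio=0.5, n_alphas=5)
#     # coefs.shape == (n_features, n_alphas) == (2, 5)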
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
"""Linear regression with combined L1 and L2 priors as regularizer.
Minimizes the objective function::
1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and l1_ratio = a / (a + b)
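    For example, ``a = 0.4`` and ``b = 0.6`` correspond to ``alpha = 1.0``
    and ``l1_ratio = 0.4``.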
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the penalty terms. Defaults to 1.0.
See the notes for the exact mathematical meaning of this
parameter.``alpha = 0`` is equivalent to an ordinary least square,
solved by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the ``Lasso`` object is not advised.
Given this, you should use the :class:`LinearRegression` object.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
        ``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept : bool
Whether the intercept should be estimated or not. If ``False``, the
data is assumed to be already centered.
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
precompute : True | False | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. The Gram matrix can also be passed as argument.
For sparse input this option is always ``True`` to preserve sparsity.
max_iter : int, optional
The maximum number of iterations
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
random_state : int, RandomState instance or None, optional, default None
The seed of the pseudo random number generator that selects a random
feature to update. If int, random_state is the seed used by the random
number generator; If RandomState instance, random_state is the random
number generator; If None, the random number generator is the
RandomState instance used by `np.random`. Used when ``selection`` ==
'random'.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn.linear_model import ElasticNet
>>> from sklearn.datasets import make_regression
>>>
>>> X, y = make_regression(n_features=2, random_state=0)
>>> regr = ElasticNet(random_state=0)
>>> regr.fit(X, y)
ElasticNet(alpha=1.0, copy_X=True, fit_intercept=True, l1_ratio=0.5,
max_iter=1000, normalize=False, positive=False, precompute=False,
random_state=0, selection='cyclic', tol=0.0001, warm_start=False)
>>> print(regr.coef_) # doctest: +ELLIPSIS
[ 18.83816048 64.55968825]
>>> print(regr.intercept_) # doctest: +ELLIPSIS
1.45126075617
>>> print(regr.predict([[0, 0]])) # doctest: +ELLIPSIS
[ 1.45126076]
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
ElasticNetCV : Elastic net model with best model selection by
cross-validation.
SGDRegressor: implements elastic net regression with incremental training.
SGDClassifier: implements logistic regression with elastic net penalty
(``SGDClassifier(loss="log", penalty="elasticnet")``).
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, precompute=False, max_iter=1000,
copy_X=True, tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive
self.random_state = random_state
self.selection = selection
def fit(self, X, y, check_input=True):
"""Fit model with coordinate descent.
Parameters
-----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y : ndarray, shape (n_samples,) or (n_samples, n_targets)
Target. Will be cast to X's dtype if necessary
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Notes
-----
        Coordinate descent is an algorithm that considers one column of the
        data at a time; hence it will automatically convert the X input to a
        Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if self.alpha == 0:
warnings.warn("With alpha=0, this algorithm does not converge "
"well. You are advised to use the LinearRegression "
"estimator", stacklevel=2)
if isinstance(self.precompute, six.string_types):
raise ValueError('precompute should be one of True, False or'
' array-like. Got %r' % self.precompute)
# We expect X and y to be float64 or float32 Fortran ordered arrays
# when bypassing checks
if check_input:
X, y = check_X_y(X, y, accept_sparse='csc',
order='F', dtype=[np.float64, np.float32],
copy=self.copy_X and self.fit_intercept,
multi_output=True, y_numeric=True)
y = check_array(y, order='F', copy=False, dtype=X.dtype.type,
ensure_2d=False)
X, y, X_offset, y_offset, X_scale, precompute, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=False)
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_samples, n_features = X.shape
n_targets = y.shape[1]
if self.selection not in ['cyclic', 'random']:
raise ValueError("selection should be either random or cyclic.")
if not self.warm_start or not hasattr(self, "coef_"):
coef_ = np.zeros((n_targets, n_features), dtype=X.dtype,
order='F')
else:
coef_ = self.coef_
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
dual_gaps_ = np.zeros(n_targets, dtype=X.dtype)
self.n_iter_ = []
for k in xrange(n_targets):
if Xy is not None:
this_Xy = Xy[:, k]
else:
this_Xy = None
_, this_coef, this_dual_gap, this_iter = \
self.path(X, y[:, k],
l1_ratio=self.l1_ratio, eps=None,
n_alphas=None, alphas=[self.alpha],
precompute=precompute, Xy=this_Xy,
fit_intercept=False, normalize=False, copy_X=True,
verbose=False, tol=self.tol, positive=self.positive,
X_offset=X_offset, X_scale=X_scale, return_n_iter=True,
coef_init=coef_[k], max_iter=self.max_iter,
random_state=self.random_state,
selection=self.selection,
check_input=False)
coef_[k] = this_coef[:, 0]
dual_gaps_[k] = this_dual_gap[0]
self.n_iter_.append(this_iter[0])
if n_targets == 1:
self.n_iter_ = self.n_iter_[0]
self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
self._set_intercept(X_offset, y_offset, X_scale)
# workaround since _set_intercept will cast self.coef_ into X.dtype
self.coef_ = np.asarray(self.coef_, dtype=X.dtype)
# return self for chaining fit and predict calls
return self
@property
def sparse_coef_(self):
""" sparse representation of the fitted ``coef_`` """
return sparse.csr_matrix(self.coef_)
def _decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
check_is_fitted(self, 'n_iter_')
if sparse.isspmatrix(X):
return safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
else:
return super(ElasticNet, self)._decision_function(X)
###############################################################################
# Lasso model
class Lasso(ElasticNet):
"""Linear Model trained with L1 prior as regularizer (aka the Lasso)
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Technically the Lasso model is optimizing the same objective function as
the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1 term. Defaults to 1.0.
``alpha = 0`` is equivalent to ordinary least squares, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the ``Lasso`` object is not advised;
use the :class:`LinearRegression` object instead.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
precompute : True | False | array-like, default=False
Whether to use a precomputed Gram matrix to speed up
calculations. The Gram matrix can also be passed as argument.
For sparse input this option is always ``True`` to preserve sparsity.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
random_state : int, RandomState instance or None, optional, default None
The seed of the pseudo random number generator that selects a random
feature to update. If int, random_state is the seed used by the random
number generator; If RandomState instance, random_state is the random
number generator; If None, the random number generator is the
RandomState instance used by `np.random`. Used when ``selection`` ==
'random'.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : int | array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[ 0.85 0. ]
>>> print(clf.intercept_)
0.15
See also
--------
lars_path
lasso_path
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
precompute=False, copy_X=True, max_iter=1000,
tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
super(Lasso, self).__init__(
alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
normalize=normalize, precompute=precompute, copy_X=copy_X,
max_iter=max_iter, tol=tol, warm_start=warm_start,
positive=positive, random_state=random_state,
selection=selection)
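# Hedged usage sketch: the Lasso docstring above notes that Lasso optimizes the
# same objective as ElasticNet with ``l1_ratio=1.0``. The helper below checks
# that numerically on a tiny synthetic problem; the data and the alpha value
# are illustrative assumptions, not values taken from this module.
def _demo_lasso_matches_enet_l1(alpha=0.1):
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(30, 3)
    y = X[:, 0] + 0.1 * rng.randn(30)
    # Lasso subclasses ElasticNet with l1_ratio fixed to 1.0, so both fits run
    # the same coordinate descent and should yield the same coefficients.
    lasso_coef = Lasso(alpha=alpha).fit(X, y).coef_
    enet_coef = ElasticNet(alpha=alpha, l1_ratio=1.0).fit(X, y).coef_
    return np.allclose(lasso_coef, enet_coef)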
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
l1_ratio=1, X_order=None, dtype=None):
"""Returns the MSE for the models computed by 'path'
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
train : list of indices
The indices of the train set
test : list of indices
The indices of the test set
path : callable
function returning a list of models on the path. See
enet_path for an example of signature
path_params : dictionary
Parameters passed to the path function
alphas : array-like, optional
Array of float that is used for cross-validation. If not
provided, computed using 'path'
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
< l1_ratio < 1``, the penalty is a combination of L1 and L2
X_order : {'F', 'C', or None}, optional
The order of the arrays expected by the path function to
avoid memory copies
dtype : a numpy dtype or None
The dtype of the arrays expected by the path function to
avoid memory copies
"""
X_train = X[train]
y_train = y[train]
X_test = X[test]
y_test = y[test]
fit_intercept = path_params['fit_intercept']
normalize = path_params['normalize']
if y.ndim == 1:
precompute = path_params['precompute']
else:
# No Gram variant of multi-task exists right now.
# Fall back to default enet_multitask
precompute = False
X_train, y_train, X_offset, y_offset, X_scale, precompute, Xy = \
_pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
copy=False)
path_params = path_params.copy()
path_params['Xy'] = Xy
path_params['X_offset'] = X_offset
path_params['X_scale'] = X_scale
path_params['precompute'] = precompute
path_params['copy_X'] = False
path_params['alphas'] = alphas
if 'l1_ratio' in path_params:
path_params['l1_ratio'] = l1_ratio
# Do the ordering and type casting here, as if it is done in the path,
# X is copied and a reference is kept here
X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
alphas, coefs, _ = path(X_train, y_train, **path_params)
del X_train, y_train
if y.ndim == 1:
# Doing this so that it becomes coherent with multioutput.
coefs = coefs[np.newaxis, :, :]
y_offset = np.atleast_1d(y_offset)
y_test = y_test[:, np.newaxis]
if normalize:
nonzeros = np.flatnonzero(X_scale)
coefs[:, nonzeros] /= X_scale[nonzeros][:, np.newaxis]
intercepts = y_offset[:, np.newaxis] - np.dot(X_offset, coefs)
if sparse.issparse(X_test):
n_order, n_features, n_alphas = coefs.shape
# Work around for sparse matrices since coefs is a 3-D numpy array.
coefs_feature_major = np.rollaxis(coefs, 1)
feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
X_test_coefs = safe_sparse_dot(X_test, feature_2d)
X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
else:
X_test_coefs = safe_sparse_dot(X_test, coefs)
residues = X_test_coefs - y_test[:, :, np.newaxis]
residues += intercepts
this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
return this_mses
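# Shape sketch (illustrative only): _path_residuals broadcasts an
# (n_samples, n_targets) test block against an (n_targets, n_features,
# n_alphas) coefficient stack and averages the squared residuals down to one
# MSE per alpha. The toy shapes below are assumptions chosen to make that
# broadcasting explicit; np.dot stands in for safe_sparse_dot on dense input.
def _demo_path_residual_shapes():
    import numpy as np
    n_samples, n_features, n_targets, n_alphas = 5, 3, 2, 4
    coefs = np.zeros((n_targets, n_features, n_alphas))
    X_test = np.ones((n_samples, n_features))
    y_test = np.ones((n_samples, n_targets))
    X_test_coefs = np.dot(X_test, coefs)                # (n_samples, n_targets, n_alphas)
    residues = X_test_coefs - y_test[:, :, np.newaxis]  # broadcast over the alpha axis
    this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
    return this_mses.shape                              # (n_alphas,)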
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
"""Base class for iterative model fitting along a regularization path"""
@abstractmethod
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.copy_X = copy_X
self.cv = cv
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit linear model with coordinate descent
Fit is on grid of alphas and best alpha estimated by cross-validation.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data
to avoid unnecessary memory duplication. If y is mono-output,
X can be sparse.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
"""
y = check_array(y, copy=False, dtype=[np.float64, np.float32],
ensure_2d=False)
if y.shape[0] == 0:
raise ValueError("y has 0 samples: %r" % y)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
if model_str == 'ElasticNet':
model = ElasticNet()
else:
model = Lasso()
if y.ndim > 1 and y.shape[1] > 1:
raise ValueError("For multi-task outputs, use "
"MultiTask%sCV" % (model_str))
y = column_or_1d(y, warn=True)
else:
if sparse.isspmatrix(X):
raise TypeError("X should be dense but a sparse matrix was"
"passed")
elif y.ndim == 1:
raise ValueError("For mono-task outputs, use "
"%sCV" % (model_str))
if model_str == 'ElasticNet':
model = MultiTaskElasticNet()
else:
model = MultiTaskLasso()
if self.selection not in ["random", "cyclic"]:
raise ValueError("selection should be either random or cyclic.")
# This makes sure that there is no duplication in memory.
# Dealing right with copy_X is important in the following:
# Multiple functions touch X and subsamples of X and can induce a
# lot of duplication of memory
copy_X = self.copy_X and self.fit_intercept
if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
# Keep a reference to X
reference_to_old_X = X
# Let us not impose fortran ordering so far: it is
# not useful for the cross-validation loop and will be done
# by the model fitting itself
X = check_array(X, 'csc', copy=False)
if sparse.isspmatrix(X):
if (hasattr(reference_to_old_X, "data") and
not np.may_share_memory(reference_to_old_X.data, X.data)):
# X is a sparse matrix and has been copied
copy_X = False
elif not np.may_share_memory(reference_to_old_X, X):
# X has been copied
copy_X = False
del reference_to_old_X
else:
X = check_array(X, 'csc', dtype=[np.float64, np.float32],
order='F', copy=copy_X)
copy_X = False
if X.shape[0] != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (X.shape[0], y.shape[0]))
# All LinearModelCV parameters except 'cv' are acceptable
path_params = self.get_params()
if 'l1_ratio' in path_params:
l1_ratios = np.atleast_1d(path_params['l1_ratio'])
# For the first path, we need to set l1_ratio
path_params['l1_ratio'] = l1_ratios[0]
else:
l1_ratios = [1, ]
path_params.pop('cv', None)
path_params.pop('n_jobs', None)
alphas = self.alphas
n_l1_ratio = len(l1_ratios)
if alphas is None:
alphas = []
for l1_ratio in l1_ratios:
alphas.append(_alpha_grid(
X, y, l1_ratio=l1_ratio,
fit_intercept=self.fit_intercept,
eps=self.eps, n_alphas=self.n_alphas,
normalize=self.normalize,
copy_X=self.copy_X))
else:
# Making sure alphas is properly ordered.
alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
# We want n_alphas to be the number of alphas used for each l1_ratio.
n_alphas = len(alphas[0])
path_params.update({'n_alphas': n_alphas})
path_params['copy_X'] = copy_X
# We are not computing in parallel, we can modify X
# inplace in the folds
if not (self.n_jobs == 1 or self.n_jobs is None):
path_params['copy_X'] = False
# init cross-validation generator
cv = check_cv(self.cv)
# Compute path for all folds and compute MSE to get the best alpha
folds = list(cv.split(X, y))
best_mse = np.inf
# We do a double for loop folded in one, in order to be able to
# iterate in parallel on l1_ratio and folds
jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
path_params, alphas=this_alphas,
l1_ratio=this_l1_ratio, X_order='F',
dtype=X.dtype.type)
for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
for train, test in folds)
mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(jobs)
mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
mean_mse = np.mean(mse_paths, axis=1)
self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
mean_mse):
i_best_alpha = np.argmin(mse_alphas)
this_best_mse = mse_alphas[i_best_alpha]
if this_best_mse < best_mse:
best_alpha = l1_alphas[i_best_alpha]
best_l1_ratio = l1_ratio
best_mse = this_best_mse
self.l1_ratio_ = best_l1_ratio
self.alpha_ = best_alpha
if self.alphas is None:
self.alphas_ = np.asarray(alphas)
if n_l1_ratio == 1:
self.alphas_ = self.alphas_[0]
# Remove duplicate alphas in case alphas is provided.
else:
self.alphas_ = np.asarray(alphas[0])
# Refit the model with the parameters selected
common_params = dict((name, value)
for name, value in self.get_params().items()
if name in model.get_params())
model.set_params(**common_params)
model.alpha = best_alpha
model.l1_ratio = best_l1_ratio
model.copy_X = copy_X
model.precompute = False
model.fit(X, y)
if not hasattr(self, 'l1_ratio'):
del self.l1_ratio_
self.coef_ = model.coef_
self.intercept_ = model.intercept_
self.dual_gap_ = model.dual_gap_
self.n_iter_ = model.n_iter_
return self
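# Selection sketch (illustrative only): LinearModelCV.fit averages the
# per-fold MSE paths into one curve per l1_ratio and keeps the
# (l1_ratio, alpha) pair with the smallest mean MSE. The tiny grid below is an
# assumed stand-in for ``mean_mse``, shown only to make that bookkeeping
# explicit.
def _demo_pick_best_alpha_l1_ratio():
    import numpy as np
    l1_ratios = [0.5, 1.0]
    alphas = [np.array([1.0, 0.1, 0.01]), np.array([1.0, 0.1, 0.01])]
    mean_mse = np.array([[3.0, 2.0, 2.5],    # MSE curve for l1_ratio=0.5
                         [2.8, 1.5, 1.9]])   # MSE curve for l1_ratio=1.0
    best_mse = np.inf
    for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas, mean_mse):
        i_best_alpha = np.argmin(mse_alphas)
        if mse_alphas[i_best_alpha] < best_mse:
            best_mse = mse_alphas[i_best_alpha]
            best_alpha, best_l1_ratio = l1_alphas[i_best_alpha], l1_ratio
    return best_alpha, best_l1_ratio         # (0.1, 1.0) for this toy grid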
class LassoCV(LinearModelCV, RegressorMixin):
"""Lasso linear model with iterative fitting along a regularization path
The best model is selected by cross-validation.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
fit_intercept : boolean, default True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
If positive, restrict regression coefficients to be positive
random_state : int, RandomState instance or None, optional, default None
The seed of the pseudo random number generator that selects a random
feature to update. If int, random_state is the seed used by the random
number generator; If RandomState instance, random_state is the random
number generator; If None, the random number generator is the
RandomState instance used by `np.random`. Used when ``selection`` ==
'random'.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting
dual_gap_ : ndarray, shape ()
The dual gap at the end of the optimization for the optimal alpha
(``alpha_``).
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
For an example, see
:ref:`examples/linear_model/plot_lasso_model_selection.py
<sphx_glr_auto_examples_linear_model_plot_lasso_model_selection.py>`.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
lars_path
lasso_path
LassoLars
Lasso
LassoLarsCV
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
super(LassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,
random_state=random_state, selection=selection)
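# Hedged usage sketch: LassoCV has no Examples section in its docstring, so the
# call pattern is shown below. The synthetic regression problem and ``cv=3``
# are illustrative assumptions; no particular fitted values are implied.
def _demo_lassocv_usage():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(50, 5)
    y = X[:, 0] - 2.0 * X[:, 1] + 0.1 * rng.randn(50)
    reg = LassoCV(cv=3, random_state=0).fit(X, y)
    # alpha_ is the penalty chosen by cross-validation; mse_path_ holds the
    # per-fold test errors along the alpha grid, as documented above.
    return reg.alpha_, reg.mse_path_.shape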
class ElasticNetCV(LinearModelCV, RegressorMixin):
"""Elastic Net model with iterative fitting along a regularization path
The best model is selected by cross-validation.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
l1_ratio : float or array of floats, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0``
the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path, used for each l1_ratio.
alphas : numpy array, optional
List of alphas where to compute the models.
If None alphas are set automatically
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
random_state : int, RandomState instance or None, optional, default None
The seed of the pseudo random number generator that selects a random
feature to update. If int, random_state is the seed used by the random
number generator; If RandomState instance, random_state is the random
number generator; If None, the random number generator is the
RandomState instance used by `np.random`. Used when ``selection`` ==
'random'.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
l1_ratio_ : float
The compromise between l1 and l2 penalization chosen by
cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
Parameter vector (w in the cost function formula).
intercept_ : float | array, shape (n_targets,)
Independent term in the decision function.
mse_path_ : array, shape (n_l1_ratio, n_alphas, n_folds)
Mean square error for the test set on each fold, varying l1_ratio and
alpha.
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Examples
--------
>>> from sklearn.linear_model import ElasticNetCV
>>> from sklearn.datasets import make_regression
>>>
>>> X, y = make_regression(n_features=2, random_state=0)
>>> regr = ElasticNetCV(cv=5, random_state=0)
>>> regr.fit(X, y)
ElasticNetCV(alphas=None, copy_X=True, cv=5, eps=0.001, fit_intercept=True,
l1_ratio=0.5, max_iter=1000, n_alphas=100, n_jobs=1,
normalize=False, positive=False, precompute='auto', random_state=0,
selection='cyclic', tol=0.0001, verbose=0)
>>> print(regr.alpha_) # doctest: +ELLIPSIS
0.19947279427
>>> print(regr.intercept_) # doctest: +ELLIPSIS
0.398882965428
>>> print(regr.predict([[0, 0]])) # doctest: +ELLIPSIS
[ 0.39888297]
Notes
-----
For an example, see
:ref:`examples/linear_model/plot_lasso_model_selection.py
<sphx_glr_auto_examples_linear_model_plot_lasso_model_selection.py>`.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
The parameter l1_ratio corresponds to alpha in the glmnet R package
while alpha corresponds to the lambda parameter in glmnet.
More specifically, the optimization objective is::
1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
for::
alpha = a + b and l1_ratio = a / (a + b).
See also
--------
enet_path
ElasticNet
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False, precompute='auto',
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, positive=False, random_state=None,
selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
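# Parameterization sketch: the ElasticNetCV notes above state that an
# ``a * L1 + b * L2`` penalty corresponds to ``alpha = a + b`` and
# ``l1_ratio = a / (a + b)``. The helper below just performs that conversion;
# the example values in the trailing comment are assumptions.
def _l1_l2_to_alpha_l1_ratio(a, b):
    alpha = a + b
    l1_ratio = a / (a + b)
    return alpha, l1_ratio
# e.g. _l1_l2_to_alpha_l1_ratio(0.1, 0.3) == (0.4, 0.25)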
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_elastic_net>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
l1_ratio : float
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
random_state : int, RandomState instance or None, optional, default None
The seed of the pseudo random number generator that selects a random
feature to update. If int, random_state is the seed used by the random
number generator; If RandomState instance, random_state is the random
number generator; If None, the random number generator is the
RandomState instance used by `np.random`. Used when ``selection`` ==
'random'.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula). If a 1D y is \
passed in at fit (non multi-task usage), ``coef_`` is then a 1D array.
Note that ``coef_`` stores the transpose of ``W``, ``W.T``.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[[ 0.45663524 0.45612256]
[ 0.45663524 0.45612256]]
>>> print(clf.intercept_)
[ 0.0872422 0.0872422]
See also
--------
MultiTaskElasticNetCV : Multi-task L1/L2 ElasticNet with built-in
cross-validation.
ElasticNet
MultiTaskLasso
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
warm_start=False, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit MultiTaskElasticNet model with coordinate descent
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Data
y : ndarray, shape (n_samples, n_tasks)
Target. Will be cast to X's dtype if necessary
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
X = check_array(X, dtype=[np.float64, np.float32], order='F',
copy=self.copy_X and self.fit_intercept)
y = check_array(y, dtype=X.dtype.type, ensure_2d=False)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if y.ndim == 1:
raise ValueError("For mono-task outputs, use %s" % model_str)
n_samples, n_features = X.shape
_, n_tasks = y.shape
if n_samples != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (n_samples, y.shape[0]))
X, y, X_offset, y_offset, X_scale = _preprocess_data(
X, y, self.fit_intercept, self.normalize, copy=False)
if not self.warm_start or not hasattr(self, 'coef_'):
self.coef_ = np.zeros((n_tasks, n_features), dtype=X.dtype.type,
order='F')
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory
if self.selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (self.selection == 'random')
self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
cd_fast.enet_coordinate_descent_multi_task(
self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
check_random_state(self.random_state), random)
self._set_intercept(X_offset, y_offset, X_scale)
if self.dual_gap_ > self.eps_:
warnings.warn('Objective did not converge, you might want'
' to increase the number of iterations',
ConvergenceWarning)
# return self for chaining fit and predict calls
return self
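# Penalty sketch (illustrative only): the MultiTaskElasticNet docstring defines
# ||W||_21 as the sum of the row-wise l2 norms of W. The helper below is a
# plain numpy restatement of the documented objective, not the solver's
# internal computation; ``coef`` is assumed to be laid out like ``coef_``,
# i.e. shape (n_tasks, n_features) = W.T.
def _multitask_enet_objective(coef, X, Y, alpha, l1_ratio):
    import numpy as np
    n_samples = X.shape[0]
    residual = Y - np.dot(X, coef.T)             # (n_samples, n_tasks)
    data_term = (residual ** 2).sum() / (2.0 * n_samples)
    # Row norms of W are column norms of coef_: one l2 norm per feature.
    l21 = np.sqrt((coef ** 2).sum(axis=0)).sum()
    fro2 = (coef ** 2).sum()
    return data_term + alpha * l1_ratio * l21 + 0.5 * alpha * (1 - l1_ratio) * fro2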
class MultiTaskLasso(MultiTaskElasticNet):
"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
random_state : int, RandomState instance or None, optional, default None
The seed of the pseudo random number generator that selects a random
feature to update. If int, random_state is the seed used by the random
number generator; If RandomState instance, random_state is the random
number generator; If None, the random number generator is the
RandomState instance used by `np.random`. Used when ``selection`` ==
'random'.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
Attributes
----------
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
Note that ``coef_`` stores the transpose of ``W``, ``W.T``.
intercept_ : array, shape (n_tasks,)
independent term in decision function.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskLasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, random_state=None, selection='cyclic', tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
[[ 0.89393398 0. ]
[ 0.89393398 0. ]]
>>> print(clf.intercept_)
[ 0.10606602 0.10606602]
See also
--------
MultiTaskLassoCV : Multi-task L1/L2 Lasso with built-in cross-validation
Lasso
MultiTaskElasticNet
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.l1_ratio = 1.0
self.random_state = random_state
self.selection = selection
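# Hedged usage sketch: the L1/L2 mixed norm penalizes one l2 norm per feature
# across tasks, so a feature is either kept for every task or dropped for every
# task (joint feature selection). The quick check below makes that visible; the
# synthetic data and alpha are illustrative assumptions.
def _demo_multitask_lasso_joint_selection():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(40, 6)
    # Only the first two features carry signal, shared by both tasks.
    Y = np.column_stack([X[:, 0] + X[:, 1], X[:, 0] - X[:, 1]])
    coef = MultiTaskLasso(alpha=0.5).fit(X, Y).coef_   # shape (n_tasks, n_features)
    supports = [np.flatnonzero(row) for row in coef]
    # Every task shares the same support of selected features.
    return all(np.array_equal(supports[0], s) for s in supports)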
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 ElasticNet with built-in cross-validation.
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_elastic_net>`.
Parameters
----------
l1_ratio : float or array of floats
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
random_state : int, RandomState instance or None, optional, default None
The seed of the pseudo random number generator that selects a random
feature to update. If int, random_state is the seed used by the random
number generator; If RandomState instance, random_state is the random
number generator; If None, the random number generator is the
RandomState instance used by `np.random`. Used when ``selection`` ==
'random'.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
Note that ``coef_`` stores the transpose of ``W``, ``W.T``.
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds) or \
(n_l1_ratio, n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio
l1_ratio_ : float
best l1_ratio obtained by cross-validation.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNetCV()
>>> clf.fit([[0,0], [1, 1], [2, 2]],
... [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
n_jobs=1, normalize=False, random_state=None, selection='cyclic',
tol=0.0001, verbose=0)
>>> print(clf.coef_)
[[ 0.52875032 0.46958558]
[ 0.52875032 0.46958558]]
>>> print(clf.intercept_)
[ 0.00166409 0.00166409]
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskLassoCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False,
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.random_state = random_state
self.selection = selection
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 Lasso with built-in cross-validation.
The optimization objective for MultiTaskLasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
max_iter : int, optional
The maximum number of iterations.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
random_state : int, RandomState instance or None, optional, default None
The seed of the pseudo random number generator that selects a random
feature to update. If int, random_state is the seed used by the random
number generator; If RandomState instance, random_state is the random
number generator; If None, the random number generator is the
RandomState instance used by `np.random`. Used when ``selection`` ==
'random'.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
Note that ``coef_`` stores the transpose of ``W``, ``W.T``.
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskElasticNetCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
cv=None, verbose=False, n_jobs=1, random_state=None,
selection='cyclic'):
super(MultiTaskLassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state,
selection=selection)
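# Hedged usage sketch: MultiTaskLassoCV has no Examples section in its
# docstring, so the call pattern is shown below, mirroring
# MultiTaskElasticNetCV. The synthetic data and ``cv=3`` are illustrative
# assumptions; no particular fitted values are implied.
def _demo_multitask_lassocv_usage():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(30, 4)
    Y = np.column_stack([X[:, 0], X[:, 0] + X[:, 1]]) + 0.1 * rng.randn(30, 2)
    reg = MultiTaskLassoCV(cv=3).fit(X, Y)
    # alpha_ is chosen by cross-validation; coef_ has shape (n_tasks, n_features).
    return reg.alpha_, reg.coef_.shape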
| bsd-3-clause |
sangwook236/general-development-and-testing | sw_dev/python/rnd/test/machine_learning/pytorch/pytorch_transfer_learning.py | 2 | 8058 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os, time, copy
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models, transforms
plt.ion() # Interactive mode.
def imshow(inp, title=None):
"""Imshow for Tensor."""
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.imshow(inp)
if title is not None:
plt.title(title)
plt.pause(0.001) # Pause a bit so that plots are updated.
def train_model(model, dataloaders, device, dataset_sizes, criterion, optimizer, scheduler, num_epochs=25):
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase.
for phase in ['train', 'val']:
if phase == 'train':
scheduler.step()
model.train() # Set model to training mode.
else:
model.eval() # Set model to evaluate mode.
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for inputs, labels in dataloaders[phase]: # NOTE [info] >> Errors including "photoshop" occurred in Pillow 6.0.0.
inputs = inputs.to(device)
labels = labels.to(device)
# Zero the parameter gradients.
optimizer.zero_grad()
# Forward.
# Track history if only in train.
with torch.set_grad_enabled(phase == 'train'):
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
# Backward + optimize only if in training phase.
if phase == 'train':
loss.backward()
optimizer.step()
# Statistics.
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects.double() / dataset_sizes[phase]
print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
# Deep copy the model.
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# Load best model weights.
model.load_state_dict(best_model_wts)
return model
def visualize_model(model, device, dataloaders, class_names, num_images=6):
was_training = model.training
model.eval()
images_so_far = 0
fig = plt.figure()
with torch.no_grad():
for i, (inputs, labels) in enumerate(dataloaders['val']):
inputs = inputs.to(device)
labels = labels.to(device)
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
for j in range(inputs.size()[0]):
images_so_far += 1
ax = plt.subplot(num_images // 2, 2, images_so_far)
ax.axis('off')
ax.set_title('predicted: {}'.format(class_names[preds[j]]))
imshow(inputs.cpu().data[j])
if images_so_far == num_images:
model.train(mode=was_training)
return
model.train(mode=was_training)
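# Small helper (not in the referenced tutorial): it reports how many parameters
# will actually receive gradients, which makes the difference between the
# finetuning example (all parameters trainable) and the fixed-feature-extractor
# example (only the new fc layer) easy to verify for any model.
def count_trainable_parameters(model):
    return sum(p.numel() for p in model.parameters() if p.requires_grad)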
# REF [site] >> https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
def finetuning_example():
# Load Data.
# Data augmentation and normalization for training.
# Just normalization for validation.
data_transforms = {
'train': transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'val': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
data_dir = 'data/hymenoptera_data'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train', 'val']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4, shuffle=True, num_workers=4) for x in ['train', 'val']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# Visualize a few images.
if False:
# Get a batch of training data.
inputs, classes = next(iter(dataloaders['train']))
# Make a grid from batch.
out = torchvision.utils.make_grid(inputs)
imshow(out, title=[class_names[x] for x in classes])
#--------------------
# Finetune the convnet.
# Show a model architecture.
#print(torchvision.models.resnet18(pretrained=True))
model_ft = models.resnet18(pretrained=True)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, 2)
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized.
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs.
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
# Train and evaluate.
model_ft = train_model(model_ft, dataloaders, device, dataset_sizes, criterion, optimizer_ft, exp_lr_scheduler, num_epochs=25)
visualize_model(model_ft, device, dataloaders, class_names)
# REF [site] >> https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
def convnet_as_fixed_feature_extractor_example():
# Load Data.
# Data augmentation and normalization for training.
# Just normalization for validation.
data_transforms = {
'train': transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'val': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
data_dir = 'data/hymenoptera_data'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train', 'val']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4, shuffle=True, num_workers=4) for x in ['train', 'val']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# Visualize a few images.
if False:
# Get a batch of training data.
inputs, classes = next(iter(dataloaders['train']))
# Make a grid from batch.
out = torchvision.utils.make_grid(inputs)
imshow(out, title=[class_names[x] for x in classes])
#--------------------
model_conv = torchvision.models.resnet18(pretrained=True)
# Freeze weights.
for param in model_conv.parameters():
param.requires_grad = False
# Parameters of newly constructed modules have requires_grad=True by default.
num_ftrs = model_conv.fc.in_features
model_conv.fc = nn.Linear(num_ftrs, 2)
model_conv = model_conv.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that only parameters of final layer are being optimized as opposed to before.
optimizer_conv = optim.SGD(model_conv.fc.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs.
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_conv, step_size=7, gamma=0.1)
#--------------------
# Train and evaluate.
model_conv = train_model(model_conv, dataloaders, device, dataset_sizes, criterion, optimizer_conv, exp_lr_scheduler, num_epochs=25)
visualize_model(model_conv, device, dataloaders, class_names)
plt.ioff()
plt.show()
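# Optional follow-up (not in the referenced tutorial): train_model() already
# reloads the best validation weights before returning, so persisting the
# result is a single torch.save call. The file name below is an arbitrary
# assumption.
def save_model_weights(model, path='./best_model_weights.pth'):
    torch.save(model.state_dict(), path)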
def main():
finetuning_example()
convnet_as_fixed_feature_extractor_example()
#--------------------------------------------------------------------
if '__main__' == __name__:
main()
| gpl-2.0 |
h2oai/h2o | py/testdir_single_jvm/test_GLM2_covtype_train_predict_all_all.py | 9 | 4367 | import unittest, random, sys, time
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_import as h2i, h2o_exec, h2o_glm, h2o_gbm, h2o_exec as h2e
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init(java_heap_GB=10)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GLM2_covtype_train_predict_all_all(self):
importFolderPath = "standard"
csvFilename = 'covtype.shuffled.data'
csvPathname = importFolderPath + "/" + csvFilename
hex_key = csvFilename + ".hex"
# Parse and Exec************************************************
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, schema='put', hex_key=hex_key, timeoutSecs=180)
execExpr="A.hex=%s" % parseResult['destination_key']
h2e.exec_expr(execExpr=execExpr, timeoutSecs=30)
# use exec to change the output col to binary, case_mode/case_val doesn't work if we use predict
# will have to live with random extract. will create variance
# class 1 = 1, everything else 0
y = 54
execExpr="A.hex[,%s]=(A.hex[,%s]==%s)" % (y+1, y+1, 1) # class 1
h2e.exec_expr(execExpr=execExpr, timeoutSecs=30)
inspect = h2o_cmd.runInspect(key="A.hex")
print "\n" + csvPathname, \
" numRows:", "{:,}".format(inspect['numRows']), \
" numCols:", "{:,}".format(inspect['numCols'])
print "Use same data (full) for train and test"
trainDataKey = "A.hex"
testDataKey = "A.hex"
# start at 90% rows + 1
# GLM, predict, CM*******************************************************8
kwargs = {
'response': 'C' + str(y+1),
'max_iter': 20,
'n_folds': 0,
# 'alpha': 0.1,
# 'lambda': 1e-5,
'alpha': 0.0,
'lambda': None,
'family': 'binomial',
}
timeoutSecs = 60
for trial in range(1):
# test/train split **********************************************8
aHack = {'destination_key': trainDataKey}
# GLM **********************************************8
start = time.time()
glm = h2o_cmd.runGLM(parseResult=aHack, timeoutSecs=timeoutSecs, pollTimeoutSecs=180, **kwargs)
print "glm end on ", parseResult['destination_key'], 'took', time.time() - start, 'seconds'
h2o_glm.simpleCheckGLM(self, glm, None, **kwargs)
modelKey = glm['glm_model']['_key']
submodels = glm['glm_model']['submodels']
# hackery to make it work when there's just one
validation = submodels[-1]['validation']
best_threshold = validation['best_threshold']
thresholds = validation['thresholds']
# have to look up the index for the cm, from the thresholds list
best_index = None
for i,t in enumerate(thresholds):
if t == best_threshold:
best_index = i
break
cms = validation['_cms']
cm = cms[best_index]
trainPctWrong = h2o_gbm.pp_cm_summary(cm['_arr']);
# Score **********************************************
predictKey = 'Predict.hex'
start = time.time()
predictResult = h2o_cmd.runPredict(
data_key=testDataKey,
model_key=modelKey,
destination_key=predictKey,
timeoutSecs=timeoutSecs)
predictCMResult = h2o.nodes[0].predict_confusion_matrix(
actual=testDataKey,
vactual='C' + str(y+1),
predict=predictKey,
vpredict='predict',
)
cm = predictCMResult['cm']
# These will move into the h2o_gbm.py
pctWrong = h2o_gbm.pp_cm_summary(cm);
self.assertEqual(pctWrong, trainPctWrong,"Should see the same error rate on train and predict? (same data set)")
print "\nTest\n==========\n"
print h2o_gbm.pp_cm(cm)
print "Trial #", trial, "completed"
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
schets/scikit-learn | sklearn/utils/graph.py | 50 | 6169 | """
Graph utilities and algorithms
Graphs are represented with their adjacency matrices, preferably using
sparse matrices.
"""
# Authors: Aric Hagberg <hagberg@lanl.gov>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Jake Vanderplas <vanderplas@astro.washington.edu>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from .graph_shortest_path import graph_shortest_path
###############################################################################
# Path and connected component analysis.
# Code adapted from networkx
def single_source_shortest_path_length(graph, source, cutoff=None):
"""Return the shortest path length from source to all reachable nodes.
Returns a dictionary of shortest path lengths keyed by target.
Parameters
----------
graph: sparse matrix or 2D array (preferably LIL matrix)
Adjacency matrix of the graph
source : node label
Starting node for path
cutoff : integer, optional
Depth to stop the search - only
paths of length <= cutoff are returned.
Examples
--------
>>> from sklearn.utils.graph import single_source_shortest_path_length
>>> import numpy as np
>>> graph = np.array([[ 0, 1, 0, 0],
... [ 1, 0, 1, 0],
... [ 0, 1, 0, 1],
... [ 0, 0, 1, 0]])
>>> single_source_shortest_path_length(graph, 0)
{0: 0, 1: 1, 2: 2, 3: 3}
>>> single_source_shortest_path_length(np.ones((6, 6)), 2)
{0: 1, 1: 1, 2: 0, 3: 1, 4: 1, 5: 1}
"""
if sparse.isspmatrix(graph):
graph = graph.tolil()
else:
graph = sparse.lil_matrix(graph)
seen = {} # level (number of hops) when seen in BFS
level = 0 # the current level
    next_level = [source] # list of nodes to check at next level
while next_level:
this_level = next_level # advance to next level
        next_level = set() # and start a new set (fringe)
for v in this_level:
if v not in seen:
seen[v] = level # set the level of vertex v
next_level.update(graph.rows[v])
if cutoff is not None and cutoff <= level:
break
level += 1
return seen # return all path lengths as dictionary
if hasattr(sparse, 'connected_components'):
connected_components = sparse.connected_components
else:
from .sparsetools import connected_components
###############################################################################
# Graph laplacian
def graph_laplacian(csgraph, normed=False, return_diag=False):
""" Return the Laplacian matrix of a directed graph.
For non-symmetric graphs the out-degree is used in the computation.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute normalized Laplacian.
return_diag : bool, optional
If True, then return diagonal as well as laplacian.
Returns
-------
lap : ndarray
The N x N laplacian matrix of graph.
diag : ndarray
The length-N diagonal of the laplacian matrix.
diag is returned only if return_diag is True.
Notes
-----
The Laplacian matrix of a graph is sometimes referred to as the
"Kirchoff matrix" or the "admittance matrix", and is useful in many
parts of spectral graph theory. In particular, the eigen-decomposition
of the laplacian matrix can give insight into many properties of the graph.
For non-symmetric directed graphs, the laplacian is computed using the
out-degree of each node.
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError('csgraph must be a square matrix or array')
if normed and (np.issubdtype(csgraph.dtype, np.int)
or np.issubdtype(csgraph.dtype, np.uint)):
csgraph = csgraph.astype(np.float)
if sparse.isspmatrix(csgraph):
return _laplacian_sparse(csgraph, normed=normed,
return_diag=return_diag)
else:
return _laplacian_dense(csgraph, normed=normed,
return_diag=return_diag)
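# Illustrative sketch (not part of the public API): for a 3-node path graph the
# unnormalized laplacian returned above is simply degree - adjacency, i.e.
# [[1, -1, 0], [-1, 2, -1], [0, -1, 1]]; with normed=True it would instead be
# I - D^{-1/2} A D^{-1/2}.
def _graph_laplacian_sketch():
    adjacency = np.array([[0, 1, 0],
                          [1, 0, 1],
                          [0, 1, 0]])
    return graph_laplacian(adjacency)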
def _laplacian_sparse(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
if not graph.format == 'coo':
lap = (-graph).tocoo()
else:
lap = -graph.copy()
diag_mask = (lap.row == lap.col)
if not diag_mask.sum() == n_nodes:
# The sparsity pattern of the matrix has holes on the diagonal,
# we need to fix that
diag_idx = lap.row[diag_mask]
diagonal_holes = list(set(range(n_nodes)).difference(diag_idx))
new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
new_row = np.concatenate([lap.row, diagonal_holes])
new_col = np.concatenate([lap.col, diagonal_holes])
lap = sparse.coo_matrix((new_data, (new_row, new_col)),
shape=lap.shape)
diag_mask = (lap.row == lap.col)
lap.data[diag_mask] = 0
w = -np.asarray(lap.sum(axis=1)).squeeze()
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap.data /= w[lap.row]
lap.data /= w[lap.col]
lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(
lap.data.dtype)
else:
lap.data[diag_mask] = w[lap.row[diag_mask]]
if return_diag:
return lap, w
return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = (1 - w_zeros).astype(lap.dtype)
else:
lap.flat[::n_nodes + 1] = w.astype(lap.dtype)
if return_diag:
return lap, w
return lap
| bsd-3-clause |
rubasben/namebench | nb_third_party/dns/node.py | 215 | 5914 | # Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS nodes. A node is a set of rdatasets."""
import StringIO
import dns.rdataset
import dns.rdatatype
import dns.renderer
class Node(object):
"""A DNS node.
A node is a set of rdatasets
@ivar rdatasets: the node's rdatasets
@type rdatasets: list of dns.rdataset.Rdataset objects"""
__slots__ = ['rdatasets']
def __init__(self):
"""Initialize a DNS node.
"""
        self.rdatasets = []
def to_text(self, name, **kw):
"""Convert a node to text format.
Each rdataset at the node is printed. Any keyword arguments
to this method are passed on to the rdataset's to_text() method.
@param name: the owner name of the rdatasets
@type name: dns.name.Name object
@rtype: string
"""
s = StringIO.StringIO()
for rds in self.rdatasets:
print >> s, rds.to_text(name, **kw)
return s.getvalue()[:-1]
def __repr__(self):
return '<DNS node ' + str(id(self)) + '>'
def __eq__(self, other):
"""Two nodes are equal if they have the same rdatasets.
@rtype: bool
"""
#
# This is inefficient. Good thing we don't need to do it much.
#
for rd in self.rdatasets:
if rd not in other.rdatasets:
return False
for rd in other.rdatasets:
if rd not in self.rdatasets:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __len__(self):
return len(self.rdatasets)
def __iter__(self):
return iter(self.rdatasets)
def find_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE,
create=False):
"""Find an rdataset matching the specified properties in the
current node.
@param rdclass: The class of the rdataset
@type rdclass: int
@param rdtype: The type of the rdataset
@type rdtype: int
@param covers: The covered type. Usually this value is
dns.rdatatype.NONE, but if the rdtype is dns.rdatatype.SIG or
dns.rdatatype.RRSIG, then the covers value will be the rdata
type the SIG/RRSIG covers. The library treats the SIG and RRSIG
types as if they were a family of
types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA). This makes RRSIGs much
easier to work with than if RRSIGs covering different rdata
types were aggregated into a single RRSIG rdataset.
@type covers: int
@param create: If True, create the rdataset if it is not found.
@type create: bool
@raises KeyError: An rdataset of the desired type and class does
not exist and I{create} is not True.
@rtype: dns.rdataset.Rdataset object
"""
for rds in self.rdatasets:
if rds.match(rdclass, rdtype, covers):
return rds
if not create:
raise KeyError
rds = dns.rdataset.Rdataset(rdclass, rdtype)
self.rdatasets.append(rds)
return rds
def get_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE,
create=False):
"""Get an rdataset matching the specified properties in the
current node.
None is returned if an rdataset of the specified type and
class does not exist and I{create} is not True.
@param rdclass: The class of the rdataset
@type rdclass: int
@param rdtype: The type of the rdataset
@type rdtype: int
@param covers: The covered type.
@type covers: int
@param create: If True, create the rdataset if it is not found.
@type create: bool
@rtype: dns.rdataset.Rdataset object or None
"""
try:
rds = self.find_rdataset(rdclass, rdtype, covers, create)
except KeyError:
rds = None
return rds
def delete_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE):
"""Delete the rdataset matching the specified properties in the
current node.
If a matching rdataset does not exist, it is not an error.
@param rdclass: The class of the rdataset
@type rdclass: int
@param rdtype: The type of the rdataset
@type rdtype: int
@param covers: The covered type.
@type covers: int
"""
rds = self.get_rdataset(rdclass, rdtype, covers)
        if rds is not None:
self.rdatasets.remove(rds)
def replace_rdataset(self, replacement):
"""Replace an rdataset.
It is not an error if there is no rdataset matching I{replacement}.
Ownership of the I{replacement} object is transferred to the node;
in other words, this method does not store a copy of I{replacement}
at the node, it stores I{replacement} itself.
"""
self.delete_rdataset(replacement.rdclass, replacement.rdtype,
replacement.covers)
self.rdatasets.append(replacement)
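# Usage sketch (not part of dnspython itself), assuming the usual
# dns.rdataclass / dns.rdatatype constants: create an empty node, then let
# find_rdataset() lazily create an IN/A rdataset for it.
def _node_usage_sketch():
    import dns.rdataclass
    node = Node()
    rds = node.find_rdataset(dns.rdataclass.IN, dns.rdatatype.A, create=True)
    return rds is node.get_rdataset(dns.rdataclass.IN, dns.rdatatype.A)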
| apache-2.0 |
schets/scikit-learn | sklearn/neighbors/tests/test_approximate.py | 141 | 18692 | """
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, n_candidates in enumerate(n_candidates_values):
lshf = LSHForest(n_candidates=n_candidates)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
    # Accuracies should be non-decreasing as n_candidates increases
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = LSHForest(n_candidates=500, n_estimators=t)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
    # Accuracies should be non-decreasing as n_estimators increases
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether desired number of neighbors are returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
# Test random point(not in the data set)
query = rng.randn(n_features)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
    # Checks whether returned distances are less than `radius`.
# At least one point should be returned when the `radius` is set
# to mean distance from the considering point to other points in
# the database.
    # Moreover, this test compares the radius neighbors of LSHForest
    # with those of `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)]
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
    # Each row of dists and inds has a different length, so they cannot be
    # regular 2D arrays; hence the use of the object dtype.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)]
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
# Build a LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)
# define a query aligned with the first axis
query = [1., 0.]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
# The first point is almost aligned with the query (very small angle),
# the cosine distance should therefore be almost null:
assert_almost_equal(dists[0], 0, decimal=5)
    # The second point forms an angle of 45 degrees with the query vector
assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
    # The third point is orthogonal to the query vector, hence at a distance
# exactly one:
assert_almost_equal(dists[2], 1)
    # The last point is almost collinear with the query but has opposite sign,
# therefore it has a cosine 'distance' very close to the maximum possible
# value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
# is lying on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
    # If we perform the same query with a slightly lower radius, the third
# point of the dataset that lay on the boundary of the previous query
# is now rejected:
eps = np.finfo(np.float64).eps
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
assert_array_equal(np.sort(exact_idx[0]), [0, 1])
assert_array_equal(np.sort(approx_idx[0]), [0, 1])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
def test_distances():
# Checks whether returned neighbors are from closest to farthest.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)]
distances, neighbors = lshf.kneighbors(query,
n_neighbors=n_neighbors,
return_distance=True)
# Returned neighbors should be from closest to farthest, that is
# increasing distance values.
assert_true(np.all(np.diff(distances[0]) >= 0))
# Note: the radius_neighbors method does not guarantee the order of
# the results.
def test_fit():
# Checks whether `fit` method sets all attribute values correctly.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators)
lshf.fit(X)
# _input_array = X
assert_array_equal(X, lshf._fit_X)
# A hash function g(p) for each tree
assert_equal(n_estimators, len(lshf.hash_functions_))
# Hash length = 32
assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
# Number of trees_ in the forest
assert_equal(n_estimators, len(lshf.trees_))
# Each tree has entries for every data point
assert_equal(n_samples, len(lshf.trees_[0]))
# Original indices after sorting the hashes
assert_equal(n_estimators, len(lshf.original_indices_))
# Each set of original indices in a tree has entries for every data point
assert_equal(n_samples, len(lshf.original_indices_[0]))
def test_partial_fit():
    # Checks whether the inserted array is consistent with the fitted data.
# `partial_fit` method should set all attribute values correctly.
n_samples = 12
n_samples_partial_fit = 3
n_features = 2
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
lshf = LSHForest()
# Test unfitted estimator
lshf.partial_fit(X)
assert_array_equal(X, lshf._fit_X)
lshf.fit(X)
# Insert wrong dimension
assert_raises(ValueError, lshf.partial_fit,
np.random.randn(n_samples_partial_fit, n_features - 1))
lshf.partial_fit(X_partial_fit)
    # size of _fit_X = n_samples + n_samples_partial_fit after insertion
assert_equal(lshf._fit_X.shape[0],
n_samples + n_samples_partial_fit)
    # size of original_indices_[0] = n_samples + n_samples_partial_fit
assert_equal(len(lshf.original_indices_[0]),
n_samples + n_samples_partial_fit)
    # size of trees_[1] = n_samples + n_samples_partial_fit
assert_equal(len(lshf.trees_[1]),
n_samples + n_samples_partial_fit)
def test_hash_functions():
# Checks randomness of hash functions.
# Variance and mean of each hash function (projection vector)
# should be different from flattened array of hash functions.
# If hash functions are not randomly built (seeded with
# same value), variances and means of all functions are equal.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators,
random_state=rng.randint(0, np.iinfo(np.int32).max))
lshf.fit(X)
hash_functions = []
for i in range(n_estimators):
hash_functions.append(lshf.hash_functions_[i].components_)
for i in range(n_estimators):
assert_not_equal(np.var(hash_functions),
np.var(lshf.hash_functions_[i].components_))
for i in range(n_estimators):
assert_not_equal(np.mean(hash_functions),
np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
# Checks whether candidates are sufficient.
# This should handle the cases when number of candidates is 0.
# User should be warned when number of candidates is less than
# requested number of neighbors.
X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
[6, 10, 2]], dtype=np.float32)
X_test = np.array([7, 10, 3], dtype=np.float32)
# For zero candidates
lshf = LSHForest(min_hash_match=32)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (3, 32))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=3)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
assert_equal(distances.shape[1], 3)
# For candidates less than n_neighbors
lshf = LSHForest(min_hash_match=31)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (5, 31))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=5)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
assert_equal(distances.shape[1], 5)
def test_graphs():
# Smoke tests for graph methods.
n_samples_sizes = [5, 10, 20]
n_features = 3
rng = np.random.RandomState(42)
for n_samples in n_samples_sizes:
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
lshf.fit(X)
kneighbors_graph = lshf.kneighbors_graph(X)
radius_neighbors_graph = lshf.radius_neighbors_graph(X)
assert_equal(kneighbors_graph.shape[0], n_samples)
assert_equal(kneighbors_graph.shape[1], n_samples)
assert_equal(radius_neighbors_graph.shape[0], n_samples)
assert_equal(radius_neighbors_graph.shape[1], n_samples)
def test_sparse_input():
# note: Fixed random state in sp.rand is not supported in older scipy.
# The test should succeed regardless.
X1 = sp.rand(50, 100)
X2 = sp.rand(10, 100)
forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)
d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
assert_almost_equal(d_sparse, d_dense)
assert_almost_equal(i_sparse, i_dense)
d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
return_distance=True)
d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
return_distance=True)
assert_equal(d_sparse.shape, d_dense.shape)
for a, b in zip(d_sparse, d_dense):
assert_almost_equal(a, b)
for a, b in zip(i_sparse, i_dense):
assert_almost_equal(a, b)
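# Minimal usage sketch (not a test): the fit / kneighbors pattern exercised by
# the tests above, on a small random dataset.
def _lshforest_usage_sketch():
    rng = np.random.RandomState(0)
    X = rng.rand(20, 5)
    lshf = LSHForest(n_estimators=5, random_state=0).fit(X)
    distances, indices = lshf.kneighbors(X[:2], n_neighbors=3)
    return distances.shape, indices.shape  # both (2, 3)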
| bsd-3-clause |
luoyetx/mxnet | example/stochastic-depth/sd_cifar10.py | 19 | 10326 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
###########################################################################################
# Implementation of the stochastic depth algorithm described in the paper
#
# Huang, Gao, et al. "Deep networks with stochastic depth." arXiv preprint arXiv:1603.09382 (2016).
#
# Reference torch implementation can be found at https://github.com/yueatsprograms/Stochastic_Depth
#
# There are some differences in the implementation:
# - A BN->ReLU->Conv is used for skip connection when input and output shapes are different,
# as opposed to a padding layer.
# - The residual block is different: we use BN->ReLU->Conv->BN->ReLU->Conv, as opposed to
# Conv->BN->ReLU->Conv->BN (->ReLU also applied to skip connection).
# - We did not try to match with the same initialization, learning rate scheduling, etc.
#
#--------------------------------------------------------------------------------
# A sample from the running log (We achieved ~9.4% error after 500 epochs, some
# more careful tuning of the hyperparameters and maybe also the architecture is needed
# to achieve the reported numbers in the paper):
#
# INFO:root:Epoch[80] Batch [50] Speed: 1020.95 samples/sec Train-accuracy=0.910080
# INFO:root:Epoch[80] Batch [100] Speed: 1013.41 samples/sec Train-accuracy=0.912031
# INFO:root:Epoch[80] Batch [150] Speed: 1035.48 samples/sec Train-accuracy=0.913438
# INFO:root:Epoch[80] Batch [200] Speed: 1045.00 samples/sec Train-accuracy=0.907344
# INFO:root:Epoch[80] Batch [250] Speed: 1055.32 samples/sec Train-accuracy=0.905937
# INFO:root:Epoch[80] Batch [300] Speed: 1071.71 samples/sec Train-accuracy=0.912500
# INFO:root:Epoch[80] Batch [350] Speed: 1033.73 samples/sec Train-accuracy=0.910937
# INFO:root:Epoch[80] Train-accuracy=0.919922
# INFO:root:Epoch[80] Time cost=48.348
# INFO:root:Saved checkpoint to "sd-110-0081.params"
# INFO:root:Epoch[80] Validation-accuracy=0.880142
# ...
# INFO:root:Epoch[115] Batch [50] Speed: 1037.04 samples/sec Train-accuracy=0.937040
# INFO:root:Epoch[115] Batch [100] Speed: 1041.12 samples/sec Train-accuracy=0.934219
# INFO:root:Epoch[115] Batch [150] Speed: 1036.02 samples/sec Train-accuracy=0.933125
# INFO:root:Epoch[115] Batch [200] Speed: 1057.49 samples/sec Train-accuracy=0.938125
# INFO:root:Epoch[115] Batch [250] Speed: 1060.56 samples/sec Train-accuracy=0.933438
# INFO:root:Epoch[115] Batch [300] Speed: 1046.25 samples/sec Train-accuracy=0.935625
# INFO:root:Epoch[115] Batch [350] Speed: 1043.83 samples/sec Train-accuracy=0.927188
# INFO:root:Epoch[115] Train-accuracy=0.938477
# INFO:root:Epoch[115] Time cost=47.815
# INFO:root:Saved checkpoint to "sd-110-0116.params"
# INFO:root:Epoch[115] Validation-accuracy=0.884415
# ...
# INFO:root:Saved checkpoint to "sd-110-0499.params"
# INFO:root:Epoch[498] Validation-accuracy=0.908554
# INFO:root:Epoch[499] Batch [50] Speed: 1068.28 samples/sec Train-accuracy=0.991422
# INFO:root:Epoch[499] Batch [100] Speed: 1053.10 samples/sec Train-accuracy=0.991094
# INFO:root:Epoch[499] Batch [150] Speed: 1042.89 samples/sec Train-accuracy=0.995156
# INFO:root:Epoch[499] Batch [200] Speed: 1066.22 samples/sec Train-accuracy=0.991406
# INFO:root:Epoch[499] Batch [250] Speed: 1050.56 samples/sec Train-accuracy=0.990781
# INFO:root:Epoch[499] Batch [300] Speed: 1032.02 samples/sec Train-accuracy=0.992500
# INFO:root:Epoch[499] Batch [350] Speed: 1062.16 samples/sec Train-accuracy=0.992969
# INFO:root:Epoch[499] Train-accuracy=0.994141
# INFO:root:Epoch[499] Time cost=47.401
# INFO:root:Saved checkpoint to "sd-110-0500.params"
# INFO:root:Epoch[499] Validation-accuracy=0.906050
# ###########################################################################################
import os
import sys
import mxnet as mx
import logging
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from utils import get_data
import sd_module
def residual_module(death_rate, n_channel, name_scope, context, stride=1, bn_momentum=0.9):
data = mx.sym.Variable(name_scope + '_data')
# computation branch:
# BN -> ReLU -> Conv -> BN -> ReLU -> Conv
bn1 = mx.symbol.BatchNorm(data=data, name=name_scope + '_bn1', fix_gamma=False,
momentum=bn_momentum,
# Same with https://github.com/soumith/cudnn.torch/blob/master/BatchNormalization.lua
# cuDNN v5 don't allow a small eps of 1e-5
eps=2e-5
)
relu1 = mx.symbol.Activation(data=bn1, act_type='relu', name=name_scope+'_relu1')
conv1 = mx.symbol.Convolution(data=relu1, num_filter=n_channel, kernel=(3, 3), pad=(1,1),
stride=(stride, stride), name=name_scope+'_conv1')
bn2 = mx.symbol.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_momentum,
eps=2e-5, name=name_scope+'_bn2')
relu2 = mx.symbol.Activation(data=bn2, act_type='relu', name=name_scope+'_relu2')
conv2 = mx.symbol.Convolution(data=relu2, num_filter=n_channel, kernel=(3, 3), pad=(1,1),
stride=(1, 1), name=name_scope+'_conv2')
sym_compute = conv2
# skip branch
if stride > 1:
sym_skip = mx.symbol.BatchNorm(data=data, fix_gamma=False, momentum=bn_momentum,
eps=2e-5, name=name_scope+'_skip_bn')
sym_skip = mx.symbol.Activation(data=sym_skip, act_type='relu', name=name_scope+'_skip_relu')
sym_skip = mx.symbol.Convolution(data=sym_skip, num_filter=n_channel, kernel=(3, 3), pad=(1, 1),
stride=(stride, stride), name=name_scope+'_skip_conv')
else:
sym_skip = None
mod = sd_module.StochasticDepthModule(sym_compute, sym_skip, data_names=[name_scope+'_data'],
context=context, death_rate=death_rate)
return mod
#################################################################################
# Build architecture
# Configurations
bn_momentum = 0.9
contexts = [mx.context.gpu(i) for i in range(1)]
n_residual_blocks = 18
death_rate = 0.5
death_mode = 'linear_decay' # 'linear_decay' or 'uniform'
n_classes = 10
def get_death_rate(i_res_block):
n_total_res_blocks = n_residual_blocks * 3
if death_mode == 'linear_decay':
my_death_rate = float(i_res_block) / n_total_res_blocks * death_rate
else:
my_death_rate = death_rate
return my_death_rate
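# Illustrative helper (not called by the script): with death_mode set to
# 'linear_decay', the death rate grows linearly with the residual block index,
# from 0 for the first block up to just under `death_rate` for the last of the
# 3 * n_residual_blocks residual modules.
def _death_rate_schedule_sketch():
    return [get_death_rate(i) for i in range(3 * n_residual_blocks)]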
# 0. base ConvNet
sym_base = mx.sym.Variable('data')
sym_base = mx.sym.Convolution(data=sym_base, num_filter=16, kernel=(3, 3), pad=(1, 1), name='conv1')
sym_base = mx.sym.BatchNorm(data=sym_base, name='bn1', fix_gamma=False, momentum=bn_momentum, eps=2e-5)
sym_base = mx.sym.Activation(data=sym_base, name='relu1', act_type='relu')
mod_base = mx.mod.Module(sym_base, context=contexts, label_names=None)
# 1. container
mod_seq = mx.mod.SequentialModule()
mod_seq.add(mod_base)
# 2. first group, 16 x 28 x 28
i_res_block = 0
for i in range(n_residual_blocks):
mod_seq.add(residual_module(get_death_rate(i_res_block), 16, 'res_A_%d' % i, contexts), auto_wiring=True)
i_res_block += 1
# 3. second group, 32 x 14 x 14
mod_seq.add(residual_module(get_death_rate(i_res_block), 32, 'res_AB', contexts, stride=2), auto_wiring=True)
i_res_block += 1
for i in range(n_residual_blocks-1):
mod_seq.add(residual_module(get_death_rate(i_res_block), 32, 'res_B_%d' % i, contexts), auto_wiring=True)
i_res_block += 1
# 4. third group, 64 x 7 x 7
mod_seq.add(residual_module(get_death_rate(i_res_block), 64, 'res_BC', contexts, stride=2), auto_wiring=True)
i_res_block += 1
for i in range(n_residual_blocks-1):
mod_seq.add(residual_module(get_death_rate(i_res_block), 64, 'res_C_%d' % i, contexts), auto_wiring=True)
i_res_block += 1
# 5. final module
sym_final = mx.sym.Variable('data')
sym_final = mx.sym.Pooling(data=sym_final, kernel=(7, 7), pool_type='avg', name='global_pool')
sym_final = mx.sym.FullyConnected(data=sym_final, num_hidden=n_classes, name='logits')
sym_final = mx.sym.SoftmaxOutput(data=sym_final, name='softmax')
mod_final = mx.mod.Module(sym_final, context=contexts)
mod_seq.add(mod_final, auto_wiring=True, take_labels=True)
#################################################################################
# Training
num_examples = 60000
batch_size = 128
base_lr = 0.008
lr_factor = 0.5
lr_factor_epoch = 100
momentum = 0.9
weight_decay = 0.00001
kv_store = 'local'
initializer = mx.init.Xavier(factor_type="in", magnitude=2.34)
num_epochs = 500
epoch_size = num_examples // batch_size
lr_scheduler = mx.lr_scheduler.FactorScheduler(step=max(int(epoch_size * lr_factor_epoch), 1), factor=lr_factor)
batch_end_callbacks = [mx.callback.Speedometer(batch_size, 50)]
epoch_end_callbacks = [mx.callback.do_checkpoint('sd-%d' % (n_residual_blocks * 6 + 2))]
args = type('', (), {})()
args.batch_size = batch_size
args.data_dir = os.path.join(os.path.dirname(__file__), "data")
kv = mx.kvstore.create(kv_store)
train, val = get_data.get_cifar10_iterator(args, kv)
logging.basicConfig(level=logging.DEBUG)
mod_seq.fit(train, val,
optimizer_params={'learning_rate': base_lr, 'momentum': momentum,
'lr_scheduler': lr_scheduler, 'wd': weight_decay},
num_epoch=num_epochs, batch_end_callback=batch_end_callbacks,
epoch_end_callback=epoch_end_callbacks,
initializer=initializer)
| apache-2.0 |
herilalaina/scikit-learn | examples/plot_feature_stacker.py | 78 | 1911 | """
=================================================
Concatenating multiple feature extraction methods
=================================================
In many real-world examples, there are many ways to extract features from a
dataset. Often it is beneficial to combine several methods to obtain good
performance. This example shows how to use ``FeatureUnion`` to combine
features obtained by PCA and univariate selection.
Combining features using this transformer has the benefit that it allows
cross validation and grid searches over the whole process.
The combination used in this example is not particularly helpful on this
dataset and is only used to illustrate the usage of FeatureUnion.
"""
# Author: Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 clause
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
iris = load_iris()
X, y = iris.data, iris.target
# This dataset is way too high-dimensional. Better do PCA:
pca = PCA(n_components=2)
# Maybe some original features where good, too?
selection = SelectKBest(k=1)
# Build estimator from PCA and Univariate selection:
combined_features = FeatureUnion([("pca", pca), ("univ_select", selection)])
# Use combined features to transform dataset:
X_features = combined_features.fit(X, y).transform(X)
svm = SVC(kernel="linear")
# Do grid search over k, n_components and C:
pipeline = Pipeline([("features", combined_features), ("svm", svm)])
param_grid = dict(features__pca__n_components=[1, 2, 3],
features__univ_select__k=[1, 2],
svm__C=[0.1, 1, 10])
grid_search = GridSearchCV(pipeline, param_grid=param_grid, verbose=10)
grid_search.fit(X, y)
print(grid_search.best_estimator_)
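# Optional follow-up (sketch): the winning parameter combination can be read
# from best_params_; its keys use the same <step>__<substep>__<param> naming
# convention as param_grid above.
print(grid_search.best_params_)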
| bsd-3-clause |
h2oai/h2o | py/testdir_single_jvm/test_speedrf_params_rand2.py | 9 | 3800 | import unittest, random, sys, time
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_rf, h2o_import as h2i, h2o_util
paramDict = {
# 2 new
'destination_key': ['model_keyA', '012345', '__hello'],
'cols': [None, None, None, None, None, '0,1,2,3,4,5,6,7,8','C1,C2,C3,C4,C5,C6,C7,C8'],
# exclusion handled below, otherwise exception:
# ...Arguments 'cols', 'ignored_cols_by_name', and 'ignored_cols' are exclusive
'ignored_cols_by_name': [None, None, None, None, 'C1','C2','C3','C4','C5','C6','C7','C8','C9'],
# probably can't deal with mixtures of cols and ignore, so just use cols for now
# could handle exclusion below
# 'ignored_cols': [None, None, None, None, None, '0,1,2,3,4,5,6,7,8','C1,C2,C3,C4,C5,C6,C7,C8'],
'n_folds': [None, 2, 5], # has to be >= 2?
'keep_cross_validation_splits': [None, 0, 1],
# 'classification': [None, 0, 1],
# doesn't support regression yet
'classification': [None, 1],
'balance_classes': [None, 0, 1],
# never run with unconstrained balance_classes size if random sets balance_classes..too slow
'max_after_balance_size': [.1, 1, 2],
'oobee': [None, 0, 1],
'sampling_strategy': [None, 'RANDOM'],
'select_stat_type': [None, 'ENTROPY', 'GINI'],
'response': [54, 'C55'], # equivalent. None is not legal
'validation': [None, 'covtype.data.hex'],
'ntrees': [1], # just do one tree
'importance': [None, 0, 1],
'max_depth': [None, 1,10,20,100],
'nbins': [None,5,10,100,1000],
'sample_rate': [None,0.20,0.40,0.60,0.80,0.90],
'seed': [None,'0','1','11111','19823134','1231231'],
# Can't have more mtries than cols..force to 4 if cols is not None?
'mtries': [1,3,5,7],
}
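# Hedged sketch of what a random-parameter picker along the lines of
# h2o_util.pickRandParams might do (the real helper lives in h2o_util and may
# behave differently, e.g. mutating params in place): choose one candidate
# value per key and overlay it on the fixed base parameters.
def _pick_random_params_sketch(param_dict, base_params):
    import random
    chosen = dict(base_params)
    for name, candidates in param_dict.items():
        chosen[name] = random.choice(candidates)
    return chosen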
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init(java_heap_GB=10)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_speedrf_params_rand2_fvec(self):
csvPathname = 'standard/covtype.data'
hex_key = 'covtype.data.hex'
for trial in range(10):
# params is mutable. This is default.
            # response is required for SpeedRF
params = {
'response': 'C55',
'ntrees': 1, 'mtries': 7,
'balance_classes': 0,
# never run with unconstrained balance_classes size if random sets balance_classes..too slow
'max_after_balance_size': 2,
'importance': 0}
colX = h2o_util.pickRandParams(paramDict, params)
if 'cols' in params and params['cols']:
# exclusion
if 'ignored_cols_by_name' in params:
params['ignored_cols_by_name'] = None
else:
if 'ignored_cols_by_name' in params and params['ignored_cols_by_name']:
params['mtries'] = random.randint(1,53)
else:
params['mtries'] = random.randint(1,54)
kwargs = params.copy()
# adjust timeoutSecs with the number of trees
timeoutSecs = 80 + ((kwargs['ntrees']*80) * max(1,kwargs['mtries']/60) )
start = time.time()
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, schema='put', hex_key=hex_key)
h2o_cmd.runSpeeDRF(parseResult=parseResult, timeoutSecs=timeoutSecs, retryDelaySecs=1, **kwargs)
elapsed = time.time()-start
print "Trial #", trial, "completed in", elapsed, "seconds.", "%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
schets/scikit-learn | sklearn/linear_model/tests/test_randomized_l1.py | 213 | 4690 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.linear_model.randomized_l1 import (lasso_stability_path,
RandomizedLasso,
RandomizedLogisticRegression)
from sklearn.datasets import load_diabetes, load_iris
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model.base import center_data
diabetes = load_diabetes()
X = diabetes.data
y = diabetes.target
X = StandardScaler().fit_transform(X)
X = X[:, [2, 3, 6, 7, 8]]
# test that the feature score of the best features
F, _ = f_regression(X, y)
def test_lasso_stability_path():
# Check lasso stability path
# Load diabetes data and add noisy features
scaling = 0.3
coef_grid, scores_path = lasso_stability_path(X, y, scaling=scaling,
random_state=42,
n_resampling=30)
assert_array_equal(np.argsort(F)[-3:],
np.argsort(np.sum(scores_path, axis=1))[-3:])
def test_randomized_lasso():
# Check randomized lasso
scaling = 0.3
selection_threshold = 0.5
# or with 1 alpha
clf = RandomizedLasso(verbose=False, alpha=1, random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
# or with many alphas
clf = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_equal(clf.all_scores_.shape, (X.shape[1], 2))
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
X_r = clf.transform(X)
X_full = clf.inverse_transform(X_r)
assert_equal(X_r.shape[1], np.sum(feature_scores > selection_threshold))
assert_equal(X_full.shape, X.shape)
clf = RandomizedLasso(verbose=False, alpha='aic', random_state=42,
scaling=scaling)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(feature_scores, X.shape[1] * [1.])
clf = RandomizedLasso(verbose=False, scaling=-0.1)
assert_raises(ValueError, clf.fit, X, y)
clf = RandomizedLasso(verbose=False, scaling=1.1)
assert_raises(ValueError, clf.fit, X, y)
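# Illustrative sketch (not a test): fit RandomizedLasso once and keep the
# columns whose stability scores exceed the threshold, mirroring what the
# assertions above compare against the univariate F scores.
def _randomized_lasso_selection_sketch():
    clf = RandomizedLasso(alpha=1, scaling=0.3, selection_threshold=0.5,
                          random_state=42)
    clf.fit(X, y)
    return np.where(clf.scores_ > 0.5)[0]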
def test_randomized_logistic():
# Check randomized sparse logistic regression
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
X_orig = X.copy()
feature_scores = clf.fit(X, y).scores_
assert_array_equal(X, X_orig) # fit does not modify X
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
clf = RandomizedLogisticRegression(verbose=False, C=[1., 0.5],
random_state=42, scaling=scaling,
n_resampling=50, tol=1e-3)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
def test_randomized_logistic_sparse():
# Check randomized sparse logistic regression on sparse data
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
# center here because sparse matrices are usually not centered
X, y, _, _, _ = center_data(X, y, True, True)
X_sp = sparse.csr_matrix(X)
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores = clf.fit(X, y).scores_
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores_sp = clf.fit(X_sp, y).scores_
assert_array_equal(feature_scores, feature_scores_sp)
| bsd-3-clause |
h2oai/h2o | py/testdir_multi_jvm/test_GLM2_catdata.py | 9 | 2066 | import unittest, time, sys, copy
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_browse as h2b, h2o_import as h2i
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init(3,java_heap_GB=4)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GLM2_catdata_hosts(self):
# these are still in /home/kevin/scikit/datasets/logreg
# FIX! just two for now..
csvFilenameList = [
"1_100kx7_logreg.data.gz",
"2_100kx7_logreg.data.gz"
]
# pop open a browser on the cloud
### h2b.browseTheCloud()
        # save the first, for all comparisons, to avoid slow drift with each iteration
validation1 = {}
for csvFilename in csvFilenameList:
csvPathname = csvFilename
parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, schema='put')
print "\n" + csvPathname
start = time.time()
            # FIX! why can't I include 0 here? it keeps getting 'unable to solve' if 0 is included
# 0 by itself is okay?
kwargs = {'response': 7, 'family': "binomial", 'n_folds': 3, 'lambda': 1e-4}
timeoutSecs = 200
glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
h2o_glm.simpleCheckGLM(self, glm, 'C7', **kwargs)
print "glm end on ", csvPathname, 'took', time.time() - start, 'seconds'
### h2b.browseJsonHistoryAsUrlLastMatch("GLM")
# compare this glm to the first one. since the files are replications, the results
# should be similar?
validation = glm['glm_model']['submodels'][0]['validation']
if validation1:
h2o_glm.compareToFirstGlm(self, 'auc', validation, validation1)
else:
validation1 = copy.deepcopy(validation)
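# Hedged sketch (not necessarily what h2o_glm.compareToFirstGlm does): one way
# to check that the 'auc' of two validations stays within a relative tolerance,
# which is the idea behind the comparison in the loop above.
def _auc_close_sketch(auc_a, auc_b, rel_tol=0.1):
    return abs(auc_a - auc_b) <= rel_tol * max(abs(auc_a), abs(auc_b))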
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
nhuntwalker/astroML | book_figures/chapter10/fig_LINEAR_SVM.py | 4 | 6143 | """
SVM classification of LINEAR data
---------------------------------
Figure 10.23
Supervised classification of periodic variable stars from the LINEAR data set
using a support vector machines method. The training sample includes five
input classes. The top row shows clusters derived using two attributes
(g - i and log P) and the bottom row shows analogous diagrams for
classification based on seven attributes (colors u - g, g - i, i - K, and
J - K; log P, light-curve amplitude, and light-curve skewness).
See table 10.3 for the classification performance.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
from __future__ import print_function
import numpy as np
from matplotlib import pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import train_test_split
from astroML.decorators import pickle_results
from astroML.datasets import fetch_LINEAR_geneva
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
data = fetch_LINEAR_geneva()
attributes = [('gi', 'logP'),
('gi', 'logP', 'ug', 'iK', 'JK', 'amp', 'skew')]
labels = ['$u-g$', '$g-i$', '$i-K$', '$J-K$',
r'$\log(P)$', 'amplitude', 'skew']
cls = 'LCtype'
Ntrain = 3000
#------------------------------------------------------------
# Create attribute arrays
X = []
y = []
for attr in attributes:
X.append(np.vstack([data[a] for a in attr]).T)
LCtype = data[cls].copy()
# there is no #3. For a better color scheme in plots,
# we'll set 6->3
LCtype[LCtype == 6] = 3
y.append(LCtype)
#@pickle_results('LINEAR_SVM.pkl')
def compute_SVM_results(i_train, i_test):
classifiers = []
predictions = []
Xtests = []
ytests = []
Xtrains = []
ytrains = []
for i in range(len(attributes)):
Xtrain = X[i][i_train]
Xtest = X[i][i_test]
ytrain = y[i][i_train]
ytest = y[i][i_test]
clf = SVC(kernel='linear', class_weight=None)
clf.fit(Xtrain, ytrain)
y_pred = clf.predict(Xtest)
classifiers.append(clf)
predictions.append(y_pred)
return classifiers, predictions
i = np.arange(len(data))
i_train, i_test = train_test_split(i, random_state=0, train_size=2000)
clfs, ypred = compute_SVM_results(i_train, i_test)
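# Illustrative sketch (not used by the figure code): held-out accuracy for each
# attribute set, comparing the SVM predictions to the true LC types (with
# class 6 relabelled to 3 as above).
def _heldout_accuracy_sketch():
    return [np.mean(ypred[k] == y[k][i_test]) for k in range(len(attributes))]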
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 5))
fig.subplots_adjust(hspace=0.1, wspace=0.1)
class_labels = []
for i in range(2):
Xtest = X[i][i_test]
ytest = y[i][i_test]
amp = data['amp'][i_test]
# Plot the resulting classifications
ax1 = fig.add_subplot(221 + 2 * i)
ax1.scatter(Xtest[:, 0], Xtest[:, 1],
c=ypred[i], edgecolors='none', s=4, linewidths=0)
ax1.set_ylabel(r'$\log(P)$')
ax2 = plt.subplot(222 + 2 * i)
ax2.scatter(amp, Xtest[:, 1],
c=ypred[i], edgecolors='none', s=4, lw=0)
#------------------------------
# set axis limits
ax1.set_xlim(-0.6, 2.1)
ax2.set_xlim(0.1, 1.5)
ax1.set_ylim(-1.5, 0.5)
ax2.set_ylim(-1.5, 0.5)
ax2.yaxis.set_major_formatter(plt.NullFormatter())
if i == 0:
ax1.xaxis.set_major_formatter(plt.NullFormatter())
ax2.xaxis.set_major_formatter(plt.NullFormatter())
else:
ax1.set_xlabel(r'$g-i$')
ax2.set_xlabel(r'$A$')
#------------------------------------------------------------
# Second figure
fig = plt.figure(figsize=(5, 5))
fig.subplots_adjust(left=0.11, right=0.95, wspace=0.3)
attrs = ['skew', 'ug', 'iK', 'JK']
labels = ['skew', '$u-g$', '$i-K$', '$J-K$']
ylims = [(-1.8, 2.2), (0.6, 2.9), (0.1, 2.6), (-0.2, 1.2)]
for i in range(4):
ax = fig.add_subplot(221 + i)
ax.scatter(data['gi'][i_test], data[attrs[i]][i_test],
c=ypred[1], edgecolors='none', s=4, lw=0)
ax.set_xlabel('$g-i$')
ax.set_ylabel(labels[i])
ax.set_xlim(-0.6, 2.1)
ax.set_ylim(ylims[i])
#------------------------------------------------------------
# Save the results
#
# run the script as
#
# >$ python fig_LINEAR_SVM.py --save
#
# to output the data file showing the cluster labels of each point
import sys
if len(sys.argv) > 1 and sys.argv[1] == '--save':
filename = 'cluster_labels_svm.dat'
print("Saving cluster labels to", filename)
from astroML.datasets.LINEAR_sample import ARCHIVE_DTYPE
new_data = np.zeros(len(data),
dtype=(ARCHIVE_DTYPE + [('2D_cluster_ID', 'i4'),
('7D_cluster_ID', 'i4')]))
# switch the labels back 3->6
for i in range(2):
ypred[i][ypred[i] == 3] = 6
# need to put labels back in order
class_labels = [-999 * np.ones(len(data)) for i in range(2)]
for i in range(2):
class_labels[i][i_test] = ypred[i]
for name in data.dtype.names:
new_data[name] = data[name]
new_data['2D_cluster_ID'] = class_labels[0]
new_data['7D_cluster_ID'] = class_labels[1]
fmt = ('%.6f %.6f %.3f %.3f %.3f %.3f %.7f %.3f %.3f '
'%.3f %.2f %i %i %s %i %i\n')
F = open(filename, 'w')
F.write('# ra dec ug gi iK JK '
'logP Ampl skew kurt magMed nObs LCtype '
'LINEARobjectID 2D_cluster_ID 7D_cluster_ID\n')
for line in new_data:
F.write(fmt % tuple(line[col] for col in line.dtype.names))
F.close()
plt.show()
| bsd-2-clause |
herilalaina/scikit-learn | sklearn/cluster/bicluster.py | 23 | 20266 | """Spectral biclustering algorithms.
Authors : Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.linalg import norm
from scipy.sparse import dia_matrix, issparse
from scipy.sparse.linalg import eigsh, svds
from . import KMeans, MiniBatchKMeans
from ..base import BaseEstimator, BiclusterMixin
from ..externals import six
from ..utils import check_random_state
from ..utils.extmath import (make_nonnegative, randomized_svd,
safe_sparse_dot)
from ..utils.validation import assert_all_finite, check_array
__all__ = ['SpectralCoclustering',
'SpectralBiclustering']
def _scale_normalize(X):
"""Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
"""
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
n_rows, n_cols = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
return an, row_diag, col_diag
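# Illustrative sketch (not used by the estimators): on a small dense matrix,
# each entry X[i, j] ends up divided by sqrt(row_sum_i * col_sum_j), which is
# exactly what the row_diag / col_diag factors above implement.
def _scale_normalize_sketch():
    X = np.array([[1., 3.],
                  [2., 2.]])
    normalized, row_diag, col_diag = _scale_normalize(X)
    return normalized, row_diag, col_diag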
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
"""Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
"""
    # According to the paper, this can also be done more efficiently with
# deviation reduction and balancing algorithms.
X = make_nonnegative(X)
X_scaled = X
dist = None
for _ in range(max_iter):
X_new, _, _ = _scale_normalize(X_scaled)
if issparse(X):
dist = norm(X_scaled.data - X.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
return X_scaled
def _log_normalize(X):
"""Normalize ``X`` according to Kluger's log-interactions scheme."""
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError("Cannot compute log of a sparse matrix,"
" because log(x) diverges to -infinity as x"
" goes to 0.")
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
return L - row_avg - col_avg + avg
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
BiclusterMixin)):
"""Base class for spectral biclustering."""
@abstractmethod
def __init__(self, n_clusters=3, svd_method="randomized",
n_svd_vecs=None, mini_batch=False, init="k-means++",
n_init=10, n_jobs=1, random_state=None):
self.n_clusters = n_clusters
self.svd_method = svd_method
self.n_svd_vecs = n_svd_vecs
self.mini_batch = mini_batch
self.init = init
self.n_init = n_init
self.n_jobs = n_jobs
self.random_state = random_state
def _check_parameters(self):
legal_svd_methods = ('randomized', 'arpack')
if self.svd_method not in legal_svd_methods:
raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
" one of {1}.".format(self.svd_method,
legal_svd_methods))
def fit(self, X, y=None):
"""Creates a biclustering for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
y : Ignored
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
self._check_parameters()
self._fit(X)
return self
def _svd(self, array, n_components, n_discard):
"""Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
"""
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
u, _, vt = randomized_svd(array, n_components,
random_state=self.random_state,
**kwargs)
elif self.svd_method == 'arpack':
u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
# some eigenvalues of A * A.T are negative, causing
# sqrt() to be np.nan. This causes some vectors in vt
# to be np.nan.
A = safe_sparse_dot(array.T, array)
random_state = check_random_state(self.random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, A.shape[0])
_, v = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
vt = v.T
if np.any(np.isnan(u)):
A = safe_sparse_dot(array, array.T)
random_state = check_random_state(self.random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, A.shape[0])
_, u = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
return u, vt.T
def _k_means(self, data, n_clusters):
if self.mini_batch:
model = MiniBatchKMeans(n_clusters,
init=self.init,
n_init=self.n_init,
random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init,
n_init=self.n_init, n_jobs=self.n_jobs,
random_state=self.random_state)
model.fit(data)
centroid = model.cluster_centers_
labels = model.labels_
return centroid, labels
class SpectralCoclustering(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001).
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Read more in the :ref:`User Guide <spectral_coclustering>`.
Parameters
----------
n_clusters : integer, optional, default: 3
The number of biclusters to find.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`scipy.sparse.linalg.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int, RandomState instance or None, optional, default: None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
The bicluster label of each row.
column_labels_ : array-like, shape (n_cols,)
The bicluster label of each column.
References
----------
* Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
bipartite spectral graph partitioning
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
"""
def __init__(self, n_clusters=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralCoclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
def _fit(self, X):
normalized_data, row_diag, col_diag = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
u, v = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u,
col_diag[:, np.newaxis] * v))
_, labels = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
        self.rows_ = np.vstack([self.row_labels_ == c
                                for c in range(self.n_clusters)])
        self.columns_ = np.vstack([self.column_labels_ == c
                                   for c in range(self.n_clusters)])
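def _demo_spectral_coclustering():  # pragma: no cover
    """Illustrative usage sketch only; not part of the original module.
    Fits :class:`SpectralCoclustering` to a noisy block-diagonal matrix from
    ``sklearn.datasets.make_biclusters`` and compares the recovered
    biclusters to the ground truth with ``sklearn.metrics.consensus_score``
    (a score of 1.0 means the planted biclusters were recovered exactly).
    Assumes ``numpy`` is imported as ``np`` at the top of this module.
    """
    from sklearn.datasets import make_biclusters
    from sklearn.metrics import consensus_score
    data, rows, columns = make_biclusters(
        shape=(30, 30), n_clusters=3, noise=0.05, random_state=0)
    model = SpectralCoclustering(n_clusters=3, random_state=0)
    model.fit(data)
    score = consensus_score(model.biclusters_, (rows, columns))
    # reorder rows/columns by label to make the block structure visible
    fit_data = data[np.argsort(model.row_labels_)]
    fit_data = fit_data[:, np.argsort(model.column_labels_)]
    return score, fit_data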
class SpectralBiclustering(BaseSpectral):
"""Spectral biclustering (Kluger, 2003).
Partitions rows and columns under the assumption that the data has
an underlying checkerboard structure. For instance, if there are
two row partitions and three column partitions, each row will
belong to three biclusters, and each column will belong to two
biclusters. The outer product of the corresponding row and column
label vectors gives this checkerboard structure.
Read more in the :ref:`User Guide <spectral_biclustering>`.
Parameters
----------
n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
The number of row and column clusters in the checkerboard
structure.
method : string, optional, default: 'bistochastic'
Method of normalizing and converting singular vectors into
biclusters. May be one of 'scale', 'bistochastic', or 'log'.
The authors recommend using 'log'. If the data is sparse,
however, log normalization will not work, which is why the
default is 'bistochastic'. CAUTION: if `method='log'`, the
data must not be sparse.
n_components : integer, optional, default: 6
Number of singular vectors to check.
n_best : integer, optional, default: 3
Number of best singular vectors to which to project the data
for clustering.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', uses
`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', uses
`scipy.sparse.linalg.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int, RandomState instance or None, optional, default: None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
Row partition labels.
column_labels_ : array-like, shape (n_cols,)
Column partition labels.
References
----------
* Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray
data: coclustering genes and conditions
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.
"""
def __init__(self, n_clusters=3, method='bistochastic',
n_components=6, n_best=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralBiclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
self.method = method
self.n_components = n_components
self.n_best = n_best
def _check_parameters(self):
super(SpectralBiclustering, self)._check_parameters()
legal_methods = ('bistochastic', 'scale', 'log')
if self.method not in legal_methods:
raise ValueError("Unknown method: '{0}'. method must be"
" one of {1}.".format(self.method, legal_methods))
try:
int(self.n_clusters)
except TypeError:
try:
r, c = self.n_clusters
int(r)
int(c)
except (ValueError, TypeError):
raise ValueError("Incorrect parameter n_clusters has value:"
" {}. It should either be a single integer"
" or an iterable with two integers:"
" (n_row_clusters, n_column_clusters)")
if self.n_components < 1:
raise ValueError("Parameter n_components must be greater than 0,"
" but its value is {}".format(self.n_components))
if self.n_best < 1:
raise ValueError("Parameter n_best must be greater than 0,"
" but its value is {}".format(self.n_best))
if self.n_best > self.n_components:
raise ValueError("n_best cannot be larger than"
" n_components, but {} > {}"
"".format(self.n_best, self.n_components))
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
normalized_data = _bistochastic_normalize(X)
n_sv += 1
elif self.method == 'scale':
normalized_data, _, _ = _scale_normalize(X)
n_sv += 1
elif self.method == 'log':
normalized_data = _log_normalize(X)
n_discard = 0 if self.method == 'log' else 1
u, v = self._svd(normalized_data, n_sv, n_discard)
ut = u.T
vt = v.T
try:
n_row_clusters, n_col_clusters = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
best_ut = self._fit_best_piecewise(ut, self.n_best,
n_row_clusters)
best_vt = self._fit_best_piecewise(vt, self.n_best,
n_col_clusters)
self.row_labels_ = self._project_and_cluster(X, best_vt.T,
n_row_clusters)
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
n_col_clusters)
        self.rows_ = np.vstack([self.row_labels_ == label
                                for label in range(n_row_clusters)
                                for _ in range(n_col_clusters)])
        self.columns_ = np.vstack([self.column_labels_ == label
                                   for _ in range(n_row_clusters)
                                   for label in range(n_col_clusters)])
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise,
axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1,
arr=(vectors - piecewise_vectors))
result = vectors[np.argsort(dists)[:n_best]]
return result
def _project_and_cluster(self, data, vectors, n_clusters):
"""Project ``data`` to ``vectors`` and cluster the result."""
projected = safe_sparse_dot(data, vectors)
_, labels = self._k_means(projected, n_clusters)
return labels
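def _demo_spectral_biclustering():  # pragma: no cover
    """Illustrative usage sketch only; not part of the original module.
    Fits :class:`SpectralBiclustering` to a noisy checkerboard matrix from
    ``sklearn.datasets.make_checkerboard`` and reorders rows and columns by
    the fitted labels so that the checkerboard structure becomes visible.
    Assumes ``numpy`` is imported as ``np`` at the top of this module.
    """
    from sklearn.datasets import make_checkerboard
    n_clusters = (4, 3)  # (n_row_clusters, n_column_clusters)
    data, _, _ = make_checkerboard(
        shape=(60, 60), n_clusters=n_clusters, noise=1.0, random_state=0)
    model = SpectralBiclustering(n_clusters=n_clusters, method='log',
                                 random_state=0)
    model.fit(data)
    fit_data = data[np.argsort(model.row_labels_)]
    fit_data = fit_data[:, np.argsort(model.column_labels_)]
    return fit_data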
| bsd-3-clause |
pravsripad/mne-python | mne/tests/test_source_estimate.py | 2 | 77080 | # -*- coding: utf-8 -*-
#
# License: BSD-3-Clause
from contextlib import nullcontext
from copy import deepcopy
import os
import os.path as op
import re
from shutil import copyfile
import numpy as np
from numpy.fft import fft
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose, assert_equal, assert_array_less)
import pytest
from scipy import sparse
from scipy.optimize import fmin_cobyla
from scipy.spatial.distance import cdist
import mne
from mne import (stats, SourceEstimate, VectorSourceEstimate,
VolSourceEstimate, Label, read_source_spaces,
read_evokeds, MixedSourceEstimate, find_events, Epochs,
read_source_estimate, extract_label_time_course,
spatio_temporal_tris_adjacency, stc_near_sensors,
spatio_temporal_src_adjacency, read_cov, EvokedArray,
spatial_inter_hemi_adjacency, read_forward_solution,
spatial_src_adjacency, spatial_tris_adjacency, pick_info,
SourceSpaces, VolVectorSourceEstimate, read_trans, pick_types,
MixedVectorSourceEstimate, setup_volume_source_space,
convert_forward_solution, pick_types_forward,
compute_source_morph, labels_to_stc, scale_mri,
write_source_spaces)
from mne.datasets import testing
from mne.fixes import _get_img_fdata
from mne.io import read_info
from mne.io.constants import FIFF
from mne.morph_map import _make_morph_map_hemi
from mne.source_estimate import grade_to_tris, _get_vol_mask
from mne.source_space import _get_src_nn
from mne.transforms import apply_trans, invert_transform, transform_surface_to
from mne.minimum_norm import (read_inverse_operator, apply_inverse,
apply_inverse_epochs, make_inverse_operator)
from mne.label import read_labels_from_annot, label_sign_flip
from mne.utils import (requires_pandas, requires_sklearn, catch_logging,
requires_nibabel, requires_version, _record_warnings)
from mne.io import read_raw_fif
data_path = testing.data_path(download=False)
subjects_dir = op.join(data_path, 'subjects')
fname_inv = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif')
fname_inv_fixed = op.join(
data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-4-meg-fixed-inv.fif')
fname_fwd = op.join(
data_path, 'MEG', 'sample', 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
fname_cov = op.join(
data_path, 'MEG', 'sample', 'sample_audvis_trunc-cov.fif')
fname_evoked = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-ave.fif')
fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
fname_t1 = op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz')
fname_fs_t1 = op.join(data_path, 'subjects', 'fsaverage', 'mri', 'T1.mgz')
fname_aseg = op.join(data_path, 'subjects', 'sample', 'mri', 'aseg.mgz')
fname_src = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
fname_src_fs = op.join(data_path, 'subjects', 'fsaverage', 'bem',
'fsaverage-ico-5-src.fif')
bem_path = op.join(data_path, 'subjects', 'sample', 'bem')
fname_src_3 = op.join(bem_path, 'sample-oct-4-src.fif')
fname_src_vol = op.join(bem_path, 'sample-volume-7mm-src.fif')
fname_stc = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-meg')
fname_vol = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-grad-vol-7-fwd-sensmap-vol.w')
fname_vsrc = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-vol-7-fwd.fif')
fname_inv_vol = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-vol-7-meg-inv.fif')
fname_nirx = op.join(data_path, 'NIRx', 'nirscout', 'nirx_15_0_recording')
rng = np.random.RandomState(0)
@testing.requires_testing_data
def test_stc_baseline_correction():
"""Test baseline correction for source estimate objects."""
# test on different source estimates
stcs = [read_source_estimate(fname_stc),
read_source_estimate(fname_vol, 'sample')]
# test on different "baseline" intervals
baselines = [(0., 0.1), (None, None)]
for stc in stcs:
times = stc.times
for (start, stop) in baselines:
# apply baseline correction, then check if it worked
stc = stc.apply_baseline(baseline=(start, stop))
t0 = start or stc.times[0]
t1 = stop or stc.times[-1]
# index for baseline interval (include boundary latencies)
imin = np.abs(times - t0).argmin()
imax = np.abs(times - t1).argmin() + 1
# data matrix from baseline interval
data_base = stc.data[:, imin:imax]
mean_base = data_base.mean(axis=1)
zero_array = np.zeros(mean_base.shape[0])
# test if baseline properly subtracted (mean=zero for all sources)
assert_array_almost_equal(mean_base, zero_array)
@testing.requires_testing_data
def test_spatial_inter_hemi_adjacency():
"""Test spatial adjacency between hemispheres."""
# trivial cases
conn = spatial_inter_hemi_adjacency(fname_src_3, 5e-6)
assert_equal(conn.data.size, 0)
conn = spatial_inter_hemi_adjacency(fname_src_3, 5e6)
assert_equal(conn.data.size, np.prod(conn.shape) // 2)
# actually interesting case (1cm), should be between 2 and 10% of verts
src = read_source_spaces(fname_src_3)
conn = spatial_inter_hemi_adjacency(src, 10e-3)
conn = conn.tocsr()
n_src = conn.shape[0]
assert (n_src * 0.02 < conn.data.size < n_src * 0.10)
assert_equal(conn[:src[0]['nuse'], :src[0]['nuse']].data.size, 0)
assert_equal(conn[-src[1]['nuse']:, -src[1]['nuse']:].data.size, 0)
c = (conn.T + conn) / 2. - conn
c.eliminate_zeros()
assert_equal(c.data.size, 0)
# check locations
upper_right = conn[:src[0]['nuse'], src[0]['nuse']:].toarray()
assert_equal(upper_right.sum(), conn.sum() // 2)
good_labels = ['S_pericallosal', 'Unknown', 'G_and_S_cingul-Mid-Post',
'G_cuneus']
for hi, hemi in enumerate(('lh', 'rh')):
has_neighbors = src[hi]['vertno'][np.where(np.any(upper_right,
axis=1 - hi))[0]]
labels = read_labels_from_annot('sample', 'aparc.a2009s', hemi,
subjects_dir=subjects_dir)
use_labels = [label.name[:-3] for label in labels
if np.in1d(label.vertices, has_neighbors).any()]
assert (set(use_labels) - set(good_labels) == set())
@pytest.mark.slowtest
@testing.requires_testing_data
@requires_version('h5io')
def test_volume_stc(tmp_path):
"""Test volume STCs."""
from h5io import write_hdf5
N = 100
data = np.arange(N)[:, np.newaxis]
datas = [data,
data,
np.arange(2)[:, np.newaxis],
np.arange(6).reshape(2, 3, 1)]
vertno = np.arange(N)
vertnos = [vertno,
vertno[:, np.newaxis],
np.arange(2)[:, np.newaxis],
np.arange(2)]
vertno_reads = [vertno, vertno, np.arange(2), np.arange(2)]
for data, vertno, vertno_read in zip(datas, vertnos, vertno_reads):
if data.ndim in (1, 2):
stc = VolSourceEstimate(data, [vertno], 0, 1)
ext = 'stc'
klass = VolSourceEstimate
else:
assert data.ndim == 3
stc = VolVectorSourceEstimate(data, [vertno], 0, 1)
ext = 'h5'
klass = VolVectorSourceEstimate
fname_temp = tmp_path / ('temp-vl.' + ext)
stc_new = stc
n = 3 if ext == 'h5' else 2
for ii in range(n):
if ii < 2:
stc_new.save(fname_temp, overwrite=True)
else:
# Pass stc.vertices[0], an ndarray, to ensure support for
# the way we used to write volume STCs
write_hdf5(
str(fname_temp), dict(
vertices=stc.vertices[0], data=stc.data,
tmin=stc.tmin, tstep=stc.tstep,
subject=stc.subject, src_type=stc._src_type),
title='mnepython', overwrite=True)
stc_new = read_source_estimate(fname_temp)
assert isinstance(stc_new, klass)
assert_array_equal(vertno_read, stc_new.vertices[0])
assert_array_almost_equal(stc.data, stc_new.data)
# now let's actually read a MNE-C processed file
stc = read_source_estimate(fname_vol, 'sample')
assert isinstance(stc, VolSourceEstimate)
assert 'sample' in repr(stc)
assert ' kB' in repr(stc)
stc_new = stc
fname_temp = tmp_path / ('temp-vl.stc')
with pytest.raises(ValueError, match="'ftype' parameter"):
stc.save(fname_vol, ftype='whatever', overwrite=True)
for ftype in ['w', 'h5']:
for _ in range(2):
fname_temp = tmp_path / ('temp-vol.%s' % ftype)
stc_new.save(fname_temp, ftype=ftype, overwrite=True)
stc_new = read_source_estimate(fname_temp)
assert (isinstance(stc_new, VolSourceEstimate))
assert_array_equal(stc.vertices[0], stc_new.vertices[0])
assert_array_almost_equal(stc.data, stc_new.data)
@requires_nibabel()
@testing.requires_testing_data
def test_stc_as_volume():
"""Test previous volume source estimate morph."""
import nibabel as nib
inverse_operator_vol = read_inverse_operator(fname_inv_vol)
# Apply inverse operator
stc_vol = read_source_estimate(fname_vol, 'sample')
img = stc_vol.as_volume(inverse_operator_vol['src'], mri_resolution=True,
dest='42')
t1_img = nib.load(fname_t1)
# always assure nifti and dimensionality
assert isinstance(img, nib.Nifti1Image)
assert img.header.get_zooms()[:3] == t1_img.header.get_zooms()[:3]
img = stc_vol.as_volume(inverse_operator_vol['src'], mri_resolution=False)
assert isinstance(img, nib.Nifti1Image)
assert img.shape[:3] == inverse_operator_vol['src'][0]['shape'][:3]
with pytest.raises(ValueError, match='Invalid value.*output.*'):
stc_vol.as_volume(inverse_operator_vol['src'], format='42')
@testing.requires_testing_data
@requires_nibabel()
def test_save_vol_stc_as_nifti(tmp_path):
"""Save the stc as a nifti file and export."""
import nibabel as nib
src = read_source_spaces(fname_vsrc)
vol_fname = tmp_path / 'stc.nii.gz'
# now let's actually read a MNE-C processed file
stc = read_source_estimate(fname_vol, 'sample')
assert (isinstance(stc, VolSourceEstimate))
stc.save_as_volume(vol_fname, src,
dest='surf', mri_resolution=False)
with _record_warnings(): # nib<->numpy
img = nib.load(str(vol_fname))
assert (img.shape == src[0]['shape'] + (len(stc.times),))
with _record_warnings(): # nib<->numpy
t1_img = nib.load(fname_t1)
stc.save_as_volume(vol_fname, src, dest='mri', mri_resolution=True,
overwrite=True)
with _record_warnings(): # nib<->numpy
img = nib.load(str(vol_fname))
assert (img.shape == t1_img.shape + (len(stc.times),))
assert_allclose(img.affine, t1_img.affine, atol=1e-5)
# export without saving
img = stc.as_volume(src, dest='mri', mri_resolution=True)
assert (img.shape == t1_img.shape + (len(stc.times),))
assert_allclose(img.affine, t1_img.affine, atol=1e-5)
src = SourceSpaces([src[0], src[0]])
stc = VolSourceEstimate(np.r_[stc.data, stc.data],
[stc.vertices[0], stc.vertices[0]],
tmin=stc.tmin, tstep=stc.tstep, subject='sample')
img = stc.as_volume(src, dest='mri', mri_resolution=False)
assert (img.shape == src[0]['shape'] + (len(stc.times),))
@testing.requires_testing_data
def test_expand():
"""Test stc expansion."""
stc_ = read_source_estimate(fname_stc, 'sample')
vec_stc_ = VectorSourceEstimate(np.zeros((stc_.data.shape[0], 3,
stc_.data.shape[1])),
stc_.vertices, stc_.tmin, stc_.tstep,
stc_.subject)
for stc in [stc_, vec_stc_]:
assert ('sample' in repr(stc))
labels_lh = read_labels_from_annot('sample', 'aparc', 'lh',
subjects_dir=subjects_dir)
new_label = labels_lh[0] + labels_lh[1]
stc_limited = stc.in_label(new_label)
stc_new = stc_limited.copy()
stc_new.data.fill(0)
for label in labels_lh[:2]:
stc_new += stc.in_label(label).expand(stc_limited.vertices)
pytest.raises(TypeError, stc_new.expand, stc_limited.vertices[0])
pytest.raises(ValueError, stc_new.expand, [stc_limited.vertices[0]])
# make sure we can't add unless vertno agree
pytest.raises(ValueError, stc.__add__, stc.in_label(labels_lh[0]))
def _fake_stc(n_time=10, is_complex=False):
np.random.seed(7)
verts = [np.arange(10), np.arange(90)]
data = np.random.rand(100, n_time)
if is_complex:
        data = data.astype(complex)
return SourceEstimate(data, verts, 0, 1e-1, 'foo')
def _fake_vec_stc(n_time=10, is_complex=False):
np.random.seed(7)
verts = [np.arange(10), np.arange(90)]
data = np.random.rand(100, 3, n_time)
if is_complex:
        data = data.astype(complex)
return VectorSourceEstimate(data, verts, 0, 1e-1,
'foo')
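# Note: the two helpers above build surface (scalar and vector) source
# estimates with 10 left-hemisphere and 90 right-hemisphere vertices,
# tmin=0 and tstep=0.1 s (subject 'foo'); they are reused by the IO,
# arithmetic and attribute tests below.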
@testing.requires_testing_data
def test_stc_snr():
"""Test computing SNR from a STC."""
inv = read_inverse_operator(fname_inv_fixed)
fwd = read_forward_solution(fname_fwd)
cov = read_cov(fname_cov)
evoked = read_evokeds(fname_evoked, baseline=(None, 0))[0].crop(0, 0.01)
stc = apply_inverse(evoked, inv)
assert (stc.data < 0).any()
with pytest.warns(RuntimeWarning, match='nAm'):
stc.estimate_snr(evoked.info, fwd, cov) # dSPM
with pytest.warns(RuntimeWarning, match='free ori'):
abs(stc).estimate_snr(evoked.info, fwd, cov)
stc = apply_inverse(evoked, inv, method='MNE')
snr = stc.estimate_snr(evoked.info, fwd, cov)
assert_allclose(snr.times, evoked.times)
snr = snr.data
assert snr.max() < -10
assert snr.min() > -120
def test_stc_attributes():
"""Test STC attributes."""
stc = _fake_stc(n_time=10)
vec_stc = _fake_vec_stc(n_time=10)
n_times = len(stc.times)
assert_equal(stc._data.shape[-1], n_times)
assert_array_equal(stc.times, stc.tmin + np.arange(n_times) * stc.tstep)
assert_array_almost_equal(
stc.times, [0., 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
def attempt_times_mutation(stc):
stc.times -= 1
def attempt_assignment(stc, attr, val):
setattr(stc, attr, val)
# .times is read-only
pytest.raises(ValueError, attempt_times_mutation, stc)
pytest.raises(ValueError, attempt_assignment, stc, 'times', [1])
# Changing .tmin or .tstep re-computes .times
stc.tmin = 1
assert (type(stc.tmin) == float)
assert_array_almost_equal(
stc.times, [1., 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9])
stc.tstep = 1
assert (type(stc.tstep) == float)
assert_array_almost_equal(
stc.times, [1., 2., 3., 4., 5., 6., 7., 8., 9., 10.])
# tstep <= 0 is not allowed
pytest.raises(ValueError, attempt_assignment, stc, 'tstep', 0)
pytest.raises(ValueError, attempt_assignment, stc, 'tstep', -1)
# Changing .data re-computes .times
stc.data = np.random.rand(100, 5)
assert_array_almost_equal(
stc.times, [1., 2., 3., 4., 5.])
# .data must match the number of vertices
pytest.raises(ValueError, attempt_assignment, stc, 'data', [[1]])
pytest.raises(ValueError, attempt_assignment, stc, 'data', None)
    # .data must match the number of dimensions
pytest.raises(ValueError, attempt_assignment, stc, 'data', np.arange(100))
pytest.raises(ValueError, attempt_assignment, vec_stc, 'data',
[np.arange(100)])
pytest.raises(ValueError, attempt_assignment, vec_stc, 'data',
[[[np.arange(100)]]])
# .shape attribute must also work when ._data is None
stc._kernel = np.zeros((2, 2))
stc._sens_data = np.zeros((2, 3))
stc._data = None
assert_equal(stc.shape, (2, 3))
# bad size of data
stc = _fake_stc()
data = stc.data[:, np.newaxis, :]
with pytest.raises(ValueError, match='2 dimensions for SourceEstimate'):
SourceEstimate(data, stc.vertices, 0, 1)
stc = SourceEstimate(data[:, 0, 0], stc.vertices, 0, 1)
assert stc.data.shape == (len(data), 1)
def test_io_stc(tmp_path):
"""Test IO for STC files."""
stc = _fake_stc()
stc.save(tmp_path / "tmp.stc")
stc2 = read_source_estimate(tmp_path / "tmp.stc")
assert_array_almost_equal(stc.data, stc2.data)
assert_array_almost_equal(stc.tmin, stc2.tmin)
assert_equal(len(stc.vertices), len(stc2.vertices))
for v1, v2 in zip(stc.vertices, stc2.vertices):
assert_array_almost_equal(v1, v2)
assert_array_almost_equal(stc.tstep, stc2.tstep)
# test warning for complex data
stc2.data = stc2.data.astype(np.complex128)
with pytest.raises(ValueError, match='Cannot save complex-valued STC'):
stc2.save(tmp_path / 'complex.stc')
@requires_version('h5io')
@pytest.mark.parametrize('is_complex', (True, False))
@pytest.mark.parametrize('vector', (True, False))
def test_io_stc_h5(tmp_path, is_complex, vector):
"""Test IO for STC files using HDF5."""
if vector:
stc = _fake_vec_stc(is_complex=is_complex)
else:
stc = _fake_stc(is_complex=is_complex)
match = 'can only be written' if vector else "Invalid value for the 'ftype"
with pytest.raises(ValueError, match=match):
stc.save(tmp_path / 'tmp.h5', ftype='foo')
out_name = str(tmp_path / 'tmp')
stc.save(out_name, ftype='h5')
# test overwrite
assert op.isfile(out_name + '-stc.h5')
with pytest.raises(FileExistsError, match='Destination file exists'):
stc.save(out_name, ftype='h5')
stc.save(out_name, ftype='h5', overwrite=True)
stc3 = read_source_estimate(out_name)
stc4 = read_source_estimate(out_name + '-stc')
stc5 = read_source_estimate(out_name + '-stc.h5')
pytest.raises(RuntimeError, read_source_estimate, out_name,
subject='bar')
for stc_new in stc3, stc4, stc5:
assert_equal(stc_new.subject, stc.subject)
assert_array_equal(stc_new.data, stc.data)
assert_array_equal(stc_new.tmin, stc.tmin)
assert_array_equal(stc_new.tstep, stc.tstep)
assert_equal(len(stc_new.vertices), len(stc.vertices))
for v1, v2 in zip(stc_new.vertices, stc.vertices):
assert_array_equal(v1, v2)
def test_io_w(tmp_path):
"""Test IO for w files."""
stc = _fake_stc(n_time=1)
w_fname = tmp_path / 'fake'
stc.save(w_fname, ftype='w')
src = read_source_estimate(w_fname)
src.save(tmp_path / 'tmp', ftype='w')
src2 = read_source_estimate(tmp_path / 'tmp-lh.w')
assert_array_almost_equal(src.data, src2.data)
assert_array_almost_equal(src.lh_vertno, src2.lh_vertno)
assert_array_almost_equal(src.rh_vertno, src2.rh_vertno)
def test_stc_arithmetic():
"""Test arithmetic for STC files."""
stc = _fake_stc()
data = stc.data.copy()
vec_stc = _fake_vec_stc()
vec_data = vec_stc.data.copy()
out = list()
for a in [data, stc, vec_data, vec_stc]:
a = a + a * 3 + 3 * a - a ** 2 / 2
a += a
a -= a
with np.errstate(invalid='ignore'):
a /= 2 * a
a *= -a
a += 2
a -= 1
a *= -1
a /= 2
b = 2 + a
b = 2 - a
b = +a
assert_array_equal(b.data, a.data)
with np.errstate(invalid='ignore'):
a **= 3
out.append(a)
assert_array_equal(out[0], out[1].data)
assert_array_equal(out[2], out[3].data)
assert_array_equal(stc.sqrt().data, np.sqrt(stc.data))
assert_array_equal(vec_stc.sqrt().data, np.sqrt(vec_stc.data))
assert_array_equal(abs(stc).data, abs(stc.data))
assert_array_equal(abs(vec_stc).data, abs(vec_stc.data))
stc_sum = stc.sum()
assert_array_equal(stc_sum.data, stc.data.sum(1, keepdims=True))
stc_mean = stc.mean()
assert_array_equal(stc_mean.data, stc.data.mean(1, keepdims=True))
vec_stc_mean = vec_stc.mean()
assert_array_equal(vec_stc_mean.data, vec_stc.data.mean(2, keepdims=True))
@pytest.mark.slowtest
@testing.requires_testing_data
def test_stc_methods():
"""Test stc methods lh_data, rh_data, bin(), resample()."""
stc_ = read_source_estimate(fname_stc)
# Make a vector version of the above source estimate
x = stc_.data[:, np.newaxis, :]
yz = np.zeros((x.shape[0], 2, x.shape[2]))
vec_stc_ = VectorSourceEstimate(
np.concatenate((x, yz), 1),
stc_.vertices, stc_.tmin, stc_.tstep, stc_.subject
)
for stc in [stc_, vec_stc_]:
# lh_data / rh_data
assert_array_equal(stc.lh_data, stc.data[:len(stc.lh_vertno)])
assert_array_equal(stc.rh_data, stc.data[len(stc.lh_vertno):])
# bin
binned = stc.bin(.12)
a = np.mean(stc.data[..., :np.searchsorted(stc.times, .12)], axis=-1)
assert_array_equal(a, binned.data[..., 0])
stc = read_source_estimate(fname_stc)
stc.subject = 'sample'
label_lh = read_labels_from_annot('sample', 'aparc', 'lh',
subjects_dir=subjects_dir)[0]
label_rh = read_labels_from_annot('sample', 'aparc', 'rh',
subjects_dir=subjects_dir)[0]
label_both = label_lh + label_rh
for label in (label_lh, label_rh, label_both):
assert (isinstance(stc.shape, tuple) and len(stc.shape) == 2)
stc_label = stc.in_label(label)
if label.hemi != 'both':
if label.hemi == 'lh':
verts = stc_label.vertices[0]
else: # label.hemi == 'rh':
verts = stc_label.vertices[1]
n_vertices_used = len(label.get_vertices_used(verts))
assert_equal(len(stc_label.data), n_vertices_used)
stc_lh = stc.in_label(label_lh)
pytest.raises(ValueError, stc_lh.in_label, label_rh)
label_lh.subject = 'foo'
pytest.raises(RuntimeError, stc.in_label, label_lh)
stc_new = deepcopy(stc)
o_sfreq = 1.0 / stc.tstep
# note that using no padding for this STC reduces edge ringing...
stc_new.resample(2 * o_sfreq, npad=0)
assert (stc_new.data.shape[1] == 2 * stc.data.shape[1])
assert (stc_new.tstep == stc.tstep / 2)
stc_new.resample(o_sfreq, npad=0)
assert (stc_new.data.shape[1] == stc.data.shape[1])
assert (stc_new.tstep == stc.tstep)
assert_array_almost_equal(stc_new.data, stc.data, 5)
@testing.requires_testing_data
def test_center_of_mass():
"""Test computing the center of mass on an stc."""
stc = read_source_estimate(fname_stc)
pytest.raises(ValueError, stc.center_of_mass, 'sample')
stc.lh_data[:] = 0
vertex, hemi, t = stc.center_of_mass('sample', subjects_dir=subjects_dir)
assert (hemi == 1)
# XXX Should design a fool-proof test case, but here were the
# results:
assert_equal(vertex, 124791)
assert_equal(np.round(t, 2), 0.12)
@testing.requires_testing_data
@pytest.mark.parametrize('kind', ('surface', 'mixed'))
@pytest.mark.parametrize('vector', (False, True))
def test_extract_label_time_course(kind, vector):
"""Test extraction of label time courses from (Mixed)SourceEstimate."""
n_stcs = 3
n_times = 50
src = read_inverse_operator(fname_inv)['src']
if kind == 'mixed':
pytest.importorskip('nibabel')
label_names = ('Left-Cerebellum-Cortex',
'Right-Cerebellum-Cortex')
src += setup_volume_source_space(
'sample', pos=20., volume_label=label_names,
subjects_dir=subjects_dir, add_interpolator=False)
klass = MixedVectorSourceEstimate
else:
klass = VectorSourceEstimate
if not vector:
klass = klass._scalar_class
vertices = [s['vertno'] for s in src]
n_verts = np.array([len(v) for v in vertices])
vol_means = np.arange(-1, 1 - len(src), -1)
vol_means_t = np.repeat(vol_means[:, np.newaxis], n_times, axis=1)
# get some labels
labels_lh = read_labels_from_annot('sample', hemi='lh',
subjects_dir=subjects_dir)
labels_rh = read_labels_from_annot('sample', hemi='rh',
subjects_dir=subjects_dir)
labels = list()
labels.extend(labels_lh[:5])
labels.extend(labels_rh[:4])
n_labels = len(labels)
label_tcs = dict(
mean=np.arange(n_labels)[:, None] * np.ones((n_labels, n_times)))
label_tcs['max'] = label_tcs['mean']
# compute the mean with sign flip
label_tcs['mean_flip'] = np.zeros_like(label_tcs['mean'])
for i, label in enumerate(labels):
label_tcs['mean_flip'][i] = i * np.mean(
label_sign_flip(label, src[:2]))
# generate some stc's with known data
stcs = list()
pad = (((0, 0), (2, 0), (0, 0)), 'constant')
for i in range(n_stcs):
data = np.zeros((n_verts.sum(), n_times))
# set the value of the stc within each label
for j, label in enumerate(labels):
if label.hemi == 'lh':
idx = np.intersect1d(vertices[0], label.vertices)
idx = np.searchsorted(vertices[0], idx)
elif label.hemi == 'rh':
idx = np.intersect1d(vertices[1], label.vertices)
idx = len(vertices[0]) + np.searchsorted(vertices[1], idx)
data[idx] = label_tcs['mean'][j]
for j in range(len(vol_means)):
offset = n_verts[:2 + j].sum()
data[offset:offset + n_verts[j]] = vol_means[j]
if vector:
            # put the values on the Z axis
data = np.pad(data[:, np.newaxis], *pad)
this_stc = klass(data, vertices, 0, 1)
stcs.append(this_stc)
if vector:
for key in label_tcs:
label_tcs[key] = np.pad(label_tcs[key][:, np.newaxis], *pad)
vol_means_t = np.pad(vol_means_t[:, np.newaxis], *pad)
# test some invalid inputs
with pytest.raises(ValueError, match="Invalid value for the 'mode'"):
extract_label_time_course(stcs, labels, src, mode='notamode')
# have an empty label
empty_label = labels[0].copy()
empty_label.vertices += 1000000
with pytest.raises(ValueError, match='does not contain any vertices'):
extract_label_time_course(stcs, empty_label, src)
# but this works:
with pytest.warns(RuntimeWarning, match='does not contain any vertices'):
tc = extract_label_time_course(stcs, empty_label, src,
allow_empty=True)
end_shape = (3, n_times) if vector else (n_times,)
for arr in tc:
assert arr.shape == (1 + len(vol_means),) + end_shape
assert_array_equal(arr[:1], np.zeros((1,) + end_shape))
if len(vol_means):
assert_array_equal(arr[1:], vol_means_t)
# test the different modes
modes = ['mean', 'mean_flip', 'pca_flip', 'max', 'auto']
for mode in modes:
if vector and mode not in ('mean', 'max', 'auto'):
with pytest.raises(ValueError, match='when using a vector'):
extract_label_time_course(stcs, labels, src, mode=mode)
continue
with _record_warnings(): # SVD convergence on arm64
label_tc = extract_label_time_course(stcs, labels, src, mode=mode)
label_tc_method = [stc.extract_label_time_course(labels, src,
mode=mode)
for stc in stcs]
assert (len(label_tc) == n_stcs)
assert (len(label_tc_method) == n_stcs)
for tc1, tc2 in zip(label_tc, label_tc_method):
assert tc1.shape == (n_labels + len(vol_means),) + end_shape
assert tc2.shape == (n_labels + len(vol_means),) + end_shape
assert_allclose(tc1, tc2, rtol=1e-8, atol=1e-16)
if mode == 'auto':
use_mode = 'mean' if vector else 'mean_flip'
else:
use_mode = mode
# XXX we don't check pca_flip, probably should someday...
if use_mode in ('mean', 'max', 'mean_flip'):
assert_array_almost_equal(tc1[:n_labels], label_tcs[use_mode])
assert_array_almost_equal(tc1[n_labels:], vol_means_t)
# test label with very few vertices (check SVD conditionals)
label = Label(vertices=src[0]['vertno'][:2], hemi='lh')
x = label_sign_flip(label, src[:2])
assert (len(x) == 2)
label = Label(vertices=[], hemi='lh')
x = label_sign_flip(label, src[:2])
assert (x.size == 0)
@testing.requires_testing_data
@pytest.mark.parametrize('label_type, mri_res, vector, test_label, cf, call', [
(str, False, False, False, 'head', 'meth'), # head frame
(str, False, False, str, 'mri', 'func'), # fastest, default for testing
(str, False, True, int, 'mri', 'func'), # vector
(str, True, False, False, 'mri', 'func'), # mri_resolution
(list, True, False, False, 'mri', 'func'), # volume label as list
(dict, True, False, False, 'mri', 'func'), # volume label as dict
])
def test_extract_label_time_course_volume(
src_volume_labels, label_type, mri_res, vector, test_label, cf, call):
"""Test extraction of label time courses from Vol(Vector)SourceEstimate."""
src_labels, volume_labels, lut = src_volume_labels
n_tot = 46
assert n_tot == len(src_labels)
inv = read_inverse_operator(fname_inv_vol)
if cf == 'head':
src = inv['src']
assert src[0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD
rr = apply_trans(invert_transform(inv['mri_head_t']), src[0]['rr'])
else:
assert cf == 'mri'
src = read_source_spaces(fname_src_vol)
assert src[0]['coord_frame'] == FIFF.FIFFV_COORD_MRI
rr = src[0]['rr']
for s in src_labels:
assert_allclose(s['rr'], rr, atol=1e-7)
assert len(src) == 1 and src.kind == 'volume'
klass = VolVectorSourceEstimate
if not vector:
klass = klass._scalar_class
vertices = [src[0]['vertno']]
n_verts = len(src[0]['vertno'])
n_times = 50
data = vertex_values = np.arange(1, n_verts + 1)
end_shape = (n_times,)
if vector:
end_shape = (3,) + end_shape
data = np.pad(data[:, np.newaxis], ((0, 0), (2, 0)), 'constant')
data = np.repeat(data[..., np.newaxis], n_times, -1)
stcs = [klass(data.astype(float), vertices, 0, 1)]
def eltc(*args, **kwargs):
if call == 'func':
return extract_label_time_course(stcs, *args, **kwargs)
else:
assert call == 'meth'
return [stcs[0].extract_label_time_course(*args, **kwargs)]
with pytest.raises(RuntimeError, match='atlas vox_mri_t does not match'):
eltc(fname_fs_t1, src, mri_resolution=mri_res)
assert len(src_labels) == 46 # includes unknown
assert_array_equal(
src[0]['vertno'], # src includes some in "unknown" space
np.sort(np.concatenate([s['vertno'] for s in src_labels])))
# spot check
assert src_labels[-1]['seg_name'] == 'CC_Anterior'
assert src[0]['nuse'] == 4157
assert len(src[0]['vertno']) == 4157
assert sum(s['nuse'] for s in src_labels) == 4157
assert_array_equal(src_labels[-1]['vertno'], [8011, 8032, 8557])
assert_array_equal(
np.where(np.in1d(src[0]['vertno'], [8011, 8032, 8557]))[0],
[2672, 2688, 2995])
# triage "labels" argument
if mri_res:
# All should be there
missing = []
else:
# Nearest misses these
missing = ['Left-vessel', 'Right-vessel', '5th-Ventricle',
'non-WM-hypointensities']
n_want = len(src_labels)
if label_type is str:
labels = fname_aseg
elif label_type is list:
labels = (fname_aseg, volume_labels)
else:
assert label_type is dict
labels = (fname_aseg, {k: lut[k] for k in volume_labels})
assert mri_res
assert len(missing) == 0
# we're going to add one that won't exist
missing = ['intentionally_bad']
labels[1][missing[0]] = 10000
n_want += 1
n_tot += 1
n_want -= len(missing)
# actually do the testing
if cf == 'head' and not mri_res: # some missing
with pytest.warns(RuntimeWarning, match='any vertices'):
eltc(labels, src, allow_empty=True, mri_resolution=mri_res)
for mode in ('mean', 'max'):
with catch_logging() as log:
label_tc = eltc(labels, src, mode=mode, allow_empty='ignore',
mri_resolution=mri_res, verbose=True)
log = log.getvalue()
assert re.search('^Reading atlas.*aseg\\.mgz\n', log) is not None
if len(missing):
# assert that the missing ones get logged
assert 'does not contain' in log
assert repr(missing) in log
else:
assert 'does not contain' not in log
assert '\n%d/%d atlas regions had at least' % (n_want, n_tot) in log
assert len(label_tc) == 1
label_tc = label_tc[0]
assert label_tc.shape == (n_tot,) + end_shape
if vector:
assert_array_equal(label_tc[:, :2], 0.)
label_tc = label_tc[:, 2]
assert label_tc.shape == (n_tot, n_times)
# let's test some actual values by trusting the masks provided by
# setup_volume_source_space. mri_resolution=True does some
# interpolation so we should not expect equivalence, False does
# nearest so we should.
if mri_res:
rtol = 0.2 if mode == 'mean' else 0.8 # max much more sensitive
else:
rtol = 0.
for si, s in enumerate(src_labels):
func = dict(mean=np.mean, max=np.max)[mode]
these = vertex_values[np.in1d(src[0]['vertno'], s['vertno'])]
assert len(these) == s['nuse']
if si == 0 and s['seg_name'] == 'Unknown':
continue # unknown is crappy
if s['nuse'] == 0:
want = 0.
if mri_res:
# this one is totally due to interpolation, so no easy
# test here
continue
else:
want = func(these)
assert_allclose(label_tc[si], want, atol=1e-6, rtol=rtol)
# compare with in_label, only on every fourth for speed
if test_label is not False and si % 4 == 0:
label = s['seg_name']
if test_label is int:
label = lut[label]
in_label = stcs[0].in_label(
label, fname_aseg, src).data
assert in_label.shape == (s['nuse'],) + end_shape
if vector:
assert_array_equal(in_label[:, :2], 0.)
in_label = in_label[:, 2]
if want == 0:
assert in_label.shape[0] == 0
else:
in_label = func(in_label)
assert_allclose(in_label, want, atol=1e-6, rtol=rtol)
if mode == 'mean' and not vector: # check the reverse
if label_type is dict:
ctx = pytest.warns(RuntimeWarning, match='does not contain')
else:
ctx = nullcontext()
with ctx:
stc_back = labels_to_stc(labels, label_tc, src=src)
assert stc_back.data.shape == stcs[0].data.shape
corr = np.corrcoef(stc_back.data.ravel(),
stcs[0].data.ravel())[0, 1]
assert 0.6 < corr < 0.63
assert_allclose(_varexp(label_tc, label_tc), 1.)
ve = _varexp(stc_back.data, stcs[0].data)
assert 0.83 < ve < 0.85
with _record_warnings(): # ignore no output
label_tc_rt = extract_label_time_course(
stc_back, labels, src=src, mri_resolution=mri_res,
allow_empty=True)
assert label_tc_rt.shape == label_tc.shape
corr = np.corrcoef(label_tc.ravel(), label_tc_rt.ravel())[0, 1]
lower, upper = (0.99, 0.999) if mri_res else (0.95, 0.97)
assert lower < corr < upper
ve = _varexp(label_tc_rt, label_tc)
lower, upper = (0.99, 0.999) if mri_res else (0.97, 0.99)
assert lower < ve < upper
def _varexp(got, want):
return max(
1 - np.linalg.norm(got.ravel() - want.ravel()) ** 2 /
np.linalg.norm(want) ** 2, 0.)
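# Note: ``_varexp`` returns the fraction of variance explained,
# max(1 - ||got - want||^2 / ||want||^2, 0), and is used above to quantify
# how well label time courses round-trip through ``labels_to_stc``.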
@testing.requires_testing_data
def test_extract_label_time_course_equiv():
"""Test extraction of label time courses from stc equivalences."""
label = read_labels_from_annot('sample', 'aparc', 'lh', regexp='transv',
subjects_dir=subjects_dir)
assert len(label) == 1
label = label[0]
inv = read_inverse_operator(fname_inv)
evoked = read_evokeds(fname_evoked, baseline=(None, 0))[0].crop(0, 0.01)
stc = apply_inverse(evoked, inv, pick_ori='normal', label=label)
stc_full = apply_inverse(evoked, inv, pick_ori='normal')
stc_in_label = stc_full.in_label(label)
mean = stc.extract_label_time_course(label, inv['src'])
mean_2 = stc_in_label.extract_label_time_course(label, inv['src'])
assert_allclose(mean, mean_2)
inv['src'][0]['vertno'] = np.array([], int)
assert len(stc_in_label.vertices[0]) == 22
with pytest.raises(ValueError, match='22/22 left hemisphere.*missing'):
stc_in_label.extract_label_time_course(label, inv['src'])
def _my_trans(data):
"""FFT that adds an additional dimension by repeating result."""
data_t = fft(data)
data_t = np.concatenate([data_t[:, :, None], data_t[:, :, None]], axis=2)
return data_t, None
def test_transform_data():
"""Test applying linear (time) transform to data."""
# make up some data
n_sensors, n_vertices, n_times = 10, 20, 4
kernel = rng.randn(n_vertices, n_sensors)
sens_data = rng.randn(n_sensors, n_times)
vertices = [np.arange(n_vertices)]
data = np.dot(kernel, sens_data)
for idx, tmin_idx, tmax_idx in\
zip([None, np.arange(n_vertices // 2, n_vertices)],
[None, 1], [None, 3]):
if idx is None:
idx_use = slice(None, None)
else:
idx_use = idx
data_f, _ = _my_trans(data[idx_use, tmin_idx:tmax_idx])
for stc_data in (data, (kernel, sens_data)):
stc = VolSourceEstimate(stc_data, vertices=vertices,
tmin=0., tstep=1.)
stc_data_t = stc.transform_data(_my_trans, idx=idx,
tmin_idx=tmin_idx,
tmax_idx=tmax_idx)
assert_allclose(data_f, stc_data_t)
# bad sens_data
sens_data = sens_data[..., np.newaxis]
with pytest.raises(ValueError, match='sensor data must have 2'):
VolSourceEstimate((kernel, sens_data), vertices, 0, 1)
def test_transform():
"""Test applying linear (time) transform to data."""
# make up some data
n_verts_lh, n_verts_rh, n_times = 10, 10, 10
vertices = [np.arange(n_verts_lh), n_verts_lh + np.arange(n_verts_rh)]
data = rng.randn(n_verts_lh + n_verts_rh, n_times)
stc = SourceEstimate(data, vertices=vertices, tmin=-0.1, tstep=0.1)
# data_t.ndim > 2 & copy is True
stcs_t = stc.transform(_my_trans, copy=True)
assert (isinstance(stcs_t, list))
assert_array_equal(stc.times, stcs_t[0].times)
assert_equal(stc.vertices, stcs_t[0].vertices)
data = np.concatenate((stcs_t[0].data[:, :, None],
stcs_t[1].data[:, :, None]), axis=2)
data_t = stc.transform_data(_my_trans)
assert_array_equal(data, data_t) # check against stc.transform_data()
# data_t.ndim > 2 & copy is False
pytest.raises(ValueError, stc.transform, _my_trans, copy=False)
# data_t.ndim = 2 & copy is True
tmp = deepcopy(stc)
stc_t = stc.transform(np.abs, copy=True)
assert (isinstance(stc_t, SourceEstimate))
assert_array_equal(stc.data, tmp.data) # xfrm doesn't modify original?
# data_t.ndim = 2 & copy is False
times = np.round(1000 * stc.times)
verts = np.arange(len(stc.lh_vertno),
len(stc.lh_vertno) + len(stc.rh_vertno), 1)
verts_rh = stc.rh_vertno
tmin_idx = np.searchsorted(times, 0)
tmax_idx = np.searchsorted(times, 501) # Include 500ms in the range
data_t = stc.transform_data(np.abs, idx=verts, tmin_idx=tmin_idx,
tmax_idx=tmax_idx)
stc.transform(np.abs, idx=verts, tmin=-50, tmax=500, copy=False)
assert (isinstance(stc, SourceEstimate))
assert_equal(stc.tmin, 0.)
assert_equal(stc.times[-1], 0.5)
assert_equal(len(stc.vertices[0]), 0)
assert_equal(stc.vertices[1], verts_rh)
assert_array_equal(stc.data, data_t)
times = np.round(1000 * stc.times)
tmin_idx, tmax_idx = np.searchsorted(times, 0), np.searchsorted(times, 250)
data_t = stc.transform_data(np.abs, tmin_idx=tmin_idx, tmax_idx=tmax_idx)
stc.transform(np.abs, tmin=0, tmax=250, copy=False)
assert_equal(stc.tmin, 0.)
assert_equal(stc.times[-1], 0.2)
assert_array_equal(stc.data, data_t)
@requires_sklearn
def test_spatio_temporal_tris_adjacency():
"""Test spatio-temporal adjacency from triangles."""
tris = np.array([[0, 1, 2], [3, 4, 5]])
adjacency = spatio_temporal_tris_adjacency(tris, 2)
x = [1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]
components = stats.cluster_level._get_components(np.array(x), adjacency)
# _get_components works differently now...
old_fmt = [0, 0, -2, -2, -2, -2, 0, -2, -2, -2, -2, 1]
new_fmt = np.array(old_fmt)
new_fmt = [np.nonzero(new_fmt == v)[0]
for v in np.unique(new_fmt[new_fmt >= 0])]
assert len(new_fmt) == len(components)
for c, n in zip(components, new_fmt):
assert_array_equal(c, n)
@testing.requires_testing_data
def test_spatio_temporal_src_adjacency():
"""Test spatio-temporal adjacency from source spaces."""
tris = np.array([[0, 1, 2], [3, 4, 5]])
src = [dict(), dict()]
adjacency = spatio_temporal_tris_adjacency(tris, 2).todense()
assert_allclose(np.diag(adjacency), 1.)
src[0]['use_tris'] = np.array([[0, 1, 2]])
src[1]['use_tris'] = np.array([[0, 1, 2]])
src[0]['vertno'] = np.array([0, 1, 2])
src[1]['vertno'] = np.array([0, 1, 2])
src[0]['type'] = 'surf'
src[1]['type'] = 'surf'
adjacency2 = spatio_temporal_src_adjacency(src, 2)
assert_array_equal(adjacency2.todense(), adjacency)
# add test for dist adjacency
src[0]['dist'] = np.ones((3, 3)) - np.eye(3)
src[1]['dist'] = np.ones((3, 3)) - np.eye(3)
src[0]['vertno'] = [0, 1, 2]
src[1]['vertno'] = [0, 1, 2]
src[0]['type'] = 'surf'
src[1]['type'] = 'surf'
adjacency3 = spatio_temporal_src_adjacency(src, 2, dist=2)
assert_array_equal(adjacency3.todense(), adjacency)
# add test for source space adjacency with omitted vertices
inverse_operator = read_inverse_operator(fname_inv)
src_ = inverse_operator['src']
with pytest.warns(RuntimeWarning, match='will have holes'):
adjacency = spatio_temporal_src_adjacency(src_, n_times=2)
a = adjacency.shape[0] / 2
b = sum([s['nuse'] for s in inverse_operator['src']])
assert (a == b)
assert_equal(grade_to_tris(5).shape, [40960, 3])
@requires_pandas
def test_to_data_frame():
"""Test stc Pandas exporter."""
n_vert, n_times = 10, 5
vertices = [np.arange(n_vert, dtype=np.int64), np.empty(0, dtype=np.int64)]
data = rng.randn(n_vert, n_times)
stc_surf = SourceEstimate(data, vertices=vertices, tmin=0, tstep=1,
subject='sample')
stc_vol = VolSourceEstimate(data, vertices=vertices[:1], tmin=0, tstep=1,
subject='sample')
for stc in [stc_surf, stc_vol]:
df = stc.to_data_frame()
# test data preservation (first 2 dataframe elements are subj & time)
assert_array_equal(df.values.T[2:], stc.data)
# test long format
df_long = stc.to_data_frame(long_format=True)
assert(len(df_long) == stc.data.size)
expected = ('subject', 'time', 'source', 'value')
assert set(expected) == set(df_long.columns)
@requires_pandas
@pytest.mark.parametrize('index', ('time', ['time', 'subject'], None))
def test_to_data_frame_index(index):
"""Test index creation in stc Pandas exporter."""
n_vert, n_times = 10, 5
vertices = [np.arange(n_vert, dtype=np.int64), np.empty(0, dtype=np.int64)]
data = rng.randn(n_vert, n_times)
stc = SourceEstimate(data, vertices=vertices, tmin=0, tstep=1,
subject='sample')
df = stc.to_data_frame(index=index)
# test index setting
if not isinstance(index, list):
index = [index]
assert (df.index.names == index)
# test that non-indexed data were present as columns
non_index = list(set(['time', 'subject']) - set(index))
if len(non_index):
assert all(np.in1d(non_index, df.columns))
@pytest.mark.parametrize('kind', ('surface', 'mixed', 'volume'))
@pytest.mark.parametrize('vector', (False, True))
@pytest.mark.parametrize('n_times', (5, 1))
def test_get_peak(kind, vector, n_times):
"""Test peak getter."""
n_vert = 10
vertices = [np.arange(n_vert)]
if kind == 'surface':
klass = VectorSourceEstimate
vertices += [np.empty(0, int)]
elif kind == 'mixed':
klass = MixedVectorSourceEstimate
vertices += [np.empty(0, int), np.empty(0, int)]
else:
assert kind == 'volume'
klass = VolVectorSourceEstimate
data = np.zeros((n_vert, n_times))
data[1, -1] = 1
if vector:
data = np.repeat(data[:, np.newaxis], 3, 1)
else:
klass = klass._scalar_class
stc = klass(data, vertices, 0, 1)
with pytest.raises(ValueError, match='out of bounds'):
stc.get_peak(tmin=-100)
with pytest.raises(ValueError, match='out of bounds'):
stc.get_peak(tmax=90)
with pytest.raises(ValueError,
match='must be <=' if n_times > 1 else 'out of'):
stc.get_peak(tmin=0.002, tmax=0.001)
vert_idx, time_idx = stc.get_peak()
vertno = np.concatenate(stc.vertices)
assert vert_idx in vertno
assert time_idx in stc.times
data_idx, time_idx = stc.get_peak(vert_as_index=True, time_as_index=True)
if vector:
use_data = stc.magnitude().data
else:
use_data = stc.data
assert data_idx == 1
assert time_idx == n_times - 1
assert data_idx == np.argmax(np.abs(use_data[:, time_idx]))
assert time_idx == np.argmax(np.abs(use_data[data_idx, :]))
if kind == 'surface':
data_idx_2, time_idx_2 = stc.get_peak(
vert_as_index=True, time_as_index=True, hemi='lh')
assert data_idx_2 == data_idx
assert time_idx_2 == time_idx
with pytest.raises(RuntimeError, match='no vertices'):
stc.get_peak(hemi='rh')
@requires_version('h5io')
@testing.requires_testing_data
def test_mixed_stc(tmp_path):
"""Test source estimate from mixed source space."""
N = 90 # number of sources
T = 2 # number of time points
S = 3 # number of source spaces
data = rng.randn(N, T)
vertno = S * [np.arange(N // S)]
# make sure error is raised if vertices are not a list of length >= 2
pytest.raises(ValueError, MixedSourceEstimate, data=data,
vertices=[np.arange(N)])
stc = MixedSourceEstimate(data, vertno, 0, 1)
# make sure error is raised for plotting surface with volume source
fname = tmp_path / 'mixed-stc.h5'
stc.save(fname)
stc_out = read_source_estimate(fname)
assert_array_equal(stc_out.vertices, vertno)
assert_array_equal(stc_out.data, data)
assert stc_out.tmin == 0
assert stc_out.tstep == 1
assert isinstance(stc_out, MixedSourceEstimate)
@requires_version('h5io')
@pytest.mark.parametrize('klass, kind', [
(VectorSourceEstimate, 'surf'),
(VolVectorSourceEstimate, 'vol'),
(VolVectorSourceEstimate, 'discrete'),
(MixedVectorSourceEstimate, 'mixed'),
])
@pytest.mark.parametrize('dtype', [
np.float32, np.float64, np.complex64, np.complex128])
def test_vec_stc_basic(tmp_path, klass, kind, dtype):
"""Test (vol)vector source estimate."""
nn = np.array([
[1, 0, 0],
[0, 1, 0],
[np.sqrt(1. / 2.), 0, np.sqrt(1. / 2.)],
[np.sqrt(1 / 3.)] * 3
], np.float32)
data = np.array([
[1, 0, 0],
[0, 2, 0],
[-3, 0, 0],
[1, 1, 1],
], dtype)[:, :, np.newaxis]
amplitudes = np.array([1, 2, 3, np.sqrt(3)], dtype)
magnitudes = amplitudes.copy()
normals = np.array([1, 2, -3. / np.sqrt(2), np.sqrt(3)], dtype)
if dtype in (np.complex64, np.complex128):
data *= 1j
amplitudes *= 1j
normals *= 1j
directions = np.array(
[[1, 0, 0], [0, 1, 0], [-1, 0, 0], [1. / np.sqrt(3)] * 3])
vol_kind = kind if kind in ('discrete', 'vol') else 'vol'
vol_src = SourceSpaces([dict(nn=nn, type=vol_kind)])
assert vol_src.kind == dict(vol='volume').get(vol_kind, vol_kind)
vol_verts = [np.arange(4)]
surf_src = SourceSpaces([dict(nn=nn[:2], type='surf'),
dict(nn=nn[2:], type='surf')])
assert surf_src.kind == 'surface'
surf_verts = [np.array([0, 1]), np.array([0, 1])]
if klass is VolVectorSourceEstimate:
src = vol_src
verts = vol_verts
elif klass is VectorSourceEstimate:
src = surf_src
verts = surf_verts
if klass is MixedVectorSourceEstimate:
src = surf_src + vol_src
verts = surf_verts + vol_verts
assert src.kind == 'mixed'
data = np.tile(data, (2, 1, 1))
amplitudes = np.tile(amplitudes, 2)
magnitudes = np.tile(magnitudes, 2)
normals = np.tile(normals, 2)
directions = np.tile(directions, (2, 1))
stc = klass(data, verts, 0, 1, 'foo')
amplitudes = amplitudes[:, np.newaxis]
magnitudes = magnitudes[:, np.newaxis]
# Magnitude of the vectors
assert_array_equal(stc.magnitude().data, magnitudes)
# Vector components projected onto the vertex normals
if kind in ('vol', 'mixed'):
with pytest.raises(RuntimeError, match='surface or discrete'):
stc.project('normal', src)[0]
else:
normal = stc.project('normal', src)[0]
assert_allclose(normal.data[:, 0], normals)
# Maximal-variance component, either to keep amps pos or to align to src-nn
projected, got_directions = stc.project('pca')
assert_allclose(got_directions, directions)
assert_allclose(projected.data, amplitudes)
projected, got_directions = stc.project('pca', src)
flips = np.array([[1], [1], [-1.], [1]])
if klass is MixedVectorSourceEstimate:
flips = np.tile(flips, (2, 1))
assert_allclose(got_directions, directions * flips)
assert_allclose(projected.data, amplitudes * flips)
out_name = tmp_path / 'temp.h5'
stc.save(out_name)
stc_read = read_source_estimate(out_name)
assert_allclose(stc.data, stc_read.data)
assert len(stc.vertices) == len(stc_read.vertices)
for v1, v2 in zip(stc.vertices, stc_read.vertices):
assert_array_equal(v1, v2)
stc = klass(data[:, :, 0], verts, 0, 1) # upbroadcast
assert stc.data.shape == (len(data), 3, 1)
# Bad data
with pytest.raises(ValueError, match='must have shape.*3'):
klass(data[:, :2], verts, 0, 1)
data = data[:, :, np.newaxis]
with pytest.raises(ValueError, match='3 dimensions for .*VectorSource'):
klass(data, verts, 0, 1)
@pytest.mark.parametrize('real', (True, False))
def test_source_estime_project(real):
"""Test projecting a source estimate onto direction of max power."""
n_src, n_times = 4, 100
rng = np.random.RandomState(0)
data = rng.randn(n_src, 3, n_times)
if not real:
data = data + 1j * rng.randn(n_src, 3, n_times)
assert data.dtype == np.complex128
else:
assert data.dtype == np.float64
# Make sure that the normal we get maximizes the power
# (i.e., minimizes the negative power)
want_nn = np.empty((n_src, 3))
for ii in range(n_src):
x0 = np.ones(3)
def objective(x):
x = x / np.linalg.norm(x)
return -np.linalg.norm(np.dot(x, data[ii]))
want_nn[ii] = fmin_cobyla(objective, x0, (), rhobeg=0.1, rhoend=1e-6)
want_nn /= np.linalg.norm(want_nn, axis=1, keepdims=True)
stc = VolVectorSourceEstimate(data, [np.arange(n_src)], 0, 1)
stc_max, directions = stc.project('pca')
flips = np.sign(np.sum(directions * want_nn, axis=1, keepdims=True))
directions *= flips
assert_allclose(directions, want_nn, atol=2e-6)
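# A minimal illustrative sketch (not used by the tests above): for a single
# source, the power-maximizing direction searched for numerically above is,
# up to sign/phase, the dominant left singular vector of its 3 x n_times data
# block. It relies only on the module-level NumPy import.
def _demo_max_power_direction(data_3xT):
    """Return a unit direction maximizing np.linalg.norm(direction @ data_3xT)."""
    u, _, _ = np.linalg.svd(data_3xT, full_matrices=False)
    return u[:, 0]  # sign/phase is arbitrary, as handled via `flips` above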
@testing.requires_testing_data
def test_source_estime_project_label():
"""Test projecting a source estimate onto direction of max power."""
fwd = read_forward_solution(fname_fwd)
fwd = pick_types_forward(fwd, meg=True, eeg=False)
evoked = read_evokeds(fname_evoked, baseline=(None, 0))[0]
noise_cov = read_cov(fname_cov)
free = make_inverse_operator(
evoked.info, fwd, noise_cov, loose=1.)
stc_free = apply_inverse(evoked, free, pick_ori='vector')
stc_pca = stc_free.project('pca', fwd['src'])[0]
labels_lh = read_labels_from_annot('sample', 'aparc', 'lh',
subjects_dir=subjects_dir)
new_label = labels_lh[0] + labels_lh[1]
stc_in_label = stc_free.in_label(new_label)
stc_pca_in_label = stc_pca.in_label(new_label)
stc_in_label_pca = stc_in_label.project('pca', fwd['src'])[0]
assert_array_equal(stc_pca_in_label.data, stc_in_label_pca.data)
@pytest.fixture(scope='module', params=[testing._pytest_param()])
def invs():
"""Inverses of various amounts of loose."""
fwd = read_forward_solution(fname_fwd)
fwd = pick_types_forward(fwd, meg=True, eeg=False)
fwd_surf = convert_forward_solution(fwd, surf_ori=True)
evoked = read_evokeds(fname_evoked, baseline=(None, 0))[0]
noise_cov = read_cov(fname_cov)
free = make_inverse_operator(
evoked.info, fwd, noise_cov, loose=1.)
free_surf = make_inverse_operator(
evoked.info, fwd_surf, noise_cov, loose=1.)
freeish = make_inverse_operator(
evoked.info, fwd, noise_cov, loose=0.9999)
fixed = make_inverse_operator(
evoked.info, fwd, noise_cov, loose=0.)
fixedish = make_inverse_operator(
evoked.info, fwd, noise_cov, loose=0.0001)
assert_allclose(free['source_nn'],
np.kron(np.ones(fwd['nsource']), np.eye(3)).T,
atol=1e-7)
# This is the one exception:
assert not np.allclose(free['source_nn'], free_surf['source_nn'])
assert_allclose(free['source_nn'],
np.tile(np.eye(3), (free['nsource'], 1)), atol=1e-7)
# All others are similar:
for other in (freeish, fixedish):
assert_allclose(free_surf['source_nn'], other['source_nn'], atol=1e-7)
assert_allclose(
free_surf['source_nn'][2::3], fixed['source_nn'], atol=1e-7)
expected_nn = np.concatenate([_get_src_nn(s) for s in fwd['src']])
assert_allclose(fixed['source_nn'], expected_nn, atol=1e-7)
return evoked, free, free_surf, freeish, fixed, fixedish
bad_normal = pytest.param(
'normal', marks=pytest.mark.xfail(raises=AssertionError))
@pytest.mark.parametrize('pick_ori', [None, 'normal', 'vector'])
def test_vec_stc_inv_free(invs, pick_ori):
"""Test vector STC behavior with two free-orientation inverses."""
evoked, free, free_surf, _, _, _ = invs
stc_free = apply_inverse(evoked, free, pick_ori=pick_ori)
stc_free_surf = apply_inverse(evoked, free_surf, pick_ori=pick_ori)
assert_allclose(stc_free.data, stc_free_surf.data, atol=1e-5)
@pytest.mark.parametrize('pick_ori', [None, 'normal', 'vector'])
def test_vec_stc_inv_free_surf(invs, pick_ori):
"""Test vector STC behavior with free and free-ish orientation invs."""
evoked, _, free_surf, freeish, _, _ = invs
stc_free = apply_inverse(evoked, free_surf, pick_ori=pick_ori)
stc_freeish = apply_inverse(evoked, freeish, pick_ori=pick_ori)
assert_allclose(stc_free.data, stc_freeish.data, atol=1e-3)
@pytest.mark.parametrize('pick_ori', (None, 'normal', 'vector'))
def test_vec_stc_inv_fixed(invs, pick_ori):
"""Test vector STC behavior with fixed-orientation inverses."""
evoked, _, _, _, fixed, fixedish = invs
stc_fixed = apply_inverse(evoked, fixed)
stc_fixed_vector = apply_inverse(evoked, fixed, pick_ori='vector')
assert_allclose(stc_fixed.data,
stc_fixed_vector.project('normal', fixed['src'])[0].data)
stc_fixedish = apply_inverse(evoked, fixedish, pick_ori=pick_ori)
if pick_ori == 'vector':
assert_allclose(stc_fixed_vector.data, stc_fixedish.data, atol=1e-2)
# two ways here: with magnitude...
assert_allclose(
abs(stc_fixed).data, stc_fixedish.magnitude().data, atol=1e-2)
# ... and when picking the normal (signed)
stc_fixedish = stc_fixedish.project('normal', fixedish['src'])[0]
elif pick_ori is None:
stc_fixed = abs(stc_fixed)
else:
assert pick_ori == 'normal' # no need to modify
assert_allclose(stc_fixed.data, stc_fixedish.data, atol=1e-2)
@testing.requires_testing_data
def test_epochs_vector_inverse():
"""Test vector inverse consistency between evoked and epochs."""
raw = read_raw_fif(fname_raw)
events = find_events(raw, stim_channel='STI 014')[:2]
reject = dict(grad=2000e-13, mag=4e-12, eog=150e-6)
epochs = Epochs(raw, events, None, 0, 0.01, baseline=None,
reject=reject, preload=True)
assert_equal(len(epochs), 2)
evoked = epochs.average(picks=range(len(epochs.ch_names)))
inv = read_inverse_operator(fname_inv)
method = "MNE"
snr = 3.
lambda2 = 1. / snr ** 2
stcs_epo = apply_inverse_epochs(epochs, inv, lambda2, method=method,
pick_ori='vector', return_generator=False)
stc_epo = np.mean(stcs_epo)
stc_evo = apply_inverse(evoked, inv, lambda2, method=method,
pick_ori='vector')
assert_allclose(stc_epo.data, stc_evo.data, rtol=1e-9, atol=0)
@requires_sklearn
@testing.requires_testing_data
def test_vol_adjacency():
"""Test volume adjacency."""
vol = read_source_spaces(fname_vsrc)
pytest.raises(ValueError, spatial_src_adjacency, vol, dist=1.)
adjacency = spatial_src_adjacency(vol)
n_vertices = vol[0]['inuse'].sum()
assert_equal(adjacency.shape, (n_vertices, n_vertices))
assert (np.all(adjacency.data == 1))
assert (isinstance(adjacency, sparse.coo_matrix))
adjacency2 = spatio_temporal_src_adjacency(vol, n_times=2)
assert_equal(adjacency2.shape, (2 * n_vertices, 2 * n_vertices))
assert (np.all(adjacency2.data == 1))
@testing.requires_testing_data
def test_spatial_src_adjacency():
"""Test spatial adjacency functionality."""
# oct
src = read_source_spaces(fname_src)
assert src[0]['dist'] is not None # distance info
with pytest.warns(RuntimeWarning, match='will have holes'):
con = spatial_src_adjacency(src).toarray()
con_dist = spatial_src_adjacency(src, dist=0.01).toarray()
assert (con == con_dist).mean() > 0.75
# ico
src = read_source_spaces(fname_src_fs)
con = spatial_src_adjacency(src).tocsr()
con_tris = spatial_tris_adjacency(grade_to_tris(5)).tocsr()
assert con.shape == con_tris.shape
assert_array_equal(con.data, con_tris.data)
assert_array_equal(con.indptr, con_tris.indptr)
assert_array_equal(con.indices, con_tris.indices)
# one hemi
con_lh = spatial_src_adjacency(src[:1]).tocsr()
con_lh_tris = spatial_tris_adjacency(grade_to_tris(5)).tocsr()
con_lh_tris = con_lh_tris[:10242, :10242].tocsr()
assert_array_equal(con_lh.data, con_lh_tris.data)
assert_array_equal(con_lh.indptr, con_lh_tris.indptr)
assert_array_equal(con_lh.indices, con_lh_tris.indices)
@requires_sklearn
@requires_nibabel()
@testing.requires_testing_data
def test_vol_mask():
"""Test extraction of volume mask."""
src = read_source_spaces(fname_vsrc)
mask = _get_vol_mask(src)
# Let's use an alternative way that should be equivalent
vertices = [src[0]['vertno']]
n_vertices = len(vertices[0])
data = (1 + np.arange(n_vertices))[:, np.newaxis]
stc_tmp = VolSourceEstimate(data, vertices, tmin=0., tstep=1.)
img = stc_tmp.as_volume(src, mri_resolution=False)
img_data = _get_img_fdata(img)[:, :, :, 0].T
mask_nib = (img_data != 0)
assert_array_equal(img_data[mask_nib], data[:, 0])
assert_array_equal(np.where(mask_nib.ravel())[0], src[0]['vertno'])
assert_array_equal(mask, mask_nib)
assert_array_equal(img_data.shape, mask.shape)
@testing.requires_testing_data
def test_stc_near_sensors(tmp_path):
"""Test stc_near_sensors."""
info = read_info(fname_evoked)
# pick the left EEG sensors
picks = pick_types(info, meg=False, eeg=True, exclude=())
picks = [pick for pick in picks if info['chs'][pick]['loc'][0] < 0]
pick_info(info, picks, copy=False)
with info._unlock():
info['projs'] = []
info['bads'] = []
assert info['nchan'] == 33
evoked = EvokedArray(np.eye(info['nchan']), info)
trans = read_trans(fname_fwd)
assert trans['to'] == FIFF.FIFFV_COORD_HEAD
this_dir = str(tmp_path)
# testing does not have pial, so fake it
os.makedirs(op.join(this_dir, 'sample', 'surf'))
for hemi in ('lh', 'rh'):
copyfile(op.join(subjects_dir, 'sample', 'surf', f'{hemi}.white'),
op.join(this_dir, 'sample', 'surf', f'{hemi}.pial'))
    # here we use a distance smaller than the inter-sensor distance
kwargs = dict(subject='sample', trans=trans, subjects_dir=this_dir,
verbose=True, distance=0.005)
with pytest.raises(ValueError, match='No appropriate channels'):
stc_near_sensors(evoked, **kwargs)
evoked.set_channel_types({ch_name: 'ecog' for ch_name in evoked.ch_names})
with catch_logging() as log:
stc = stc_near_sensors(evoked, **kwargs)
log = log.getvalue()
assert 'Minimum projected intra-sensor distance: 7.' in log # 7.4
# this should be left-hemisphere dominant
assert 5000 > len(stc.vertices[0]) > 4000
assert 200 > len(stc.vertices[1]) > 100
# and at least one vertex should have the channel values
dists = cdist(stc.data, evoked.data)
assert np.isclose(dists, 0., atol=1e-6).any(0).all()
src = read_source_spaces(fname_src) # uses "white" but should be okay
for s in src:
transform_surface_to(s, 'head', trans, copy=False)
assert src[0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD
stc_src = stc_near_sensors(evoked, src=src, **kwargs)
assert len(stc_src.data) == 7928
with pytest.warns(RuntimeWarning, match='not included'): # some removed
stc_src_full = compute_source_morph(
stc_src, 'sample', 'sample', smooth=5, spacing=None,
subjects_dir=subjects_dir).apply(stc_src)
lh_idx = np.searchsorted(stc_src_full.vertices[0], stc.vertices[0])
rh_idx = np.searchsorted(stc_src_full.vertices[1], stc.vertices[1])
rh_idx += len(stc_src_full.vertices[0])
sub_data = stc_src_full.data[np.concatenate([lh_idx, rh_idx])]
assert sub_data.shape == stc.data.shape
corr = np.corrcoef(stc.data.ravel(), sub_data.ravel())[0, 1]
assert 0.6 < corr < 0.7
# now single-weighting mode
stc_w = stc_near_sensors(evoked, mode='single', **kwargs)
assert_array_less(stc_w.data, stc.data + 1e-3) # some tol
assert len(stc_w.data) == len(stc.data)
# at least one for each sensor should have projected right on it
dists = cdist(stc_w.data, evoked.data)
assert np.isclose(dists, 0., atol=1e-6).any(0).all()
# finally, nearest mode: all should match
stc_n = stc_near_sensors(evoked, mode='nearest', **kwargs)
assert len(stc_n.data) == len(stc.data)
# at least one for each sensor should have projected right on it
dists = cdist(stc_n.data, evoked.data)
assert np.isclose(dists, 0., atol=1e-6).any(1).all() # all vert eq some ch
# these are EEG electrodes, so the distance 0.01 is too small for the
# scalp+skull. Even at a distance of 33 mm EEG 060 is too far:
with pytest.warns(RuntimeWarning, match='Channel missing in STC: EEG 060'):
stc = stc_near_sensors(evoked, trans, 'sample', subjects_dir=this_dir,
project=False, distance=0.033)
assert stc.data.any(0).sum() == len(evoked.ch_names) - 1
# and now with volumetric projection
src = read_source_spaces(fname_vsrc)
with catch_logging() as log:
stc_vol = stc_near_sensors(
evoked, trans, 'sample', src=src, surface=None,
subjects_dir=subjects_dir, distance=0.033, verbose=True)
assert isinstance(stc_vol, VolSourceEstimate)
log = log.getvalue()
assert '4157 volume vertices' in log
@requires_version('pymatreader')
@testing.requires_testing_data
def test_stc_near_sensors_picks():
"""Test using picks with stc_near_sensors."""
info = mne.io.read_raw_nirx(fname_nirx).info
evoked = mne.EvokedArray(np.ones((len(info['ch_names']), 1)), info)
src = mne.read_source_spaces(fname_src_fs)
kwargs = dict(
evoked=evoked, subject='fsaverage', trans='fsaverage',
subjects_dir=subjects_dir, src=src, surface=None, project=True)
with pytest.raises(ValueError, match='No appropriate channels'):
stc_near_sensors(**kwargs)
picks = np.arange(len(info['ch_names']))
data = stc_near_sensors(picks=picks, **kwargs).data
assert len(data) == 20484
assert (data >= 0).all()
data = data[data > 0]
n_pts = len(data)
assert 500 < n_pts < 600
lo, hi = np.percentile(data, (5, 95))
assert 0.01 < lo < 0.1
assert 1.3 < hi < 1.7 # > 1
data = stc_near_sensors(picks=picks, mode='weighted', **kwargs).data
assert (data >= 0).all()
data = data[data > 0]
assert len(data) == n_pts
assert_array_equal(data, 1.) # values preserved
def _make_morph_map_hemi_same(subject_from, subject_to, subjects_dir,
reg_from, reg_to):
return _make_morph_map_hemi(subject_from, subject_from, subjects_dir,
reg_from, reg_from)
@requires_nibabel()
@testing.requires_testing_data
@pytest.mark.parametrize('kind', (
pytest.param('volume', marks=[requires_version('dipy'),
pytest.mark.slowtest]),
'surface',
))
@pytest.mark.parametrize('scale', ((1.0, 0.8, 1.2), 1., 0.9))
def test_scale_morph_labels(kind, scale, monkeypatch, tmp_path):
"""Test label extraction, morphing, and MRI scaling relationships."""
tempdir = str(tmp_path)
subject_from = 'sample'
subject_to = 'small'
testing_dir = op.join(subjects_dir, subject_from)
from_dir = op.join(tempdir, subject_from)
for root in ('mri', 'surf', 'label', 'bem'):
os.makedirs(op.join(from_dir, root), exist_ok=True)
for hemi in ('lh', 'rh'):
for root, fname in (('surf', 'sphere'), ('surf', 'white'),
('surf', 'sphere.reg'),
('label', 'aparc.annot')):
use_fname = op.join(root, f'{hemi}.{fname}')
copyfile(op.join(testing_dir, use_fname),
op.join(from_dir, use_fname))
for root, fname in (('mri', 'aseg.mgz'), ('mri', 'brain.mgz')):
use_fname = op.join(root, fname)
copyfile(op.join(testing_dir, use_fname),
op.join(from_dir, use_fname))
del testing_dir
if kind == 'surface':
src_from = read_source_spaces(fname_src_3)
assert src_from[0]['dist'] is None
assert src_from[0]['nearest'] is not None
# avoid patch calc
src_from[0]['nearest'] = src_from[1]['nearest'] = None
assert len(src_from) == 2
assert src_from[0]['nuse'] == src_from[1]['nuse'] == 258
klass = SourceEstimate
labels_from = read_labels_from_annot(
subject_from, subjects_dir=tempdir)
n_labels = len(labels_from)
write_source_spaces(op.join(tempdir, subject_from, 'bem',
f'{subject_from}-oct-4-src.fif'), src_from)
else:
assert kind == 'volume'
pytest.importorskip('dipy')
src_from = read_source_spaces(fname_src_vol)
src_from[0]['subject_his_id'] = subject_from
labels_from = op.join(
tempdir, subject_from, 'mri', 'aseg.mgz')
n_labels = 46
assert op.isfile(labels_from)
klass = VolSourceEstimate
assert len(src_from) == 1
assert src_from[0]['nuse'] == 4157
write_source_spaces(
op.join(from_dir, 'bem', 'sample-vol20-src.fif'), src_from)
scale_mri(subject_from, subject_to, scale, subjects_dir=tempdir,
annot=True, skip_fiducials=True, verbose=True,
overwrite=True)
if kind == 'surface':
src_to = read_source_spaces(
op.join(tempdir, subject_to, 'bem',
f'{subject_to}-oct-4-src.fif'))
labels_to = read_labels_from_annot(
subject_to, subjects_dir=tempdir)
# Save time since we know these subjects are identical
monkeypatch.setattr(mne.morph_map, '_make_morph_map_hemi',
_make_morph_map_hemi_same)
else:
src_to = read_source_spaces(
op.join(tempdir, subject_to, 'bem',
f'{subject_to}-vol20-src.fif'))
labels_to = op.join(
tempdir, subject_to, 'mri', 'aseg.mgz')
# 1. Label->STC->Label for the given subject should be identity
# (for surfaces at least; for volumes it's not as clean as this
# due to interpolation)
n_times = 50
rng = np.random.RandomState(0)
label_tc = rng.randn(n_labels, n_times)
# check that a random permutation of our labels yields a terrible
# correlation
corr = np.corrcoef(label_tc.ravel(),
rng.permutation(label_tc).ravel())[0, 1]
assert -0.06 < corr < 0.06
# project label activations to full source space
with pytest.raises(ValueError, match='subject'):
labels_to_stc(labels_from, label_tc, src=src_from, subject='foo')
stc = labels_to_stc(labels_from, label_tc, src=src_from)
assert stc.subject == 'sample'
assert isinstance(stc, klass)
label_tc_from = extract_label_time_course(
stc, labels_from, src_from, mode='mean')
if kind == 'surface':
assert_allclose(label_tc, label_tc_from, rtol=1e-12, atol=1e-12)
else:
corr = np.corrcoef(label_tc.ravel(), label_tc_from.ravel())[0, 1]
assert 0.93 < corr < 0.95
#
# 2. Changing STC subject to the surrogate and then extracting
#
stc.subject = subject_to
label_tc_to = extract_label_time_course(
stc, labels_to, src_to, mode='mean')
assert_allclose(label_tc_from, label_tc_to, rtol=1e-12, atol=1e-12)
stc.subject = subject_from
#
# 3. Morphing STC to new subject then extracting
#
if isinstance(scale, tuple) and kind == 'volume':
ctx = nullcontext()
test_morph = True
elif kind == 'surface':
ctx = pytest.warns(RuntimeWarning, match='not included')
test_morph = True
else:
ctx = nullcontext()
test_morph = True
with ctx: # vertices not included
morph = compute_source_morph(
src_from, subject_to=subject_to, src_to=src_to,
subjects_dir=tempdir, niter_sdr=(), smooth=1,
zooms=14., verbose=True) # speed up with higher zooms
if kind == 'volume':
got_affine = morph.pre_affine.affine
want_affine = np.eye(4)
want_affine.ravel()[::5][:3] = 1. / np.array(scale, float)
# just a scaling (to within 1% if zooms=None, 20% with zooms=10)
assert_allclose(want_affine[:, :3], got_affine[:, :3], atol=0.4)
assert got_affine[3, 3] == 1.
# little translation (to within `limit` mm)
move = np.linalg.norm(got_affine[:3, 3])
limit = 2. if scale == 1. else 12
assert move < limit, scale
if test_morph:
stc_to = morph.apply(stc)
label_tc_to_morph = extract_label_time_course(
stc_to, labels_to, src_to, mode='mean')
if kind == 'volume':
corr = np.corrcoef(
label_tc.ravel(), label_tc_to_morph.ravel())[0, 1]
if isinstance(scale, tuple):
# some other fixed constant
# min_, max_ = 0.84, 0.855 # zooms='auto' values
min_, max_ = 0.57, 0.67
elif scale == 1:
# min_, max_ = 0.85, 0.875 # zooms='auto' values
min_, max_ = 0.72, 0.76
else:
# min_, max_ = 0.84, 0.855 # zooms='auto' values
min_, max_ = 0.46, 0.63
assert min_ < corr <= max_, scale
else:
assert_allclose(
label_tc, label_tc_to_morph, atol=1e-12, rtol=1e-12)
#
# 4. The same round trip from (1) but in the warped space
#
stc = labels_to_stc(labels_to, label_tc, src=src_to)
assert isinstance(stc, klass)
label_tc_to = extract_label_time_course(
stc, labels_to, src_to, mode='mean')
if kind == 'surface':
assert_allclose(label_tc, label_tc_to, rtol=1e-12, atol=1e-12)
else:
corr = np.corrcoef(label_tc.ravel(), label_tc_to.ravel())[0, 1]
assert 0.93 < corr < 0.96, scale
@testing.requires_testing_data
@pytest.mark.parametrize('kind', [
'surface',
pytest.param('volume', marks=[pytest.mark.slowtest,
requires_version('nibabel')]),
])
def test_label_extraction_subject(kind):
"""Test that label extraction subject is treated properly."""
if kind == 'surface':
inv = read_inverse_operator(fname_inv)
labels = read_labels_from_annot(
'sample', subjects_dir=subjects_dir)
labels_fs = read_labels_from_annot(
'fsaverage', subjects_dir=subjects_dir)
labels_fs = [label for label in labels_fs
if not label.name.startswith('unknown')]
assert all(label.subject == 'sample' for label in labels)
assert all(label.subject == 'fsaverage' for label in labels_fs)
assert len(labels) == len(labels_fs) == 68
n_labels = 68
else:
assert kind == 'volume'
inv = read_inverse_operator(fname_inv_vol)
inv['src'][0]['subject_his_id'] = 'sample' # modernize
labels = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')
labels_fs = op.join(subjects_dir, 'fsaverage', 'mri', 'aseg.mgz')
n_labels = 46
src = inv['src']
assert src.kind == kind
assert src._subject == 'sample'
ave = read_evokeds(fname_evoked)[0].apply_baseline((None, 0)).crop(0, 0.01)
assert len(ave.times) == 4
stc = apply_inverse(ave, inv)
assert stc.subject == 'sample'
ltc = extract_label_time_course(stc, labels, src)
stc.subject = 'fsaverage'
with pytest.raises(ValueError, match=r'source spac.*not match.* stc\.sub'):
extract_label_time_course(stc, labels, src)
stc.subject = 'sample'
assert ltc.shape == (n_labels, 4)
if kind == 'volume':
with pytest.raises(RuntimeError, match='atlas.*not match.*source spa'):
extract_label_time_course(stc, labels_fs, src)
else:
with pytest.raises(ValueError, match=r'label\.sub.*not match.* stc\.'):
extract_label_time_course(stc, labels_fs, src)
stc.subject = None
with pytest.raises(ValueError, match=r'label\.sub.*not match.* sourc'):
extract_label_time_course(stc, labels_fs, src)
| bsd-3-clause |
pravsripad/mne-python | examples/inverse/evoked_ers_source_power.py | 11 | 5660 | # -*- coding: utf-8 -*-
"""
.. _ex-source-loc-methods:
=====================================================================
Compute evoked ERS source power using DICS, LCMV beamformer, and dSPM
=====================================================================
Here we examine 3 ways of localizing event-related synchronization (ERS) of
beta-band activity in the :ref:`somato-dataset` using
:term:`DICS`, :term:`LCMV beamformer`, and :term:`dSPM` applied to active and
baseline covariance matrices.
"""
# Authors: Luke Bloy <luke.bloy@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD-3-Clause
# %%
import numpy as np
import mne
from mne.cov import compute_covariance
from mne.datasets import somato
from mne.time_frequency import csd_morlet
from mne.beamformer import (make_dics, apply_dics_csd, make_lcmv,
apply_lcmv_cov)
from mne.minimum_norm import (make_inverse_operator, apply_inverse_cov)
print(__doc__)
# %%
# Reading the raw data and creating epochs:
data_path = somato.data_path()
subject = '01'
task = 'somato'
raw_fname = (data_path / 'sub-{}'.format(subject) / 'meg' /
'sub-{}_task-{}_meg.fif'.format(subject, task))
# crop to 5 minutes to save memory
raw = mne.io.read_raw_fif(raw_fname).crop(0, 300)
# We are interested in the beta band (12-30 Hz)
raw.load_data().filter(12, 30)
# The DICS beamformer currently only supports a single sensor type.
# We'll use the gradiometers in this example.
picks = mne.pick_types(raw.info, meg='grad', exclude='bads')
# Read epochs
events = mne.find_events(raw)
epochs = mne.Epochs(raw, events, event_id=1, tmin=-1.5, tmax=2, picks=picks,
preload=True, decim=3)
# Read forward operator and point to freesurfer subject directory
fname_fwd = (data_path / 'derivatives' / 'sub-{}'.format(subject) /
'sub-{}_task-{}-fwd.fif'.format(subject, task))
subjects_dir = data_path / 'derivatives' / 'freesurfer' / 'subjects'
fwd = mne.read_forward_solution(fname_fwd)
# %%
# Compute covariances
# -------------------
# ERS activity starts at 0.5 seconds after stimulus onset. Because these
# data have been processed by MaxFilter directly (rather than MNE-Python's
# version), we have to be careful to compute the rank with a more conservative
# threshold in order to get the correct data rank (64). Once this is used in
# combination with an advanced covariance estimator like "shrunk", the rank
# will be correctly preserved.
rank = mne.compute_rank(epochs, tol=1e-6, tol_kind='relative')
active_win = (0.5, 1.5)
baseline_win = (-1, 0)
baseline_cov = compute_covariance(epochs, tmin=baseline_win[0],
tmax=baseline_win[1], method='shrunk',
rank=rank, verbose=True)
active_cov = compute_covariance(epochs, tmin=active_win[0], tmax=active_win[1],
method='shrunk', rank=rank, verbose=True)
# Weighted averaging is already performed when covariance objects are added.
common_cov = baseline_cov + active_cov
mne.viz.plot_cov(baseline_cov, epochs.info)
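# A small optional sanity check (a sketch, purely diagnostic): print the rank
# dictionary computed above and the extremes of the covariance eigenvalue
# spectrum; whether the smallest eigenvalues are exactly zero depends on the
# estimator, so no particular count is asserted here.
print('Estimated rank:', rank)
eigs = np.linalg.eigvalsh(common_cov.data)
print('Smallest / largest covariance eigenvalue: %g / %g' % (eigs[0], eigs[-1]))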
# %%
# Compute some source estimates
# -----------------------------
# Here we will use DICS, LCMV beamformer, and dSPM.
#
# See :ref:`ex-inverse-source-power` for more information about DICS.
def _gen_dics(active_win, baseline_win, epochs):
freqs = np.logspace(np.log10(12), np.log10(30), 9)
csd = csd_morlet(epochs, freqs, tmin=-1, tmax=1.5, decim=20)
csd_baseline = csd_morlet(epochs, freqs, tmin=baseline_win[0],
tmax=baseline_win[1], decim=20)
csd_ers = csd_morlet(epochs, freqs, tmin=active_win[0], tmax=active_win[1],
decim=20)
filters = make_dics(epochs.info, fwd, csd.mean(), pick_ori='max-power',
reduce_rank=True, real_filter=True, rank=rank)
stc_base, freqs = apply_dics_csd(csd_baseline.mean(), filters)
stc_act, freqs = apply_dics_csd(csd_ers.mean(), filters)
stc_act /= stc_base
return stc_act
# generate lcmv source estimate
def _gen_lcmv(active_cov, baseline_cov, common_cov):
filters = make_lcmv(epochs.info, fwd, common_cov, reg=0.05,
noise_cov=None, pick_ori='max-power')
stc_base = apply_lcmv_cov(baseline_cov, filters)
stc_act = apply_lcmv_cov(active_cov, filters)
stc_act /= stc_base
return stc_act
# generate mne/dSPM source estimate
def _gen_mne(active_cov, baseline_cov, common_cov, fwd, info, method='dSPM'):
inverse_operator = make_inverse_operator(info, fwd, common_cov)
stc_act = apply_inverse_cov(active_cov, info, inverse_operator,
method=method, verbose=True)
stc_base = apply_inverse_cov(baseline_cov, info, inverse_operator,
method=method, verbose=True)
stc_act /= stc_base
return stc_act
# Compute source estimates
stc_dics = _gen_dics(active_win, baseline_win, epochs)
stc_lcmv = _gen_lcmv(active_cov, baseline_cov, common_cov)
stc_dspm = _gen_mne(active_cov, baseline_cov, common_cov, fwd, epochs.info)
# %%
# Plot source estimates
# ---------------------
# DICS:
brain_dics = stc_dics.plot(
hemi='rh', subjects_dir=subjects_dir, subject=subject,
time_label='DICS source power in the 12-30 Hz frequency band')
# %%
# LCMV:
brain_lcmv = stc_lcmv.plot(
hemi='rh', subjects_dir=subjects_dir, subject=subject,
time_label='LCMV source power in the 12-30 Hz frequency band')
# %%
# dSPM:
brain_dspm = stc_dspm.plot(
hemi='rh', subjects_dir=subjects_dir, subject=subject,
time_label='dSPM source power in the 12-30 Hz frequency band')
| bsd-3-clause |
ekansa/open-context-py | opencontext_py/apps/imports/kobotoolbox/etl.py | 1 | 14934 | import csv
import uuid as GenUUID
import os, sys, shutil
import codecs
import numpy as np
import pandas as pd
from django.db import models
from django.db.models import Q
from django.conf import settings
from opencontext_py.apps.ocitems.manifest.models import Manifest
from opencontext_py.apps.ocitems.assertions.models import Assertion
from opencontext_py.apps.ocitems.subjects.models import Subject
from opencontext_py.apps.imports.fields.models import ImportField
from opencontext_py.apps.imports.fieldannotations.models import ImportFieldAnnotation
from opencontext_py.apps.imports.records.models import ImportCell
from opencontext_py.apps.imports.sources.models import ImportSource
from opencontext_py.apps.imports.kobotoolbox.utilities import (
UUID_SOURCE_KOBOTOOLBOX,
UUID_SOURCE_OC_KOBO_ETL,
UUID_SOURCE_OC_LOOKUP,
LINK_RELATION_TYPE_COL,
list_excel_files,
read_excel_to_dataframes,
make_directory_files_df,
drop_empty_cols,
clean_up_multivalue_cols,
reorder_first_columns,
lookup_manifest_uuid,
)
from opencontext_py.apps.imports.kobotoolbox.attributes import (
ATTRIBUTE_HIERARCHY_DELIM,
GRID_GROUPBY_COLS,
GRID_PROBLEM_COL,
X_Y_GRID_COLS,
create_grid_validation_columns,
create_global_lat_lon_columns,
process_hiearchy_col_values,
)
from opencontext_py.apps.imports.kobotoolbox.catalog import (
CATALOG_ATTRIBUTES_SHEET,
make_catalog_links_df,
prepare_catalog
)
from opencontext_py.apps.imports.kobotoolbox.contexts import (
context_sources_to_dfs,
preload_contexts_to_df,
prepare_all_contexts
)
from opencontext_py.apps.imports.kobotoolbox.media import (
prepare_media,
prepare_media_links_df
)
from opencontext_py.apps.imports.kobotoolbox.preprocess import (
FILENAME_ATTRIBUTES_LOCUS,
FILENAME_ATTRIBUTES_BULK_FINDS,
FILENAME_ATTRIBUTES_SMALL_FINDS,
FILENAME_ATTRIBUTES_TRENCH_BOOKS,
make_locus_stratigraphy_df,
prep_field_tables,
make_final_trench_book_relations_df
)
from opencontext_py.apps.imports.kobotoolbox.dbupdate import (
update_contexts_subjects,
load_attribute_df_into_importer,
load_attribute_data_into_oc,
load_link_relations_df_into_oc,
)
"""
from opencontext_py.apps.imports.kobotoolbox.etl import (
make_kobo_to_open_context_etl_files,
update_open_context_db,
update_link_rel_open_context_db
)
make_kobo_to_open_context_etl_files()
update_open_context_db()
update_link_rel_open_context_db()
source_ids = {
'kobo-pc-2018-all-contexts-subjects.csv',
'kobo-pc-2018-all-media',
'kobo-pc-2018-bulk-finds',
'kobo-pc-2018-catalog',
'kobo-pc-2018-links-catalog',
'kobo-pc-2018-links-locus-strat',
'kobo-pc-2018-links-media',
'kobo-pc-2018-links-trench-book',
'kobo-pc-2018-locus',
'kobo-pc-2018-small-finds',
'kobo-pc-2018-trench-book'
}
source_ids = {
'kobo-pc-2019-all-contexts-subjects.csv',
'kobo-pc-2019-all-media',
'kobo-pc-2019-bulk-finds',
'kobo-pc-2019-catalog',
'kobo-pc-2019-links-catalog',
'kobo-pc-2019-links-locus-strat',
'kobo-pc-2019-links-media',
'kobo-pc-2019-links-trench-book',
'kobo-pc-2019-locus',
'kobo-pc-2019-small-finds',
'kobo-pc-2019-trench-book',
}
"""
ETL_YEAR = 2019
ETL_LABEL = 'PC-{}'.format(ETL_YEAR)
PROJECT_UUID = 'DF043419-F23B-41DA-7E4D-EE52AF22F92F'
SOURCE_PATH = settings.STATIC_IMPORTS_ROOT + 'pc-{}/'.format(ETL_YEAR)
DESTINATION_PATH = settings.STATIC_IMPORTS_ROOT + 'pc-{}/{}-oc-etl/'.format(ETL_YEAR, ETL_YEAR)
SOURCE_ID_PREFIX = 'kobo-pc-{}-'.format(ETL_YEAR)
MEDIA_BASE_URL = 'https://artiraq.org/static/opencontext/poggio-civitate/{}-media/'.format(ETL_YEAR)
MEDIA_FILES_PATH = settings.STATIC_IMPORTS_ROOT + 'pc-{}/attachments'.format(ETL_YEAR)
OC_TRANSFORMED_FILES_PATH = settings.STATIC_IMPORTS_ROOT + 'pc-{}/{}-media'.format(ETL_YEAR, ETL_YEAR)
FILENAME_ALL_CONTEXTS = 'all-contexts-subjects.csv'
FILENAME_ALL_MEDIA = 'all-media-files.csv'
FILENAME_LOADED_CONTEXTS = 'loaded--contexts-subjects.csv'
FILENAME_ATTRIBUTES_CATALOG = 'attributes--catalog.csv'
FILENAME_LINKS_MEDIA = 'links--media.csv'
FILENAME_LINKS_TRENCHBOOKS = 'links--trench-books.csv'
FILENAME_LINKS_STRATIGRAPHY = 'links--locus-stratigraphy.csv'
FILENAME_LINKS_CATALOG = 'links--catalog.csv'
GRID_PROBLEM_EXP_COLS = [
'label',
'class_uri',
'_uuid',
GRID_PROBLEM_COL,
] + GRID_GROUPBY_COLS
ATTRIBUTE_SOURCES = [
# (source_id, source_type, source_label, filename)
(SOURCE_ID_PREFIX + 'catalog', 'catalog', '{} Catalog'.format(ETL_LABEL), FILENAME_ATTRIBUTES_CATALOG,),
(SOURCE_ID_PREFIX + 'locus', 'locus', '{} Locus'.format(ETL_LABEL), FILENAME_ATTRIBUTES_LOCUS,),
(SOURCE_ID_PREFIX + 'bulk-finds', 'bulk-finds', '{} Bulk Finds'.format(ETL_LABEL), FILENAME_ATTRIBUTES_BULK_FINDS,),
(SOURCE_ID_PREFIX + 'small-finds', 'small-finds', '{} Small Finds'.format(ETL_LABEL), FILENAME_ATTRIBUTES_SMALL_FINDS,),
(SOURCE_ID_PREFIX + 'trench-book', 'trench-book', '{} Trench Book'.format(ETL_LABEL), FILENAME_ATTRIBUTES_TRENCH_BOOKS,),
(SOURCE_ID_PREFIX + 'all-media', 'all-media', '{} All Media'.format(ETL_LABEL), FILENAME_ALL_MEDIA,),
]
LINK_RELATIONS_SOURCES = [
(SOURCE_ID_PREFIX + 'links-media', FILENAME_LINKS_MEDIA,),
(SOURCE_ID_PREFIX + 'links-trench-book', FILENAME_LINKS_TRENCHBOOKS,),
(SOURCE_ID_PREFIX + 'links-locus-strat', FILENAME_LINKS_STRATIGRAPHY,),
(SOURCE_ID_PREFIX + 'links-catalog', FILENAME_LINKS_CATALOG,),
]
def write_grid_problem_csv(df, destination_path, filename):
"""Export the grid problem dataframe if needed """
if not GRID_PROBLEM_COL in df.columns:
# No grid problems in this DF
return None
bad_indx = (df[GRID_PROBLEM_COL].notnull())
if df[bad_indx].empty:
# No problem grid coordinates found
return None
df_report = df[bad_indx].copy()
all_tuple_cols = [(c[0] + ' ' + c[1]) for c in df_report.columns if isinstance(c, tuple)]
x_tuple_cols = [c for c in all_tuple_cols if 'Grid X' in c]
y_tuple_cols = [c for c in all_tuple_cols if 'Grid Y' in c]
tuple_renames = {
c:(c[0] + ' ' + c[1]) for c in df_report.columns if isinstance(c, tuple)
}
x_cols = [x for x, _ in X_Y_GRID_COLS if x in df_report.columns]
y_cols = [y for _, y in X_Y_GRID_COLS if y in df_report.columns]
df_report.rename(columns=tuple_renames, inplace=True)
df_report = df_report[(GRID_PROBLEM_EXP_COLS + x_cols + y_cols + x_tuple_cols + y_tuple_cols)]
df_report.sort_values(by=GRID_GROUPBY_COLS, inplace=True)
report_path = destination_path + 'bad-grid--' + filename
df_report.to_csv(
report_path,
index=False,
quoting=csv.QUOTE_NONNUMERIC
)
def add_context_subjects_label_class_uri(df, all_contexts_df):
"""Adds label and class_uri to df from all_contexts_df based on uuid join"""
join_df = all_contexts_df[['label', 'class_uri', 'uuid_source', 'context_uuid']].copy()
join_df.rename(columns={'context_uuid': '_uuid'}, inplace=True)
df_output = pd.merge(
df,
join_df,
how='left',
on=['_uuid']
)
df_output = reorder_first_columns(
df_output,
['label', 'class_uri', 'uuid_source']
)
return df_output
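# A minimal sketch with hypothetical toy data (the helper below is not called
# anywhere): it illustrates the join performed by
# add_context_subjects_label_class_uri -- label, class_uri and uuid_source are
# pulled in by matching the Kobo '_uuid' column against 'context_uuid'.
def _example_add_context_labels():
    all_contexts_df = pd.DataFrame({
        'context_uuid': ['uuid-1'],
        'label': ['Locus 1'],
        'class_uri': ['oc-gen:cat-locus'],
        'uuid_source': [UUID_SOURCE_KOBOTOOLBOX],
    })
    df = pd.DataFrame({'_uuid': ['uuid-1'], 'Trench': ['Tesoro 90']})
    return add_context_subjects_label_class_uri(df, all_contexts_df)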
def make_kobo_to_open_context_etl_files(
project_uuid=PROJECT_UUID,
year=ETL_YEAR,
source_path=SOURCE_PATH,
destination_path=DESTINATION_PATH,
base_url=MEDIA_BASE_URL,
files_path=MEDIA_FILES_PATH,
oc_media_root_dir=OC_TRANSFORMED_FILES_PATH,
):
"""Prepares files for Open Context ingest."""
source_dfs = context_sources_to_dfs(source_path)
all_contexts_df = prepare_all_contexts(
project_uuid,
year,
source_dfs
)
all_contexts_path = destination_path + FILENAME_ALL_CONTEXTS
all_contexts_df.to_csv(
all_contexts_path,
index=False,
quoting=csv.QUOTE_NONNUMERIC
)
    # Now prepare a consolidated all-media dataframe covering the media
    # files referenced in all of the source datasets.
df_media_all = prepare_media(
source_path,
files_path,
oc_media_root_dir,
project_uuid,
base_url
)
all_media_csv_path = destination_path + FILENAME_ALL_MEDIA
df_media_all.to_csv(all_media_csv_path, index=False, quoting=csv.QUOTE_NONNUMERIC)
# Now prepare a media links dataframe.
df_media_link = prepare_media_links_df(
source_path,
project_uuid,
all_contexts_df
)
if df_media_link is not None:
links_media_path = destination_path + FILENAME_LINKS_MEDIA
df_media_link.to_csv(
links_media_path,
index=False,
quoting=csv.QUOTE_NONNUMERIC
)
field_config_dfs = prep_field_tables(source_path, project_uuid, year)
for act_sheet, act_dict_dfs in field_config_dfs.items():
file_path = destination_path + act_dict_dfs['file']
df = act_dict_dfs['dfs'][act_sheet]
df = add_context_subjects_label_class_uri(
df,
all_contexts_df
)
# Add global coordinates if applicable.
df = create_grid_validation_columns(df)
write_grid_problem_csv(df, destination_path, act_dict_dfs['file'])
df = create_global_lat_lon_columns(df)
df.to_csv(file_path, index=False, quoting=csv.QUOTE_NONNUMERIC)
# Now do the stratigraphy.
locus_dfs = field_config_dfs['Locus Summary Entry']['dfs']
df_strat = make_locus_stratigraphy_df(locus_dfs)
strat_path = destination_path + FILENAME_LINKS_STRATIGRAPHY
df_strat.to_csv(strat_path, index=False, quoting=csv.QUOTE_NONNUMERIC)
# Prepare Trench Book relations
tb_dfs = field_config_dfs['Trench Book Entry']['dfs']
tb_all_rels_df = make_final_trench_book_relations_df(field_config_dfs, all_contexts_df)
tb_all_rels_path = destination_path + FILENAME_LINKS_TRENCHBOOKS
tb_all_rels_df.to_csv(tb_all_rels_path, index=False, quoting=csv.QUOTE_NONNUMERIC)
# Prepare the catalog
catalog_dfs = prepare_catalog(project_uuid, source_path)
catalog_dfs[CATALOG_ATTRIBUTES_SHEET] = add_context_subjects_label_class_uri(
catalog_dfs[CATALOG_ATTRIBUTES_SHEET],
all_contexts_df
)
catalog_dfs[CATALOG_ATTRIBUTES_SHEET] = process_hiearchy_col_values(
catalog_dfs[CATALOG_ATTRIBUTES_SHEET]
)
    # Clean up redundant data from the hierarchies
catalog_dfs[CATALOG_ATTRIBUTES_SHEET] = clean_up_multivalue_cols(
catalog_dfs[CATALOG_ATTRIBUTES_SHEET],
delim=ATTRIBUTE_HIERARCHY_DELIM
)
# Add global coordinates to the catalog data.
catalog_dfs[CATALOG_ATTRIBUTES_SHEET] = create_grid_validation_columns(
catalog_dfs[CATALOG_ATTRIBUTES_SHEET]
)
write_grid_problem_csv(
catalog_dfs[CATALOG_ATTRIBUTES_SHEET],
destination_path,
FILENAME_ATTRIBUTES_CATALOG
)
catalog_dfs[CATALOG_ATTRIBUTES_SHEET] = create_global_lat_lon_columns(
catalog_dfs[CATALOG_ATTRIBUTES_SHEET]
)
attribs_catalog_path = destination_path + FILENAME_ATTRIBUTES_CATALOG
catalog_dfs[CATALOG_ATTRIBUTES_SHEET].to_csv(
attribs_catalog_path,
index=False,
quoting=csv.QUOTE_NONNUMERIC
)
catalog_links_df = make_catalog_links_df(
project_uuid,
catalog_dfs,
tb_dfs['Trench Book Entry'],
all_contexts_df
)
links_catalog_path = destination_path + FILENAME_LINKS_CATALOG
catalog_links_df.to_csv(
links_catalog_path,
index=False,
quoting=csv.QUOTE_NONNUMERIC
)
def update_subjects_context_open_context_db(
project_uuid=PROJECT_UUID,
source_prefix=SOURCE_ID_PREFIX,
load_files=DESTINATION_PATH,
all_contexts_file=FILENAME_ALL_CONTEXTS,
loaded_contexts_file=FILENAME_LOADED_CONTEXTS,
):
"""Loads subjects, contexts items and containment relations"""
all_contexts_df = pd.read_csv((load_files + all_contexts_file))
new_contexts_df = update_contexts_subjects(
project_uuid,
(source_prefix + all_contexts_file),
all_contexts_df
)
loaded_contexts_path = (load_files + loaded_contexts_file)
new_contexts_df.to_csv(
loaded_contexts_path,
index=False,
quoting=csv.QUOTE_NONNUMERIC
)
def update_attributes_open_context_db(
project_uuid=PROJECT_UUID,
source_prefix=SOURCE_ID_PREFIX,
load_files=DESTINATION_PATH,
attribute_sources=ATTRIBUTE_SOURCES,
):
# Load attribute data into the importer
for source_id, source_type, source_label, filename in attribute_sources:
df = pd.read_csv((load_files + filename))
load_attribute_df_into_importer(
project_uuid,
source_id,
source_type,
source_label,
df
)
# Now actually import the data into Open Context
for source_id, _, _, _ in attribute_sources:
load_attribute_data_into_oc(project_uuid, source_id)
def update_link_rel_open_context_db(
project_uuid=PROJECT_UUID,
source_prefix=SOURCE_ID_PREFIX,
load_files=DESTINATION_PATH,
link_sources=LINK_RELATIONS_SOURCES,
loaded_link_file_prefix='loaded--',
):
"""Loads linking relationships into the database"""
for source_id, filename in link_sources:
df = pd.read_csv((load_files + filename))
df = load_link_relations_df_into_oc(
project_uuid,
source_id,
df
)
df.to_csv(
(load_files + loaded_link_file_prefix + filename),
index=False,
quoting=csv.QUOTE_NONNUMERIC
)
def update_open_context_db(
project_uuid=PROJECT_UUID,
source_prefix=SOURCE_ID_PREFIX,
load_files=DESTINATION_PATH,
all_contexts_file=FILENAME_ALL_CONTEXTS,
loaded_contexts_file=FILENAME_LOADED_CONTEXTS,
attribute_sources=ATTRIBUTE_SOURCES,
link_sources=LINK_RELATIONS_SOURCES
):
""""Updates the Open Context database with ETL load files"""
# First add subjects / contexts and their containment relations
update_subjects_context_open_context_db(
project_uuid=project_uuid,
source_prefix=source_prefix,
load_files=load_files,
all_contexts_file=all_contexts_file,
loaded_contexts_file=loaded_contexts_file,
)
# Load attribute data into the importer, then import them into
# Open Context.
update_attributes_open_context_db(
project_uuid=project_uuid,
source_prefix=source_prefix,
load_files=load_files,
attribute_sources=attribute_sources
)
# Load link relationships into the Open Context database.
update_link_rel_open_context_db(
project_uuid=project_uuid,
source_prefix=source_prefix,
load_files=load_files,
link_sources=link_sources
)
| gpl-3.0 |
pravsripad/mne-python | mne/decoding/base.py | 2 | 18682 | """Base class copy from sklearn.base."""
# Authors: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Romain Trachel <trachelr@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Jean-Remi King <jeanremi.king@gmail.com>
#
# License: BSD-3-Clause
import numpy as np
import datetime as dt
import numbers
from ..parallel import parallel_func
from ..fixes import BaseEstimator, is_classifier, _get_check_scoring
from ..utils import warn, verbose
class LinearModel(BaseEstimator):
"""Compute and store patterns from linear models.
The linear model coefficients (filters) are used to extract discriminant
neural sources from the measured data. This class computes the
corresponding patterns of these linear filters to make them more
interpretable :footcite:`HaufeEtAl2014`.
Parameters
----------
model : object | None
A linear model from scikit-learn with a fit method
that updates a ``coef_`` attribute.
If None the model will be LogisticRegression.
Attributes
----------
filters_ : ndarray, shape ([n_targets], n_features)
If fit, the filters used to decompose the data.
patterns_ : ndarray, shape ([n_targets], n_features)
If fit, the patterns used to restore M/EEG signals.
See Also
--------
CSP
mne.preprocessing.ICA
mne.preprocessing.Xdawn
Notes
-----
.. versionadded:: 0.10
References
----------
.. footbibliography::
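    Examples
    --------
    A minimal sketch on random data, showing how the fitted filters and the
    derived patterns can be accessed (shapes follow the attributes documented
    above).
    >>> import numpy as np
    >>> from sklearn.linear_model import LogisticRegression
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(40, 5)
    >>> y = (X[:, 0] > 0).astype(int)
    >>> model = LinearModel(LogisticRegression(solver='liblinear'))
    >>> _ = model.fit(X, y)
    >>> model.filters_.shape
    (5,)
    >>> model.patterns_.shape
    (5,)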
"""
def __init__(self, model=None): # noqa: D102
if model is None:
from sklearn.linear_model import LogisticRegression
model = LogisticRegression(solver='liblinear')
self.model = model
self._estimator_type = getattr(model, "_estimator_type", None)
def fit(self, X, y, **fit_params):
"""Estimate the coefficients of the linear model.
Save the coefficients in the attribute ``filters_`` and
computes the attribute ``patterns_``.
Parameters
----------
X : array, shape (n_samples, n_features)
The training input samples to estimate the linear coefficients.
y : array, shape (n_samples, [n_targets])
The target values.
**fit_params : dict of string -> object
Parameters to pass to the fit method of the estimator.
Returns
-------
self : instance of LinearModel
Returns the modified instance.
"""
X, y = np.asarray(X), np.asarray(y)
if X.ndim != 2:
raise ValueError('LinearModel only accepts 2-dimensional X, got '
'%s instead.' % (X.shape,))
if y.ndim > 2:
raise ValueError('LinearModel only accepts up to 2-dimensional y, '
'got %s instead.' % (y.shape,))
# fit the Model
self.model.fit(X, y, **fit_params)
# Computes patterns using Haufe's trick: A = Cov_X . W . Precision_Y
inv_Y = 1.
X = X - X.mean(0, keepdims=True)
if y.ndim == 2 and y.shape[1] != 1:
y = y - y.mean(0, keepdims=True)
inv_Y = np.linalg.pinv(np.cov(y.T))
self.patterns_ = np.cov(X.T).dot(self.filters_.T.dot(inv_Y)).T
return self
@property
def filters_(self):
if hasattr(self.model, 'coef_'):
# Standard Linear Model
filters = self.model.coef_
elif hasattr(self.model.best_estimator_, 'coef_'):
# Linear Model with GridSearchCV
filters = self.model.best_estimator_.coef_
else:
raise ValueError('model does not have a `coef_` attribute.')
if filters.ndim == 2 and filters.shape[0] == 1:
filters = filters[0]
return filters
def transform(self, X):
"""Transform the data using the linear model.
Parameters
----------
X : array, shape (n_samples, n_features)
The data to transform.
Returns
-------
        X_transformed : array
            The data transformed by the wrapped model.
"""
return self.model.transform(X)
def fit_transform(self, X, y):
"""Fit the data and transform it using the linear model.
Parameters
----------
X : array, shape (n_samples, n_features)
The training input samples to estimate the linear coefficients.
y : array, shape (n_samples,)
The target values.
Returns
-------
        X_transformed : array
            The data transformed by the wrapped model after fitting.
"""
return self.fit(X, y).transform(X)
def predict(self, X):
"""Compute predictions of y from X.
Parameters
----------
X : array, shape (n_samples, n_features)
The data used to compute the predictions.
Returns
-------
y_pred : array, shape (n_samples,)
The predictions.
"""
return self.model.predict(X)
def predict_proba(self, X):
"""Compute probabilistic predictions of y from X.
Parameters
----------
X : array, shape (n_samples, n_features)
The data used to compute the predictions.
Returns
-------
y_pred : array, shape (n_samples, n_classes)
The probabilities.
"""
return self.model.predict_proba(X)
def decision_function(self, X):
"""Compute distance from the decision function of y from X.
Parameters
----------
X : array, shape (n_samples, n_features)
The data used to compute the predictions.
Returns
-------
y_pred : array, shape (n_samples, n_classes)
The distances.
"""
return self.model.decision_function(X)
def score(self, X, y):
"""Score the linear model computed on the given test data.
Parameters
----------
X : array, shape (n_samples, n_features)
The data to transform.
y : array, shape (n_samples,)
The target values.
Returns
-------
score : float
Score of the linear model.
"""
return self.model.score(X, y)
def _set_cv(cv, estimator=None, X=None, y=None):
"""Set the default CV depending on whether clf is classifier/regressor."""
# Detect whether classification or regression
if estimator in ['classifier', 'regressor']:
est_is_classifier = estimator == 'classifier'
else:
est_is_classifier = is_classifier(estimator)
# Setup CV
from sklearn import model_selection as models
from sklearn.model_selection import (check_cv, StratifiedKFold, KFold)
if isinstance(cv, (int, np.int64)):
XFold = StratifiedKFold if est_is_classifier else KFold
cv = XFold(n_splits=cv)
elif isinstance(cv, str):
if not hasattr(models, cv):
raise ValueError('Unknown cross-validation')
cv = getattr(models, cv)
cv = cv()
cv = check_cv(cv=cv, y=y, classifier=est_is_classifier)
# Extract train and test set to retrieve them at predict time
cv_splits = [(train, test) for train, test in
cv.split(X=np.zeros_like(y), y=y)]
if not np.all([len(train) for train, _ in cv_splits]):
raise ValueError('Some folds do not have any train epochs.')
return cv, cv_splits
def _check_estimator(estimator, get_params=True):
"""Check whether an object has the methods required by sklearn."""
valid_methods = ('predict', 'transform', 'predict_proba',
'decision_function')
if (
(not hasattr(estimator, 'fit')) or
(not any(hasattr(estimator, method) for method in valid_methods))
):
raise ValueError('estimator must be a scikit-learn transformer or '
'an estimator with the fit and a predict-like (e.g. '
'predict_proba) or a transform method.')
if get_params and not hasattr(estimator, 'get_params'):
raise ValueError('estimator must be a scikit-learn transformer or an '
'estimator with the get_params method that allows '
'cloning.')
def _get_inverse_funcs(estimator, terminal=True):
"""Retrieve the inverse functions of an pipeline or an estimator."""
inverse_func = [False]
if hasattr(estimator, 'steps'):
# if pipeline, retrieve all steps by nesting
inverse_func = list()
for _, est in estimator.steps:
inverse_func.extend(_get_inverse_funcs(est, terminal=False))
elif hasattr(estimator, 'inverse_transform'):
        # if not a pipeline, attempt to retrieve its inverse function
inverse_func = [estimator.inverse_transform]
    # If terminal node, check that the last estimator is a classifier,
# and remove it from the transformers.
if terminal:
last_is_estimator = inverse_func[-1] is False
all_invertible = not(False in inverse_func[:-1])
if last_is_estimator and all_invertible:
            # keep all inverse transformations and remove the final estimator
inverse_func = inverse_func[:-1]
else:
inverse_func = list()
return inverse_func
def get_coef(estimator, attr='filters_', inverse_transform=False):
"""Retrieve the coefficients of an estimator ending with a Linear Model.
This is typically useful to retrieve "spatial filters" or "spatial
patterns" of decoding models :footcite:`HaufeEtAl2014`.
Parameters
----------
estimator : object | None
An estimator from scikit-learn.
attr : str
The name of the coefficient attribute to retrieve, typically
``'filters_'`` (default) or ``'patterns_'``.
inverse_transform : bool
If True, returns the coefficients after inverse transforming them with
the transformer steps of the estimator.
Returns
-------
coef : array
The coefficients.
References
----------
.. footbibliography::
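    Examples
    --------
    A minimal sketch on random data: the patterns of a pipeline ending in a
    LinearModel are mapped back to the original (pre-scaling) units by
    passing ``inverse_transform=True``.
    >>> import numpy as np
    >>> from sklearn.pipeline import make_pipeline
    >>> from sklearn.preprocessing import StandardScaler
    >>> from sklearn.linear_model import LogisticRegression
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(40, 5)
    >>> y = (X[:, 0] > 0).astype(int)
    >>> clf = make_pipeline(StandardScaler(),
    ...                     LinearModel(LogisticRegression(solver='liblinear')))
    >>> _ = clf.fit(X, y)
    >>> get_coef(clf, 'patterns_', inverse_transform=True).shape
    (5,)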
"""
# Get the coefficients of the last estimator in case of nested pipeline
est = estimator
while hasattr(est, 'steps'):
est = est.steps[-1][1]
squeeze_first_dim = False
# If SlidingEstimator, loop across estimators
if hasattr(est, 'estimators_'):
coef = list()
for this_est in est.estimators_:
coef.append(get_coef(this_est, attr, inverse_transform))
coef = np.transpose(coef)
coef = coef[np.newaxis] # fake a sample dimension
squeeze_first_dim = True
elif not hasattr(est, attr):
raise ValueError('This estimator does not have a %s attribute:\n%s'
% (attr, est))
else:
coef = getattr(est, attr)
if coef.ndim == 1:
coef = coef[np.newaxis]
squeeze_first_dim = True
# inverse pattern e.g. to get back physical units
if inverse_transform:
if not hasattr(estimator, 'steps') and not hasattr(est, 'estimators_'):
raise ValueError('inverse_transform can only be applied onto '
'pipeline estimators.')
# The inverse_transform parameter will call this method on any
# estimator contained in the pipeline, in reverse order.
for inverse_func in _get_inverse_funcs(estimator)[::-1]:
coef = inverse_func(coef)
if squeeze_first_dim:
coef = coef[0]
return coef
@verbose
def cross_val_multiscore(estimator, X, y=None, groups=None, scoring=None,
cv=None, n_jobs=None, verbose=None, fit_params=None,
pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation.
Parameters
----------
estimator : instance of sklearn.base.BaseEstimator
The object to use to fit the data.
Must implement the 'fit' method.
X : array-like, shape (n_samples, n_dimensional_features,)
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, shape (n_samples, n_targets,)
The target variable to try to predict in the case of
supervised learning.
groups : array-like, with shape (n_samples,)
Group labels for the samples used while splitting the dataset into
train/test set.
scoring : str, callable | None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
Note that when using an estimator which inherently returns
multidimensional output - in particular, SlidingEstimator
or GeneralizingEstimator - you should set the scorer
there, not here.
cv : int, cross-validation generator | iterable
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- integer, to specify the number of folds in a ``(Stratified)KFold``,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`sklearn.model_selection.KFold` is used.
%(n_jobs)s
%(verbose)s
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or str, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape (n_splits,) | shape (n_splits, n_scores)
Array of scores of the estimator for each run of the cross validation.
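    Examples
    --------
    A minimal sketch on random data with a plain scikit-learn classifier and
    the default scorer.
    >>> import numpy as np
    >>> from sklearn.linear_model import LogisticRegression
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(40, 5)
    >>> y = (X[:, 0] > 0).astype(int)
    >>> cross_val_multiscore(LogisticRegression(solver='liblinear'),
    ...                      X, y, cv=3).shape
    (3,)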
"""
# This code is copied from sklearn
from sklearn.base import clone
from sklearn.utils import indexable
from sklearn.model_selection._split import check_cv
check_scoring = _get_check_scoring()
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
cv_iter = list(cv.split(X, y, groups))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
# Note: this parallelization is implemented using MNE Parallel
parallel, p_func, n_jobs = parallel_func(_fit_and_score, n_jobs,
pre_dispatch=pre_dispatch)
scores = parallel(
p_func(
estimator=clone(estimator), X=X, y=y, scorer=scorer, train=train,
test=test, parameters=None, fit_params=fit_params
) for train, test in cv_iter
)
return np.array(scores)[:, 0, ...] # flatten over joblib output.
def _fit_and_score(estimator, X, y, scorer, train, test,
parameters, fit_params, return_train_score=False,
return_parameters=False, return_n_test_samples=False,
return_times=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split."""
# This code is adapted from sklearn
from ..fixes import _check_fit_params
from sklearn.utils.metaestimators import _safe_split
from sklearn.utils.validation import _num_samples
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = _check_fit_params(X, fit_params, train)
if parameters is not None:
estimator.set_params(**parameters)
start_time = dt.datetime.now()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
# Note fit time as time until error
fit_duration = dt.datetime.now() - start_time
score_duration = dt.timedelta(0)
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e))
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)")
else:
fit_duration = dt.datetime.now() - start_time
test_score = _score(estimator, X_test, y_test, scorer)
score_duration = dt.datetime.now() - start_time - fit_duration
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
ret = [train_score, test_score] if return_train_score else [test_score]
if return_n_test_samples:
ret.append(_num_samples(X_test))
if return_times:
ret.extend([
fit_duration.total_seconds(),
score_duration.total_seconds()
])
if return_parameters:
ret.append(parameters)
return ret
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set.
    This code is the same as sklearn.model_selection._validation._score
    but allows the returned score to be an array instead of a float.
"""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if hasattr(score, 'item'):
try:
# e.g. unwrap memmapped scalars
score = score.item()
except ValueError:
# non-scalar?
pass
return score
| bsd-3-clause |
moonbury/notebooks | github/MasteringPandas/2060_11_Code/run_logistic_regression_titanic.py | 3 | 2756 | #!/home/femibyte/local/anaconda/bin/python
import pandas as pd
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
from patsy import dmatrix, dmatrices
import re
train_df = pd.read_csv('csv/train.csv', header=0)
test_df = pd.read_csv('csv/test.csv', header=0)
formula1 = 'C(Pclass) + C(Sex) + Fare'
formula2 = 'C(Pclass) + C(Sex)'
formula3 = 'C(Sex)'
formula4 = 'C(Pclass) + C(Sex) + Age + SibSp + Parch'
formula5 = 'C(Pclass) + C(Sex) + Age + SibSp + Parch + C(Embarked)'
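# Note on the patsy formulas above: C(...) dummy-codes a column as
# categorical, while bare names such as Fare, Age, SibSp and Parch enter as
# numeric predictors, so each formula defines one candidate feature set.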
formula_map = {'PClass_Sex_Fare' : formula1,
'PClass_Sex' : formula2,
'Sex' : formula3,
'PClass_Sex_Age_Sibsp_Parch' : formula4,
'PClass_Sex_Age_Sibsp_Parch_Embarked' : formula5
}
def main():
    """Fit a logistic regression for each candidate formula and write the
    test-set survival predictions for each one to a results CSV."""
train_df_filled=fill_null_vals(train_df,'Fare')
train_df_filled=fill_null_vals(train_df_filled,'Age')
assert len(train_df_filled)==len(train_df)
test_df_filled=fill_null_vals(test_df,'Fare')
test_df_filled=fill_null_vals(test_df_filled,'Age')
assert len(test_df_filled)==len(test_df)
for formula_name, formula in formula_map.iteritems():
print "name=%s formula=%s" % (formula_name,formula)
y_train,X_train = dmatrices('Survived ~ ' + formula,
train_df_filled,return_type='dataframe')
print "Running logistic regression with formula : %s" % formula
print "X_train cols=%s " % X_train.columns
y_train = np.ravel(y_train)
model = LogisticRegression()
lr_model = model.fit(X_train, y_train)
print "Training score:%s" % lr_model.score(X_train,y_train)
X_test=dmatrix(formula,test_df_filled)
predicted=lr_model.predict(X_test)
print "predicted:%s\n" % predicted[:5]
assert len(predicted)==len(test_df)
pred_results=pd.Series(predicted,name='Survived')
lr_results=pd.concat([test_df['PassengerId'],pred_results],axis=1)
lr_results.Survived=lr_results.Survived.astype(int)
results_file='csv/logisticregr_%s.csv' % formula_name
#results_file = re.sub('[+ ()C]','',results_file)
lr_results.to_csv(results_file,index=False)
def fill_null_vals(df,col_name):
    """Fill nulls in col_name with the mean of that column computed over
    passengers sharing the Sex and Pclass of each null-valued passenger."""
null_passengers=df[df[col_name].isnull()]
passenger_id_list=null_passengers['PassengerId'].tolist()
df_filled=df.copy()
for pass_id in passenger_id_list:
idx=df[df['PassengerId']==pass_id].index[0]
similar_passengers=df[(df['Sex']==null_passengers['Sex'][idx]) & (df['Pclass']==null_passengers['Pclass'][idx])]
mean_val=np.mean(similar_passengers[col_name].dropna())
df_filled.loc[idx,col_name]=mean_val
return df_filled
if __name__ == '__main__':
main()
| gpl-3.0 |
nhuntwalker/astroML | book_figures/chapter7/fig_spec_examples.py | 4 | 2725 | """
SDSS spectra Examples
---------------------
Figure 7.1
A sample of 15 galaxy spectra selected from the SDSS spectroscopic data set
(see Section 1.5.5). These spectra span a range of galaxy types, from
star-forming to passive galaxies. Each spectrum has been shifted to its rest
frame and covers the wavelength interval 3000-8000 Angstroms. The specific
fluxes, :math:`F_\lambda(\lambda)`, on the ordinate axes have an arbitrary
scaling.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from sklearn.decomposition import RandomizedPCA
from astroML.datasets import sdss_corrected_spectra
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#----------------------------------------------------------------------
# Use pre-computed PCA to reconstruct spectra
data = sdss_corrected_spectra.fetch_sdss_corrected_spectra()
spectra = sdss_corrected_spectra.reconstruct_spectra(data)
lam = sdss_corrected_spectra.compute_wavelengths(data)
#------------------------------------------------------------
# select random spectra
np.random.seed(5)
nrows = 5
ncols = 3
ind = np.random.randint(spectra.shape[0], size=nrows * ncols)
spec_sample = spectra[ind]
#----------------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 4))
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05,
bottom=0.1, top=0.95, hspace=0.05)
for i in range(ncols):
for j in range(nrows):
ax = fig.add_subplot(nrows, ncols, ncols * j + 1 + i)
ax.plot(lam, spec_sample[ncols * j + i], '-k', lw=1)
ax.yaxis.set_major_formatter(plt.NullFormatter())
ax.xaxis.set_major_locator(plt.MultipleLocator(1000))
if j < nrows - 1:
ax.xaxis.set_major_formatter(plt.NullFormatter())
else:
plt.xlabel(r'wavelength $(\AA)$')
ax.set_xlim(3000, 7999)
ylim = ax.get_ylim()
dy = 0.05 * (ylim[1] - ylim[0])
ax.set_ylim(ylim[0] - dy, ylim[1] + dy)
plt.show()
| bsd-2-clause |
LambentLight/571final-fnc | data_setup/cluster.py | 1 | 9923 | import random
import math
import copy
import time
# clustering
class Unsupervised:
def __init__(self, clusters, original_image):
self.NUMBER_OF_FEATURES = 44
self.clusters = clusters
self.original_image = original_image
self.image = original_image.copy()
self.image_width = len(original_image)
self.image_height = 0
self.classification = [0 for x in range(self.image_width)] # every pixel location will have a classification from 0 to clusters-1
# Generate original cluster centers
self.cluster_centers = [[0 for x in range(self.NUMBER_OF_FEATURES)] for y in range(self.clusters)] # clusters x 3 vector with each randomly generated cluster center
for center in self.cluster_centers:
for feature in range(self.NUMBER_OF_FEATURES):
center[feature] = random.uniform(-1, 1)
@staticmethod
def euclidean(a, b):
if len(a) != len(b):
print("Euclidean math needs help")
exit(1)
sum = 0
for i in range(len(a)):
diff = a[i] - b[i]
sum += math.pow(diff, 2)
return math.sqrt(sum)
# Assigns every image pixel to the closest center
def assign_samples(self, distance):
for pixel_j in range(self.image_width):
min_distance = float("inf")
min_cluster = -1
for cluster, center in enumerate(self.cluster_centers):
new_distance = distance(center, self.image[pixel_j])
if new_distance < min_distance:
min_distance = new_distance
min_cluster = cluster
self.classification[pixel_j] = min_cluster
def different(self, old_classification, new_classification):
for pixel_j in range(self.image_width):
if old_classification[pixel_j] != new_classification[pixel_j]:
#print("Difference found: ", pixel_j, old_classification[pixel_j], new_classification[pixel_j])
return True
return False
def reduce_image(self, picture_name):
print("Reducing Image")
for pixel_j in range(self.image_width):
self.image[pixel_j] = self.cluster_centers[self.classification[pixel_j]]
def kmeans_cluster(self):
# store old classification
old_classification = copy.deepcopy(self.classification)
# assign samples
self.assign_samples(self.euclidean)
print("Initial assignment done")
timing_sum = 0
# while there are differences
iteration = 1
while self.different(old_classification, self.classification):
start_time = time.time()
print("Iteration: ", iteration)
iteration += 1
# store old classification
old_classification = copy.deepcopy(self.classification)
# calculate new cluster centers
self.kmeans_centers()
# assign samples
self.assign_samples(self.euclidean)
timing_sum += time.time() - start_time
print(time.time() - start_time)
timing_average = timing_sum / (iteration * 1.0)
with open('kmeans_performance.txt', 'a') as f:
f.write('{} {} {}\n'.format(self.clusters, iteration, timing_average))
def kmeans_centers(self):
# vector of RGB clusters initialized to 0
for center in self.cluster_centers:
for feature in range(self.NUMBER_OF_FEATURES):
center[feature] = 0
# iterate over pixels
mean_counts = [0] * self.clusters
for pixel_j in range(self.image_width):
cluster = self.classification[pixel_j]
# add pixel values to that index in vector of RGB clusters
mean_counts[cluster] += 1
for feature in range(self.NUMBER_OF_FEATURES):
self.cluster_centers[cluster][feature] += self.image[pixel_j][feature]
# divide by number of samples in cluster
for cluster, center in enumerate(self.cluster_centers):
if mean_counts[cluster] == 0:
mean_counts[cluster] = 1
for feature in range(self.NUMBER_OF_FEATURES):
center[feature] /= mean_counts[cluster]
def winner_cluster(self):
# store old classification
old_classification = copy.deepcopy(self.classification)
# assign samples
start_time = time.time()
self.assign_samples(self.euclidean)
print("Initial assignment done")
timing_sum = 0
# while there are differences
iteration = 1
while self.different(old_classification, self.classification):
start_time = time.time()
print("Iteration: ", iteration)
iteration += 1
# store old classification
old_classification = copy.deepcopy(self.classification)
# calculate new cluster centers
self.winner_centers()
# assign samples
self.assign_samples(self.euclidean)
timing_sum += time.time() - start_time
time.time() - start_time
timing_average = timing_sum / (iteration * 1.0)
with open('winner_performance.txt', 'a') as f:
f.write('{} {} {}\n'.format(self.clusters, iteration, timing_average))
def winner_centers(self, learning_rate=0.01):
# iterate over pixels
for pixel_j in range(self.image_width):
cluster = self.classification[pixel_j]
# add pixel values to that index in vector of RGB clusters
for feature in range(self.NUMBER_OF_FEATURES):
self.cluster_centers[cluster][feature] += learning_rate * (self.image[pixel_j][feature] - self.cluster_centers[cluster][feature])
def kohonen_cluster(self):
# store old classification
old_classification = copy.deepcopy(self.classification)
# assign samples
self.assign_samples(self.euclidean)
print("Initial assignment done")
timing_sum = 0
# while there are differences
iteration = 1
while self.different(old_classification, self.classification):
start_time = time.time()
print("Iteration: ", iteration)
iteration += 1
# store old classification
old_classification = copy.deepcopy(self.classification)
# calculate new cluster centers
self.kohonen_centers()
# assign samples
self.assign_samples(self.euclidean)
timing_sum += time.time() - start_time
time.time() - start_time
timing_average = timing_sum / (iteration * 1.0)
with open('kohonen_performance.txt', 'a') as f:
f.write('{} {} {}\n'.format(self.clusters, iteration, timing_average))
def kohonen_centers(self, learning_rate=0.01):
# iterate over pixels
for pixel_j in range(self.image_width):
winning_cluster = self.classification[pixel_j]
for index, cluster in enumerate(self.cluster_centers):
for feature in range(self.NUMBER_OF_FEATURES):
cluster[feature] += learning_rate * self.closeness(winning_cluster, index) * (self.image[pixel_j][feature] - cluster[feature])
def closeness(self, winning_cluster, other_cluster, variance=1.0):
return math.exp((-1.0 * pow(self.topological_distance(winning_cluster, other_cluster), 2.0)) / (2.0 * variance))
def topological_distance(self, winning_cluster, other_cluster):
return winning_cluster - other_cluster
# K-means
'''
kmeans = Unsupervised(4, )
kmeans.kmeans_cluster()
kmeans.reduce_image('images/k-means{}.ppm'.format(clusters))
'''
# Winner take all
'''
for i in range(0, 9):
clusters = pow(2, i)
print "Winner, {} clusters".format(clusters)
winner = Unsupervised(clusters, copy.deepcopy(original_image))
winner.winner_cluster()
winner.reduce_image('images/winner{}.ppm'.format(clusters))
'''
# Kohonen
'''
for i in range(0, 9):
clusters = pow(2, i)
print "Kohonen, {} clusters".format(clusters)
kohonen = Unsupervised(clusters, copy.deepcopy(original_image))
kohonen.kohonen_cluster()
kohonen.reduce_image('images/kohonen{}.ppm'.format(clusters))
'''
# Mean shift
'''
import numpy as np
from sklearn.cluster import MeanShift
window_sizes = [11, 12, 13, 14]
for window_size in window_sizes:
print "Mean Shift, window size {}".format(window_size)
ms = MeanShift(bandwidth=window_size, bin_seeding=True)
copy_image = copy.deepcopy(original_image)
X = np.zeros((480*480, 3))
# iterate over pixels
for pixel_i in range(480):
for pixel_j in range(480):
X[pixel_i * 480 + pixel_j][RED] = copy_image[pixel_i][pixel_j][RED]
X[pixel_i * 480 + pixel_j][GREEN] = copy_image[pixel_i][pixel_j][GREEN]
X[pixel_i * 480 + pixel_j][BLUE] = copy_image[pixel_i][pixel_j][BLUE]
print X
start_time = time.time()
ms.fit(X)
labels = ms.labels_
print labels
cluster_centers = ms.cluster_centers_
with open('means_performance.txt', 'a') as f:
f.write('{} {} {}\n'.format(window_size, time.time() - start_time, len(cluster_centers)))
print("number of estimated clusters : {}".format(len(cluster_centers)))
print cluster_centers
print "Reducing Image"
for pixel_i in range(480):
for pixel_j in range(480):
copy_image[pixel_i][pixel_j][RED] = cluster_centers[labels[pixel_i * 480 + pixel_j]][RED]
copy_image[pixel_i][pixel_j][GREEN] = cluster_centers[labels[pixel_i * 480 + pixel_j]][GREEN]
copy_image[pixel_i][pixel_j][BLUE] = cluster_centers[labels[pixel_i * 480 + pixel_j]][BLUE]
imsave('mean_shift{}.ppm'.format(window_size), copy_image)
'''
| apache-2.0 |
aleju/self-driving-truck | train_reinforced/visualization.py | 1 | 14172 | from __future__ import print_function, division
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from lib import actions as actionslib
from lib import util
from lib.util import to_numpy
import imgaug as ia
import numpy as np
import torch.nn.functional as F
try:
xrange
except NameError:
xrange = range
def generate_overview_image(current_state, last_state, \
action_up_down_bpe, action_left_right_bpe, \
memory, memory_val, \
ticks, last_train_tick, \
plans, plan_to_rewards_direct, plan_to_reward_indirect, \
plan_to_reward, plans_ranking, current_plan, best_plan_ae_decodings,
idr_v, idr_adv,
grids, args):
h, w = current_state.screenshot_rs.shape[0:2]
scr = np.copy(current_state.screenshot_rs)
scr = ia.imresize_single_image(scr, (h//2, w//2))
if best_plan_ae_decodings is not None:
ae_decodings = (to_numpy(best_plan_ae_decodings) * 255).astype(np.uint8).transpose((0, 2, 3, 1))
ae_decodings = [ia.imresize_single_image(ae_decodings[i, ...], (h//4, w//4)) for i in xrange(ae_decodings.shape[0])]
ae_decodings = ia.draw_grid(ae_decodings, cols=5)
#ae_decodings = np.vstack([
# np.hstack(ae_decodings[0:5]),
# np.hstack(ae_decodings[5:10])
#])
else:
ae_decodings = np.zeros((1, 1, 3), dtype=np.uint8)
if grids is not None:
scr_rs = ia.imresize_single_image(scr, (h//4, w//4))
grids = (to_numpy(grids)[0] * 255).astype(np.uint8)
grids = [ia.imresize_single_image(grids[i, ...][:,:,np.newaxis], (h//4, w//4)) for i in xrange(grids.shape[0])]
grids = [util.draw_heatmap_overlay(scr_rs, np.squeeze(grid/255).astype(np.float32)) for grid in grids]
grids = ia.draw_grid(grids, cols=4)
else:
grids = np.zeros((1, 1, 3), dtype=np.uint8)
plans_text = []
if idr_v is not None and idr_adv is not None:
idr_v = to_numpy(idr_v[0])
idr_adv = to_numpy(idr_adv[0])
plans_text.append("V(s): %+07.2f" % (idr_v[0],))
adv_texts = []
curr = []
for i, ma in enumerate(actionslib.ALL_MULTIACTIONS):
curr.append("A(%s%s): %+07.2f" % (ma[0] if ma[0] != "~WS" else "_", ma[1] if ma[1] != "~AD" else "_", idr_adv[i]))
if (i+1) % 3 == 0 or (i+1) == len(actionslib.ALL_MULTIACTIONS):
adv_texts.append(" ".join(curr))
curr = []
plans_text.extend(adv_texts)
if current_plan is not None:
plans_text.append("")
plans_text.append("Current Plan:")
actions_ud_text = []
actions_lr_text = []
for multiaction in current_plan:
actions_ud_text.append("%s" % (multiaction[0] if multiaction[0] != "~WS" else "_",))
actions_lr_text.append("%s" % (multiaction[1] if multiaction[1] != "~AD" else "_",))
plans_text.extend([" ".join(actions_ud_text), " ".join(actions_lr_text)])
plans_text.append("")
plans_text.append("Best Plans:")
if plan_to_rewards_direct is not None:
for plan_idx in plans_ranking[::-1][0:5]:
plan = plans[plan_idx]
rewards_direct = plan_to_rewards_direct[plan_idx]
reward_indirect = plan_to_reward_indirect[plan_idx]
reward = plan_to_reward[plan_idx]
actions_ud_text = []
actions_lr_text = []
rewards_text = []
for multiaction in plan:
actions_ud_text.append("%s" % (multiaction[0] if multiaction[0] != "~WS" else "_",))
actions_lr_text.append("%s" % (multiaction[1] if multiaction[1] != "~AD" else "_",))
for rewards_t in rewards_direct:
rewards_text.append("%+04.1f" % (rewards_t,))
rewards_text.append("| %+07.2f (V(s')=%+07.2f)" % (reward, reward_indirect))
plans_text.extend(["", " ".join(actions_ud_text), " ".join(actions_lr_text), " ".join(rewards_text)])
plans_text = "\n".join(plans_text)
stats_texts = [
"u/d bpe: %s" % (action_up_down_bpe.rjust(5)),
" l/r bpe: %s" % (action_left_right_bpe.rjust(5)),
"u/d ape: %s %s" % (current_state.action_up_down.rjust(5), "[C]" if action_up_down_bpe != current_state.action_up_down else ""),
" l/r ape: %s %s" % (current_state.action_left_right.rjust(5), "[C]" if action_left_right_bpe != current_state.action_left_right else ""),
"speed: %03d" % (current_state.speed,) if current_state.speed is not None else "speed: None",
"is_reverse: yes" if current_state.is_reverse else "is_reverse: no",
"is_damage_shown: yes" if current_state.is_damage_shown else "is_damage_shown: no",
"is_offence_shown: yes" if current_state.is_offence_shown else "is_offence_shown: no",
"steering wheel: %05.2f (%05.2f)" % (current_state.steering_wheel_cnn, current_state.steering_wheel_raw_cnn),
"reward for last state: %05.2f" % (last_state.reward,) if last_state is not None else "reward for last state: None",
"p_explore: %.2f%s" % (current_state.p_explore if args.p_explore is None else args.p_explore, "" if args.p_explore is None else " (constant)"),
"memory size (train/val): %06d / %06d" % (memory.size, memory_val.size),
"ticks: %06d" % (ticks,),
"last train: %06d" % (last_train_tick,)
]
stats_text = "\n".join(stats_texts)
all_texts = plans_text + "\n\n\n" + stats_text
result = np.zeros((720, 590, 3), dtype=np.uint8)
util.draw_image(result, x=0, y=0, other_img=scr, copy=False)
util.draw_image(result, x=0, y=scr.shape[0]+10, other_img=ae_decodings, copy=False)
util.draw_image(result, x=0, y=scr.shape[0]+10+ae_decodings.shape[0]+10, other_img=grids, copy=False)
result = util.draw_text(result, x=0, y=scr.shape[0]+10+ae_decodings.shape[0]+10+grids.shape[0]+10, size=8, text=all_texts, color=[255, 255, 255])
return result
def generate_training_debug_image(inputs_supervised, inputs_supervised_prev, \
outputs_dr_preds, outputs_dr_gt, \
outputs_idr_preds, outputs_idr_gt, \
outputs_successor_preds, outputs_successor_gt, \
outputs_ae_preds, outputs_ae_gt, \
outputs_dr_successors_preds, outputs_dr_successors_gt, \
outputs_idr_successors_preds, outputs_idr_successors_gt,
multiactions):
imgs_in = to_numpy(inputs_supervised)[0]
imgs_in = np.clip(imgs_in * 255, 0, 255).astype(np.uint8).transpose((1, 2, 0))
imgs_in_prev = to_numpy(inputs_supervised_prev)[0]
imgs_in_prev = np.clip(imgs_in_prev * 255, 0, 255).astype(np.uint8).transpose((1, 2, 0))
h, w = imgs_in.shape[0:2]
imgs_in = np.vstack([
np.hstack([downscale(imgs_in[..., 0:3]), downscale(to_rgb(imgs_in_prev[..., 0]))]),
#np.hstack([downscale(to_rgb(imgs_in_prev[..., 1])), downscale(to_rgb(imgs_in_prev[..., 2]))])
np.hstack([downscale(to_rgb(imgs_in_prev[..., 1])), np.zeros_like(imgs_in[..., 0:3])])
])
h_imgs = imgs_in.shape[0]
ae_gt = np.clip(to_numpy(outputs_ae_gt)[0] * 255, 0, 255).astype(np.uint8).transpose((1, 2, 0))
ae_preds = np.clip(to_numpy(outputs_ae_preds)[0] * 255, 0, 255).astype(np.uint8).transpose((1, 2, 0))
"""
imgs_ae = np.vstack([
downscale(ae_preds[..., 0:3]),
downscale(to_rgb(ae_preds[..., 3])),
downscale(to_rgb(ae_preds[..., 4])),
downscale(to_rgb(ae_preds[..., 5]))
])
"""
imgs_ae = np.hstack([downscale(ae_gt), downscale(ae_preds)])
h_ae = imgs_ae.shape[0]
outputs_successor_dr_grid = draw_successor_dr_grid(
to_numpy(F.softmax(outputs_dr_successors_preds[:, 0, :])),
to_numpy(outputs_dr_successors_gt[:, 0]),
upscale_factor=(2, 4)
)
outputs_dr_preds = to_numpy(F.softmax(outputs_dr_preds))[0]
outputs_dr_gt = to_numpy(outputs_dr_gt)[0]
grid_preds = output_grid_to_image(outputs_dr_preds[np.newaxis, :], upscale_factor=(2, 4))
grid_gt = output_grid_to_image(outputs_dr_gt[np.newaxis, :], upscale_factor=(2, 4))
imgs_dr = np.hstack([
grid_gt,
np.zeros((grid_gt.shape[0], 4, 3), dtype=np.uint8),
grid_preds,
np.zeros((grid_gt.shape[0], 8, 3), dtype=np.uint8),
outputs_successor_dr_grid
])
successor_multiactions_str = " ".join(["%s%s" % (ma[0] if ma[0] != "~WS" else "_", ma[1] if ma[1] != "~AD" else "_") for ma in multiactions[0]])
imgs_dr = np.pad(imgs_dr, ((30, 0), (0, 300), (0, 0)), mode="constant", constant_values=0)
imgs_dr = util.draw_text(imgs_dr, x=0, y=0, text="DR curr bins gt:%s, pred:%s | successor preds\nsucc. mas: %s" % (str(np.argmax(outputs_dr_gt)), str(np.argmax(outputs_dr_preds)), successor_multiactions_str), size=9)
h_dr = imgs_dr.shape[0]
outputs_idr_preds = np.squeeze(to_numpy(outputs_idr_preds)[0])
outputs_idr_gt = np.squeeze(to_numpy(outputs_idr_gt)[0])
idr_text = [
"[IndirectReward A0]",
" gt: %.2f" % (outputs_idr_gt[..., 0],),
" pr: %.2f" % (outputs_idr_preds[..., 0],),
"[IndirectReward A1]",
" gt: %.2f" % (outputs_idr_gt[..., 1],),
" pr: %.2f" % (outputs_idr_preds[..., 1],),
"[IndirectReward A2]",
" gt: %.2f" % (outputs_idr_gt[..., 2],),
" pr: %.2f" % (outputs_idr_preds[..., 2],)
]
idr_text = "\n".join(idr_text)
outputs_successor_preds = np.squeeze(to_numpy(outputs_successor_preds)[:, 0, :])
outputs_successor_gt = np.squeeze(to_numpy(outputs_successor_gt)[:, 0, :])
distances = np.average((outputs_successor_preds - outputs_successor_gt) ** 2, axis=1)
successors_text = [
"[Successors]",
" Distances:",
" " + " ".join(["%02.2f" % (d,) for d in distances]),
" T=0 gt/pred:",
" " + " ".join(["%+02.2f" % (val,) for val in outputs_successor_gt[0, 0:25]]),
" " + " ".join(["%+02.2f" % (val,) for val in outputs_successor_preds[0, 0:25]]),
" T=1 gt/pred:",
" " + " ".join(["%+02.2f" % (val,) for val in outputs_successor_gt[1, 0:25]]),
" " + " ".join(["%+02.2f" % (val,) for val in outputs_successor_preds[1, 0:25]]),
" T=2 gt/pred:",
" " + " ".join(["%+02.2f" % (val,) for val in outputs_successor_gt[2, 0:25]]),
" " + " ".join(["%+02.2f" % (val,) for val in outputs_successor_preds[2, 0:25]]),
]
successors_text = "\n".join(successors_text)
outputs_dr_successors_preds = np.squeeze(to_numpy(outputs_dr_successors_preds)[:, 0, :])
outputs_dr_successors_gt = np.squeeze(to_numpy(outputs_dr_successors_gt)[:, 0, :])
bins_dr_successors_preds = np.argmax(outputs_dr_successors_preds, axis=1)
bins_dr_successors_gt = np.argmax(outputs_dr_successors_gt, axis=1)
successors_dr_text = [
"[Direct rewards bins of successors]",
" gt: " + " ".join(["%d" % (b,) for b in bins_dr_successors_gt]),
" pred: " + " ".join(["%d" % (b,) for b in bins_dr_successors_preds])
]
successors_dr_text = "\n".join(successors_dr_text)
outputs_idr_successors_preds = np.squeeze(to_numpy(outputs_idr_successors_preds)[:, 0, :])
outputs_idr_successors_gt = np.squeeze(to_numpy(outputs_idr_successors_gt)[:, 0, :])
successors_idr_text = [
"[Indirect rewards of successors A0]",
" gt: " + " ".join(["%+03.2f" % (v,) for v in outputs_idr_successors_gt[..., 0]]),
" pred: " + " ".join(["%+03.2f" % (v,) for v in outputs_idr_successors_preds[..., 0]]),
"[Indirect rewards of successors A1]",
" gt: " + " ".join(["%+03.2f" % (v,) for v in outputs_idr_successors_gt[..., 1]]),
" pred: " + " ".join(["%+03.2f" % (v,) for v in outputs_idr_successors_preds[..., 1]]),
"[Indirect rewards of successors A2]",
" gt: " + " ".join(["%+03.2f" % (v,) for v in outputs_idr_successors_gt[..., 2]]),
" pred: " + " ".join(["%+03.2f" % (v,) for v in outputs_idr_successors_preds[..., 2]])
]
successors_idr_text = "\n".join(successors_idr_text)
result = np.zeros((950, 320, 3), dtype=np.uint8)
spacing = 4
util.draw_image(result, x=0, y=0, other_img=imgs_in, copy=False)
util.draw_image(result, x=0, y=h_imgs+spacing, other_img=imgs_ae, copy=False)
util.draw_image(result, x=0, y=h_imgs+spacing+h_ae+spacing, other_img=imgs_dr, copy=False)
result = util.draw_text(result, x=0, y=h_imgs+spacing+h_ae+spacing+h_dr+spacing, text=idr_text + "\n" + successors_text + "\n" + successors_dr_text + "\n" + successors_idr_text, size=9)
return result
def to_rgb(im):
return np.tile(im[:,:,np.newaxis], (1, 1, 3))
def downscale(im):
return ia.imresize_single_image(im, (90, 160), interpolation="cubic")
def output_grid_to_image(output_grid, upscale_factor=(4, 4)):
if output_grid is None:
grid_vis = np.zeros((Config.MODEL_NB_REWARD_BINS, Config.MODEL_NB_FUTURE_BLOCKS), dtype=np.uint8)
else:
if output_grid.ndim == 3:
output_grid = output_grid[0]
grid_vis = (output_grid.transpose((1, 0)) * 255).astype(np.uint8)
grid_vis = np.tile(grid_vis[:, :, np.newaxis], (1, 1, 3))
if output_grid is None:
grid_vis[::2, ::2, :] = [255, 0, 0]
grid_vis = ia.imresize_single_image(grid_vis, (grid_vis.shape[0]*upscale_factor[0], grid_vis.shape[1]*upscale_factor[1]), interpolation="nearest")
grid_vis = np.pad(grid_vis, ((1, 1), (1, 1), (0, 0)), mode="constant", constant_values=128)
return grid_vis
def draw_successor_dr_grid(outputs_dr_successors_preds, outputs_dr_successors_gt, upscale_factor=(4, 4)):
T, S = outputs_dr_successors_preds.shape
cols = []
for t in range(T):
col = (outputs_dr_successors_preds[t][np.newaxis, :].transpose((1, 0)) * 255).astype(np.uint8)
col = np.tile(col[:, :, np.newaxis], (1, 1, 3))
correct_bin_idx = np.argmax(outputs_dr_successors_gt[t])
col[correct_bin_idx, 0, 2] = 255
col = ia.imresize_single_image(col, (col.shape[0]*upscale_factor[0], col.shape[1]*upscale_factor[1]), interpolation="nearest")
col = np.pad(col, ((1, 1), (1, 1), (0, 0)), mode="constant", constant_values=128)
cols.append(col)
return np.hstack(cols)
| mit |
xzturn/tensorflow | tensorflow/python/keras/datasets/mnist.py | 3 | 2515 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MNIST handwritten digits dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.keras.utils.data_utils import get_file
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.datasets.mnist.load_data')
def load_data(path='mnist.npz'):
"""Loads the [MNIST dataset](http://yann.lecun.com/exdb/mnist/).
This is a dataset of 60,000 28x28 grayscale images of the 10 digits,
along with a test set of 10,000 images.
More info can be found at the
  [MNIST homepage](http://yann.lecun.com/exdb/mnist/).
Arguments:
path: path where to cache the dataset locally
(relative to ~/.keras/datasets).
Returns:
Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
**x_train, x_test**: uint8 arrays of grayscale image data with shapes
(num_samples, 28, 28).
**y_train, y_test**: uint8 arrays of digit labels (integers in range 0-9)
with shapes (num_samples,).
License:
Yann LeCun and Corinna Cortes hold the copyright of MNIST dataset,
which is a derivative work from original NIST datasets.
MNIST dataset is made available under the terms of the
[Creative Commons Attribution-Share Alike 3.0 license.](
https://creativecommons.org/licenses/by-sa/3.0/)
"""
origin_folder = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/'
path = get_file(
path,
origin=origin_folder + 'mnist.npz',
file_hash=
'731c5ac602752760c8e48fbffcf8c3b850d9dc2a2aedcf2cc48468fc17b673d1')
with np.load(path, allow_pickle=True) as f:
x_train, y_train = f['x_train'], f['y_train']
x_test, y_test = f['x_test'], f['y_test']
return (x_train, y_train), (x_test, y_test)
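

# --- Illustrative usage (hedged sketch, not part of the original module) ---
# A minimal example of how the loader is typically called; the expected
# shapes follow the docstring above.
if __name__ == '__main__':
  (x_train, y_train), (x_test, y_test) = load_data()
  print(x_train.shape, y_train.shape)  # (60000, 28, 28) (60000,)
  print(x_test.shape, y_test.shape)    # (10000, 28, 28) (10000,)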
| apache-2.0 |
keras-team/keras-io | examples/generative/adain.py | 1 | 22423 | """
Title: Neural Style Transfer with AdaIN
Author: [Aritra Roy Gosthipaty](https://twitter.com/arig23498), [Ritwik Raha](https://twitter.com/ritwik_raha)
Date created: 2021/11/08
Last modified: 2021/11/08
Description: Neural Style Transfer with Adaptive Instance Normalization.
"""
"""
# Introduction
[Neural Style Transfer](https://www.tensorflow.org/tutorials/generative/style_transfer)
is the process of transferring the style of one image onto the content
of another. This was first introduced in the seminal paper
["A Neural Algorithm of Artistic Style"](https://arxiv.org/abs/1508.06576)
by Gatys et al. A major limitation of the technique proposed in this
work is in its runtime, as the algorithm uses a slow iterative
optimization process.
Follow-up papers that introduced
[Batch Normalization](https://arxiv.org/abs/1502.03167),
[Instance Normalization](https://arxiv.org/abs/1701.02096) and
[Conditional Instance Normalization](https://arxiv.org/abs/1610.07629)
allowed Style Transfer to be performed in new ways, no longer
requiring a slow iterative process.
Following these papers, the authors Xun Huang and Serge
Belongie propose
[Adaptive Instance Normalization](https://arxiv.org/abs/1703.06868) (AdaIN),
which allows arbitrary style transfer in real time.
In this example we implement Adaptive Instance Normalization
for Neural Style Transfer. The figure below shows the output
of our AdaIN model trained for only **30 epochs**.
![Style transfer sample gallery](https://i.imgur.com/zDjDuea.png)
You can also try out the model with your own images with this
[Hugging Face demo](https://huggingface.co/spaces/ariG23498/nst).
"""
"""
# Setup
We begin with importing the necessary packages. We also set the
seed for reproducibility. The global variables are hyperparameters
which we can change as we like.
"""
import os
import glob
import imageio
import numpy as np
from tqdm import tqdm
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import tensorflow_datasets as tfds
from tensorflow.keras import layers
# Defining the global variables.
IMAGE_SIZE = (224, 224)
BATCH_SIZE = 64
# Training for a single epoch due to time constraints.
# Please use at least 30 epochs to see good results.
EPOCHS = 1
AUTOTUNE = tf.data.AUTOTUNE
"""
## Style transfer sample gallery
For Neural Style Transfer we need style images and content images. In
this example we will use the
[Best Artworks of All Time](https://www.kaggle.com/ikarus777/best-artworks-of-all-time)
as our style dataset and
[Pascal VOC](https://www.tensorflow.org/datasets/catalog/voc)
as our content dataset.
This is a deviation from the original paper implementation by the
authors, where they use
[WIKI-Art](https://paperswithcode.com/dataset/wikiart) as style and
[MSCOCO](https://cocodataset.org/#home) as content datasets
respectively. We do this to create a minimal yet reproducible example.
## Downloading the dataset from Kaggle
The [Best Artworks of All Time](https://www.kaggle.com/ikarus777/best-artworks-of-all-time)
dataset is hosted on Kaggle and one can easily download it in Colab by
following these steps:
- Follow the instructions [here](https://github.com/Kaggle/kaggle-api)
in order to obtain your Kaggle API keys in case you don't have them.
- Use the following command to upload the Kaggle API keys.
```python
from google.colab import files
files.upload()
```
- Use the following commands to move the API keys to the proper
directory and download the dataset.
```shell
$ mkdir ~/.kaggle
$ cp kaggle.json ~/.kaggle/
$ chmod 600 ~/.kaggle/kaggle.json
$ kaggle datasets download ikarus777/best-artworks-of-all-time
$ unzip -qq best-artworks-of-all-time.zip
$ rm -rf images
$ mv resized artwork
$ rm best-artworks-of-all-time.zip artists.csv
```
"""
"""
## `tf.data` pipeline
In this section, we will build the `tf.data` pipeline for the project.
For the style dataset, we decode, convert and resize the images from
the folder. For the content images we are already presented with a
`tf.data` dataset as we use the `tfds` module.
After we have our style and content data pipeline ready, we zip the
two together to obtain the data pipeline that our model will consume.
"""
def decode_and_resize(image_path):
"""Decodes and resizes an image from the image file path.
Args:
image_path: The image file path.
size: The size of the image to be resized to.
Returns:
A resized image.
"""
image = tf.io.read_file(image_path)
image = tf.image.decode_jpeg(image, channels=3)
image = tf.image.convert_image_dtype(image, dtype="float32")
image = tf.image.resize(image, IMAGE_SIZE)
return image
def extract_image_from_voc(element):
"""Extracts image from the PascalVOC dataset.
Args:
element: A dictionary of data.
size: The size of the image to be resized to.
Returns:
A resized image.
"""
image = element["image"]
image = tf.image.convert_image_dtype(image, dtype="float32")
image = tf.image.resize(image, IMAGE_SIZE)
return image
# Get the image file paths for the style images.
style_images = os.listdir("artwork/resized")
style_images = [os.path.join("artwork/resized", path) for path in style_images]
# split the style images in train, val and test
total_style_images = len(style_images)
train_style = style_images[: int(0.8 * total_style_images)]
val_style = style_images[int(0.8 * total_style_images) : int(0.9 * total_style_images)]
test_style = style_images[int(0.9 * total_style_images) :]
# Build the style and content tf.data datasets.
train_style_ds = (
tf.data.Dataset.from_tensor_slices(train_style)
.map(decode_and_resize, num_parallel_calls=AUTOTUNE)
.repeat()
)
train_content_ds = tfds.load("voc", split="train").map(extract_image_from_voc).repeat()
val_style_ds = (
tf.data.Dataset.from_tensor_slices(val_style)
.map(decode_and_resize, num_parallel_calls=AUTOTUNE)
.repeat()
)
val_content_ds = (
tfds.load("voc", split="validation").map(extract_image_from_voc).repeat()
)
test_style_ds = (
tf.data.Dataset.from_tensor_slices(test_style)
.map(decode_and_resize, num_parallel_calls=AUTOTUNE)
.repeat()
)
test_content_ds = (
tfds.load("voc", split="test")
.map(extract_image_from_voc, num_parallel_calls=AUTOTUNE)
.repeat()
)
# Zipping the style and content datasets.
train_ds = (
tf.data.Dataset.zip((train_style_ds, train_content_ds))
.shuffle(BATCH_SIZE * 2)
.batch(BATCH_SIZE)
.prefetch(AUTOTUNE)
)
val_ds = (
tf.data.Dataset.zip((val_style_ds, val_content_ds))
.shuffle(BATCH_SIZE * 2)
.batch(BATCH_SIZE)
.prefetch(AUTOTUNE)
)
test_ds = (
tf.data.Dataset.zip((test_style_ds, test_content_ds))
.shuffle(BATCH_SIZE * 2)
.batch(BATCH_SIZE)
.prefetch(AUTOTUNE)
)
"""
## Visualizing the data
It is always better to visualize the data before training. To ensure
the correctness of our preprocessing pipeline, we visualize 10 samples
from our dataset.
"""
style, content = next(iter(train_ds))
fig, axes = plt.subplots(nrows=10, ncols=2, figsize=(5, 30))
[ax.axis("off") for ax in np.ravel(axes)]
for (axis, style_image, content_image) in zip(axes, style[0:10], content[0:10]):
(ax_style, ax_content) = axis
ax_style.imshow(style_image)
ax_style.set_title("Style Image")
ax_content.imshow(content_image)
ax_content.set_title("Content Image")
"""
## Architecture
The style transfer network takes a content image and a style image as
inputs and outputs the style-transferred image. The authors of AdaIN
propose a simple encoder-decoder structure for achieving this.
![AdaIN architecture](https://i.imgur.com/JbIfoyE.png)
The content image (`C`) and the style image (`S`) are both fed to the
encoder networks. The output from these encoder networks (feature maps)
are then fed to the AdaIN layer. The AdaIN layer computes a combined
feature map. This feature map is then fed into a randomly initialized
decoder network that serves as the generator for the neural style
transfered image.
![AdaIn equation](https://i.imgur.com/hqhcBQS.png)
The style feature map (`fs`) and the content feature map (`fc`) are
fed to the AdaIN layer. This layer produces the combined feature map
`t`. The function `g` represents the decoder (generator) network.
"""
"""
### Encoder
The encoder is a part of the pretrained (pretrained on
[imagenet](https://www.image-net.org/)) VGG19 model. We slice the
model from the `block4-conv1` layer. The output layer is as suggested
by the authors in their paper.
"""
def get_encoder():
vgg19 = keras.applications.VGG19(
include_top=False,
weights="imagenet",
input_shape=(*IMAGE_SIZE, 3),
)
vgg19.trainable = False
mini_vgg19 = keras.Model(vgg19.input, vgg19.get_layer("block4_conv1").output)
inputs = layers.Input([*IMAGE_SIZE, 3])
mini_vgg19_out = mini_vgg19(inputs)
return keras.Model(inputs, mini_vgg19_out, name="mini_vgg19")
"""
### Adaptive Instance Normalization
The AdaIN layer takes in the features
of the content and style image. The layer can be defined via the
following equation:
![AdaIn formula](https://i.imgur.com/tWq3VKP.png)
where `sigma` is the standard deviation and `mu` is the mean for the
concerned variable. In the above equation the mean and variance of the
content feature map `fc` are aligned with the mean and variance of the
style feature maps `fs`.
It is important to note that the AdaIN layer proposed by the authors
uses no other parameters apart from mean and variance. The layer also
does not have any trainable parameters. This is why we use a
*Python function* instead of using a *Keras layer*. The function takes
style and content feature maps, computes the mean and standard deviation
of the images and returns the adaptive instance normalized feature map.
"""
def get_mean_std(x, epsilon=1e-5):
axes = [1, 2]
# Compute the mean and standard deviation of a tensor.
mean, variance = tf.nn.moments(x, axes=axes, keepdims=True)
standard_deviation = tf.sqrt(variance + epsilon)
return mean, standard_deviation
def ada_in(style, content):
"""Computes the AdaIn feature map.
Args:
style: The style feature map.
content: The content feature map.
Returns:
The AdaIN feature map.
"""
content_mean, content_std = get_mean_std(content)
style_mean, style_std = get_mean_std(style)
t = style_std * (content - content_mean) / content_std + style_mean
return t
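

# Quick property check (illustrative, not in the original tutorial): after
# AdaIN, the channel-wise statistics of the output should match those of the
# style features. Random tensors stand in for real VGG feature maps here.
_style_feats = tf.random.normal((1, 28, 28, 512))
_content_feats = tf.random.normal((1, 28, 28, 512))
_adain_out = ada_in(style=_style_feats, content=_content_feats)
_mean_out, _std_out = get_mean_std(_adain_out)
_mean_style, _std_style = get_mean_std(_style_feats)
print(float(tf.reduce_max(tf.abs(_mean_out - _mean_style))))  # ~0
print(float(tf.reduce_max(tf.abs(_std_out - _std_style))))    # ~0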
"""
### Decoder
The authors specify that the decoder network must mirror the encoder
network. We have symmetrically inverted the encoder to build our
decoder. We have used `UpSampling2D` layers to increase the spatial
resolution of the feature maps.
Note that the authors warn against using any normalization layer
in the decoder network, and do indeed go on to show that including
batch normalization or instance normalization hurts the performance
of the overall network.
This is the only portion of the entire architecture that is trainable.
"""
def get_decoder():
config = {"kernel_size": 3, "strides": 1, "padding": "same", "activation": "relu"}
decoder = keras.Sequential(
[
layers.InputLayer((None, None, 512)),
layers.Conv2D(filters=512, **config),
layers.UpSampling2D(),
layers.Conv2D(filters=256, **config),
layers.Conv2D(filters=256, **config),
layers.Conv2D(filters=256, **config),
layers.Conv2D(filters=256, **config),
layers.UpSampling2D(),
layers.Conv2D(filters=128, **config),
layers.Conv2D(filters=128, **config),
layers.UpSampling2D(),
layers.Conv2D(filters=64, **config),
layers.Conv2D(
filters=3,
kernel_size=3,
strides=1,
padding="same",
activation="sigmoid",
),
]
)
return decoder
"""
### Loss functions
Here we build the loss functions for the neural style transfer model.
The authors propose to use a pretrained VGG-19 to compute the loss
function of the network. It is important to keep in mind that this
will be used for training only the decoder network. The total
loss (`Lt`) is a weighted combination of content loss (`Lc`) and style
loss (`Ls`). The `lambda` term is used to vary the amount of style
transferred.
![The total loss](https://i.imgur.com/Q5y1jUM.png)
### Content Loss
This is the Euclidean distance between the content image features
and the features of the neural style transferred image.
![The content loss](https://i.imgur.com/dZ0uD0N.png)
Here the authors propose to use the output from the AdaIn layer `t` as
the content target rather than using features of the original image as
target. This is done to speed up convergence.
### Style Loss
Rather than using the more commonly used
[Gram Matrix](https://mathworld.wolfram.com/GramMatrix.html),
the authors propose to compute the difference between the statistical features
(mean and variance) which makes it conceptually cleaner. This can be
easily visualized via the following equation:
![The style loss](https://i.imgur.com/Ctclhn3.png)
where `theta` denotes the layers in VGG-19 used to compute the loss.
In this case this corresponds to:
- `block1_conv1`
- `block1_conv2`
- `block1_conv3`
- `block1_conv4`
"""
def get_loss_net():
vgg19 = keras.applications.VGG19(
include_top=False, weights="imagenet", input_shape=(*IMAGE_SIZE, 3)
)
vgg19.trainable = False
layer_names = ["block1_conv1", "block2_conv1", "block3_conv1", "block4_conv1"]
outputs = [vgg19.get_layer(name).output for name in layer_names]
mini_vgg19 = keras.Model(vgg19.input, outputs)
inputs = layers.Input([*IMAGE_SIZE, 3])
mini_vgg19_out = mini_vgg19(inputs)
return keras.Model(inputs, mini_vgg19_out, name="loss_net")
"""
## Neural Style Transfer
This is the trainer module. We wrap the encoder and decoder inside of
a `tf.keras.Model` subclass. This allows us to customize what happens
in the `model.fit()` loop.
"""
class NeuralStyleTransfer(tf.keras.Model):
def __init__(self, encoder, decoder, loss_net, style_weight, **kwargs):
super().__init__(**kwargs)
self.encoder = encoder
self.decoder = decoder
self.loss_net = loss_net
self.style_weight = style_weight
def compile(self, optimizer, loss_fn):
super().compile()
self.optimizer = optimizer
self.loss_fn = loss_fn
self.style_loss_tracker = keras.metrics.Mean(name="style_loss")
self.content_loss_tracker = keras.metrics.Mean(name="content_loss")
self.total_loss_tracker = keras.metrics.Mean(name="total_loss")
def train_step(self, inputs):
style, content = inputs
# Initialize the content and style loss.
loss_content = 0.0
loss_style = 0.0
with tf.GradientTape() as tape:
# Encode the style and content image.
style_encoded = self.encoder(style)
content_encoded = self.encoder(content)
# Compute the AdaIN target feature maps.
t = ada_in(style=style_encoded, content=content_encoded)
# Generate the neural style transferred image.
reconstructed_image = self.decoder(t)
# Compute the losses.
reconstructed_vgg_features = self.loss_net(reconstructed_image)
style_vgg_features = self.loss_net(style)
loss_content = self.loss_fn(t, reconstructed_vgg_features[-1])
for inp, out in zip(style_vgg_features, reconstructed_vgg_features):
mean_inp, std_inp = get_mean_std(inp)
mean_out, std_out = get_mean_std(out)
loss_style += self.loss_fn(mean_inp, mean_out) + self.loss_fn(
std_inp, std_out
)
loss_style = self.style_weight * loss_style
total_loss = loss_content + loss_style
# Compute gradients and optimize the decoder.
trainable_vars = self.decoder.trainable_variables
gradients = tape.gradient(total_loss, trainable_vars)
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
# Update the trackers.
self.style_loss_tracker.update_state(loss_style)
self.content_loss_tracker.update_state(loss_content)
self.total_loss_tracker.update_state(total_loss)
return {
"style_loss": self.style_loss_tracker.result(),
"content_loss": self.content_loss_tracker.result(),
"total_loss": self.total_loss_tracker.result(),
}
def test_step(self, inputs):
style, content = inputs
# Initialize the content and style loss.
loss_content = 0.0
loss_style = 0.0
# Encode the style and content image.
style_encoded = self.encoder(style)
content_encoded = self.encoder(content)
# Compute the AdaIN target feature maps.
t = ada_in(style=style_encoded, content=content_encoded)
# Generate the neural style transferred image.
reconstructed_image = self.decoder(t)
# Compute the losses.
recons_vgg_features = self.loss_net(reconstructed_image)
style_vgg_features = self.loss_net(style)
loss_content = self.loss_fn(t, recons_vgg_features[-1])
for inp, out in zip(style_vgg_features, recons_vgg_features):
mean_inp, std_inp = get_mean_std(inp)
mean_out, std_out = get_mean_std(out)
loss_style += self.loss_fn(mean_inp, mean_out) + self.loss_fn(
std_inp, std_out
)
loss_style = self.style_weight * loss_style
total_loss = loss_content + loss_style
# Update the trackers.
self.style_loss_tracker.update_state(loss_style)
self.content_loss_tracker.update_state(loss_content)
self.total_loss_tracker.update_state(total_loss)
return {
"style_loss": self.style_loss_tracker.result(),
"content_loss": self.content_loss_tracker.result(),
"total_loss": self.total_loss_tracker.result(),
}
@property
def metrics(self):
return [
self.style_loss_tracker,
self.content_loss_tracker,
self.total_loss_tracker,
]
"""
## Train Monitor callback
This callback is used to visualize the style transfer output of
the model at the end of each epoch. The objective of style transfer cannot be
quantified properly, and is to be subjectively evaluated by an audience.
For this reason, visualization is a key aspect of evaluating the model.
"""
test_style, test_content = next(iter(test_ds))
class TrainMonitor(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs=None):
# Encode the style and content image.
test_style_encoded = self.model.encoder(test_style)
test_content_encoded = self.model.encoder(test_content)
# Compute the AdaIN features.
test_t = ada_in(style=test_style_encoded, content=test_content_encoded)
test_reconstructed_image = self.model.decoder(test_t)
# Plot the Style, Content and the NST image.
fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(20, 5))
ax[0].imshow(tf.keras.preprocessing.image.array_to_img(test_style[0]))
ax[0].set_title(f"Style: {epoch:03d}")
ax[1].imshow(tf.keras.preprocessing.image.array_to_img(test_content[0]))
ax[1].set_title(f"Content: {epoch:03d}")
ax[2].imshow(
tf.keras.preprocessing.image.array_to_img(test_reconstructed_image[0])
)
ax[2].set_title(f"NST: {epoch:03d}")
plt.show()
plt.close()
"""
## Train the model
In this section, we define the optimizer, the loss function, and the
trainer module. We compile the trainer module with the optimizer and
the loss function and then train it.
*Note*: We train the model for a single epoch due to time constraints,
but we will need to train it for at least 30 epochs to see good results.
"""
optimizer = keras.optimizers.Adam(learning_rate=1e-5)
loss_fn = keras.losses.MeanSquaredError()
encoder = get_encoder()
loss_net = get_loss_net()
decoder = get_decoder()
model = NeuralStyleTransfer(
encoder=encoder, decoder=decoder, loss_net=loss_net, style_weight=4.0
)
model.compile(optimizer=optimizer, loss_fn=loss_fn)
history = model.fit(
train_ds,
epochs=EPOCHS,
steps_per_epoch=50,
validation_data=val_ds,
validation_steps=50,
callbacks=[TrainMonitor()],
)
"""
## Inference
After we train the model, we now need to run inference with it. We will
pass arbitrary content and style images from the test dataset and take a look at
the output images.
*NOTE*: To try out the model on your own images, you can use this
[Hugging Face demo](https://huggingface.co/spaces/ariG23498/nst).
"""
for style, content in test_ds.take(1):
style_encoded = model.encoder(style)
content_encoded = model.encoder(content)
t = ada_in(style=style_encoded, content=content_encoded)
reconstructed_image = model.decoder(t)
fig, axes = plt.subplots(nrows=10, ncols=3, figsize=(10, 30))
[ax.axis("off") for ax in np.ravel(axes)]
for axis, style_image, content_image, reconstructed_image in zip(
axes, style[0:10], content[0:10], reconstructed_image[0:10]
):
(ax_style, ax_content, ax_reconstructed) = axis
ax_style.imshow(style_image)
ax_style.set_title("Style Image")
ax_content.imshow(content_image)
ax_content.set_title("Content Image")
ax_reconstructed.imshow(reconstructed_image)
ax_reconstructed.set_title("NST Image")
"""
## Conclusion
Adaptive Instance Normalization allows arbitrary style transfer in
real time. It is also important to note that the novel proposition of
the authors is to achieve this only by aligning the statistical
features (mean and standard deviation) of the style and the content
images.
*Note*: AdaIN also serves as the base for
[Style-GANs](https://arxiv.org/abs/1812.04948).
## Reference
- [TF implementation](https://github.com/ftokarev/tf-adain)
## Acknowledgement
We thank [Luke Wood](https://lukewood.xyz) for his
detailed review.
"""
| apache-2.0 |
humdings/pynance-legacy | quantopian/quandl.py | 1 | 3498 |
import datetime
import pandas as pd
class QuandlFetcher(object):
API_URL = 'http://www.quandl.com/api/v1/'
def __init__(self, auth_token=None):
self.auth_token = auth_token
def _append_query_fields(self, url, **kwargs):
field_values = ['{0}={1}'.format(key, val)
for key, val in kwargs.items() if val]
return url + 'request_source=python&request_version=2&' +'&'.join(field_values)
def _parse_dates(self, date):
if date is None:
return date
if isinstance(date, datetime.datetime):
return date.date().isoformat()
if isinstance(date, datetime.date):
return date.isoformat()
try:
date = pd.to_datetime(date)
except ValueError:
            raise ValueError("{} is not recognised as a date.".format(date))
return date.date().isoformat()
def build_url(self, dataset, **kwargs):
"""Return dataframe of requested dataset from Quandl.
:param dataset: str or list, depending on single dataset usage or multiset usage
Dataset codes are available on the Quandl website
:param str trim_start, trim_end: Optional datefilers, otherwise entire
dataset is returned
:param str collapse: Options are daily, weekly, monthly, quarterly, annual
:param str transformation: options are diff, rdiff, cumul, and normalize
:param int rows: Number of rows which will be returned
:param str sort_order: options are asc, desc. Default: `asc`
        :param str text: specify whether to print output text to stdout, pass 'no' to suppress output.
        :returns: str, the fully constructed request URL for the dataset.
Note that Pandas expects timeseries data to be sorted ascending for most
timeseries functionality to work.
Any other `kwargs` passed to `get` are sent as field/value params to Quandl
with no interference.
"""
auth_token = self.auth_token
kwargs.setdefault('sort_order', 'asc')
trim_start = self._parse_dates(kwargs.pop('trim_start', None))
trim_end = self._parse_dates(kwargs.pop('trim_end', None))
#Check whether dataset is given as a string (for a single dataset) or an array (for a multiset call)
#Unicode String
if type(dataset) == unicode or type(dataset) == str:
url = self.API_URL + 'datasets/{}.csv?'.format(dataset)
#Array
elif type(dataset) == list:
url = self.API_URL + 'multisets.csv?columns='
#Format for multisets call
dataset = [d.replace('/', '.') for d in dataset]
for i in dataset:
url += i + ','
#remove trailing ,
url = url[:-1] + '&'
#If wrong format
else:
error = "Your dataset must either be specified as a string (containing a Quandl code) or an array (of Quandl codes) for multisets"
raise Exception(error)
url = self._append_query_fields(
url,
auth_token=auth_token,
trim_start=trim_start,
trim_end=trim_end,
**kwargs
)
return url
def _download(url):
'''
Used to download data outside of Quantopian.
'''
dframe = pd.read_csv(url, index_col=0, parse_dates=True)
return dframe
| mit |
Yingmin-Li/keras | keras/datasets/imdb.py | 37 | 1855 | from __future__ import absolute_import
import six.moves.cPickle
import gzip
from .data_utils import get_file
import random
from six.moves import zip
import numpy as np
def load_data(path="imdb.pkl", nb_words=None, skip_top=0, maxlen=None, test_split=0.2, seed=113,
start_char=1, oov_char=2, index_from=3):
path = get_file(path, origin="https://s3.amazonaws.com/text-datasets/imdb.pkl")
if path.endswith(".gz"):
f = gzip.open(path, 'rb')
else:
f = open(path, 'rb')
X, labels = six.moves.cPickle.load(f)
f.close()
np.random.seed(seed)
np.random.shuffle(X)
np.random.seed(seed)
np.random.shuffle(labels)
if start_char is not None:
X = [[start_char] + [w + index_from for w in x] for x in X]
elif index_from:
X = [[w + index_from for w in x] for x in X]
if maxlen:
new_X = []
new_labels = []
for x, y in zip(X, labels):
if len(x) < maxlen:
new_X.append(x)
new_labels.append(y)
X = new_X
labels = new_labels
if not nb_words:
nb_words = max([max(x) for x in X])
# by convention, use 2 as OOV word
# reserve 'index_from' (=3 by default) characters: 0 (padding), 1 (start), 2 (OOV)
if oov_char is not None:
X = [[oov_char if (w >= nb_words or w < skip_top) else w for w in x] for x in X]
else:
nX = []
for x in X:
nx = []
for w in x:
if (w >= nb_words or w < skip_top):
nx.append(w)
nX.append(nx)
X = nX
X_train = X[:int(len(X)*(1-test_split))]
y_train = labels[:int(len(X)*(1-test_split))]
X_test = X[int(len(X)*(1-test_split)):]
y_test = labels[int(len(X)*(1-test_split)):]
return (X_train, y_train), (X_test, y_test)
| mit |
herilalaina/scikit-learn | examples/linear_model/plot_ridge_coeffs.py | 146 | 2785 | """
==============================================================
Plot Ridge coefficients as a function of the L2 regularization
==============================================================
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color in the left plot represents one different dimension of the
coefficient vector, and this is displayed as a function of the
regularization parameter. The right plot shows how exact the solution
is. This example illustrates how a well defined solution is
found by Ridge regression and how regularization affects the
coefficients and their values. The plot on the right shows how
the difference of the coefficients from the estimator changes
as a function of regularization.
In this example the dependent variable Y is set as a function
of the input features: y = X*w + c. The coefficient vector w is
randomly sampled from a normal distribution, whereas the bias term c is
set to a constant.
As alpha tends toward zero the coefficients found by Ridge
regression stabilize towards the randomly sampled vector w.
For big alpha (strong regularisation) the coefficients
are smaller (eventually converging at 0) leading to a
simpler and biased solution.
These dependencies can be observed on the left plot.
The right plot shows the mean squared error between the
coefficients found by the model and the chosen vector w.
Less regularised models retrieve the exact
coefficients (the error is equal to 0), while stronger regularised
models increase the error.
Please note that in this example the data is non-noisy, hence
it is possible to extract the exact coefficients.
"""
# Author: Kornel Kielczewski -- <kornel.k@plusnet.pl>
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error
clf = Ridge()
X, y, w = make_regression(n_samples=10, n_features=10, coef=True,
random_state=1, bias=3.5)
coefs = []
errors = []
alphas = np.logspace(-6, 6, 200)
# Train the model with different regularisation strengths
for a in alphas:
clf.set_params(alpha=a)
clf.fit(X, y)
coefs.append(clf.coef_)
errors.append(mean_squared_error(clf.coef_, w))
# Display results
plt.figure(figsize=(20, 6))
plt.subplot(121)
ax = plt.gca()
ax.plot(alphas, coefs)
ax.set_xscale('log')
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.subplot(122)
ax = plt.gca()
ax.plot(alphas, errors)
ax.set_xscale('log')
plt.xlabel('alpha')
plt.ylabel('error')
plt.title('Coefficient error as a function of the regularization')
plt.axis('tight')
plt.show()
| bsd-3-clause |
nhuntwalker/astroML | book_figures/appendix/fig_LIGO_bandpower.py | 4 | 2122 | """
Plot the band power of the LIGO big dog event
---------------------------------------------
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
from __future__ import division
import numpy as np
from matplotlib import pyplot as plt
from astroML.datasets import fetch_LIGO_bigdog
from astroML.fourier import FT_continuous
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
def multiple_power_spectrum(t, x, window_size=10000, step_size=1000):
assert x.shape == t.shape
assert x.ndim == 1
assert len(x) > window_size
N_steps = (len(x) - window_size) // step_size
indices = np.arange(window_size) + step_size * np.arange(N_steps)[:, None]
X = x[indices].astype(complex)
f, H = FT_continuous(t[:window_size], X)
i = (f > 0)
return f[i], abs(H[:, i])
X = fetch_LIGO_bigdog()
t = X['t']
x = X['Hanford']
window_size = 10000
step_size = 500
f, P = multiple_power_spectrum(t, x,
window_size=window_size,
step_size=step_size)
i = (f > 50) & (f < 1500)
P = P[:, i]
f = f[i]
fig = plt.figure(figsize=(5, 3.75))
plt.imshow(np.log10(P).T, origin='lower', aspect='auto',
           extent=[t[window_size // 2],
                   t[window_size // 2 + step_size * P.shape[0]],
f[0], f[-1]])
plt.xlabel('t (s)')
plt.ylabel('f (Hz) derived from %.2fs window' % (t[window_size] - t[0]))
plt.colorbar().set_label('$|H(f)|$')
plt.show()
| bsd-2-clause |
h2oai/h2o | py/testdir_release/c3/test_c3_exec_copy.py | 9 | 4111 | import unittest, sys, time
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_import as h2i, h2o_glm, h2o_common, h2o_exec as h2e
import h2o_print
DO_GLM = True
LOG_MACHINE_STATS = False
# fails during exec env push ..second import has to do a key delete (the first)
DO_DOUBLE_IMPORT = False
print "Assumes you ran ../build_for_clone.py in this directory"
print "Using h2o-nodes.json. Also the sandbox dir"
class releaseTest(h2o_common.ReleaseCommon, unittest.TestCase):
def sub_c3_nongz_fvec_long(self, csvFilenameList):
# a kludge
h2o.setup_benchmark_log()
bucket = 'home-0xdiag-datasets'
importFolderPath = 'manyfiles-nflx'
print "Using nongz'ed files in", importFolderPath
if LOG_MACHINE_STATS:
benchmarkLogging = ['cpu', 'disk', 'network']
else:
benchmarkLogging = []
pollTimeoutSecs = 120
retryDelaySecs = 10
for trial, (csvFilepattern, csvFilename, totalBytes, timeoutSecs) in enumerate(csvFilenameList):
csvPathname = importFolderPath + "/" + csvFilepattern
if DO_DOUBLE_IMPORT:
(importResult, importPattern) = h2i.import_only(bucket=bucket, path=csvPathname, schema='local')
importFullList = importResult['files']
importFailList = importResult['fails']
print "\n Problem if this is not empty: importFailList:", h2o.dump_json(importFailList)
# this accumulates performance stats into a benchmark log over multiple runs
# good for tracking whether we're getting slower or faster
h2o.cloudPerfH2O.change_logfile(csvFilename)
h2o.cloudPerfH2O.message("")
h2o.cloudPerfH2O.message("Parse " + csvFilename + " Start--------------------------------")
start = time.time()
parseResult = h2i.import_parse(bucket=bucket, path=csvPathname, schema='local',
hex_key="A.hex", timeoutSecs=timeoutSecs,
retryDelaySecs=retryDelaySecs,
pollTimeoutSecs=pollTimeoutSecs,
benchmarkLogging=benchmarkLogging)
elapsed = time.time() - start
print "Parse #", trial, "completed in", "%6.2f" % elapsed, "seconds.", \
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
print "Parse result['destination_key']:", parseResult['destination_key']
h2o_cmd.columnInfoFromInspect(parseResult['destination_key'], exceptionOnMissingValues=False)
fileMBS = (totalBytes/1e6)/elapsed
msg = '{!s} jvms, {!s}GB heap, {:s} {:s} {:6.2f} MB/sec for {:.2f} secs'.format(
len(h2o.nodes), h2o.nodes[0].java_heap_GB, csvFilepattern, csvFilename, fileMBS, elapsed)
print msg
h2o.cloudPerfH2O.message(msg)
h2o_cmd.checkKeyDistribution()
# are the unparsed keys slowing down exec?
h2i.delete_keys_at_all_nodes(pattern="manyfile")
execExpr = 'B.hex=A.hex'
h2e.exec_expr(execExpr=execExpr, timeoutSecs=180)
h2o_cmd.checkKeyDistribution()
execExpr = 'C.hex=B.hex'
h2e.exec_expr(execExpr=execExpr, timeoutSecs=180)
h2o_cmd.checkKeyDistribution()
execExpr = 'D.hex=C.hex'
h2e.exec_expr(execExpr=execExpr, timeoutSecs=180)
h2o_cmd.checkKeyDistribution()
#***********************************************************************
# these will be tracked individual by jenkins, which is nice
#***********************************************************************
def test_c3_exec_copy(self):
avgMichalSize = 237270000
csvFilenameList= [
("*[1][0-4][0-9].dat", "file_50_A.dat", 50 * avgMichalSize, 1800),
]
self.sub_c3_nongz_fvec_long(csvFilenameList)
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
herilalaina/scikit-learn | sklearn/manifold/tests/test_spectral_embedding.py | 2 | 11123 | import numpy as np
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from scipy import sparse
from scipy.sparse import csgraph
from scipy.linalg import eigh
from sklearn.manifold.spectral_embedding_ import SpectralEmbedding
from sklearn.manifold.spectral_embedding_ import _graph_is_connected
from sklearn.manifold.spectral_embedding_ import _graph_connected_component
from sklearn.manifold import spectral_embedding
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics import normalized_mutual_info_score
from sklearn.cluster import KMeans
from sklearn.datasets.samples_generator import make_blobs
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.utils.testing import assert_true, assert_equal, assert_raises
from sklearn.utils.testing import SkipTest
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 1000
n_clusters, n_features = centers.shape
S, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
def _check_with_col_sign_flipping(A, B, tol=0.0):
""" Check array A and B are equal with possible sign flipping on
each columns"""
sign = True
for column_idx in range(A.shape[1]):
sign = sign and ((((A[:, column_idx] -
B[:, column_idx]) ** 2).mean() <= tol ** 2) or
(((A[:, column_idx] +
B[:, column_idx]) ** 2).mean() <= tol ** 2))
if not sign:
return False
return True
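# Illustrative sketch (not part of the test suite proper): a copy of a matrix
# with every column negated should be accepted as equal by the helper above,
# e.g. _check_with_col_sign_flipping(A, -A, tol=1e-12) is True for any A.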
def test_sparse_graph_connected_component():
rng = np.random.RandomState(42)
n_samples = 300
boundaries = [0, 42, 121, 200, n_samples]
p = rng.permutation(n_samples)
connections = []
for start, stop in zip(boundaries[:-1], boundaries[1:]):
group = p[start:stop]
# Connect all elements within the group at least once via an
# arbitrary path that spans the group.
for i in range(len(group) - 1):
connections.append((group[i], group[i + 1]))
# Add some more random connections within the group
min_idx, max_idx = 0, len(group) - 1
n_random_connections = 1000
source = rng.randint(min_idx, max_idx, size=n_random_connections)
target = rng.randint(min_idx, max_idx, size=n_random_connections)
connections.extend(zip(group[source], group[target]))
# Build a symmetric affinity matrix
row_idx, column_idx = tuple(np.array(connections).T)
data = rng.uniform(.1, 42, size=len(connections))
affinity = sparse.coo_matrix((data, (row_idx, column_idx)))
affinity = 0.5 * (affinity + affinity.T)
for start, stop in zip(boundaries[:-1], boundaries[1:]):
component_1 = _graph_connected_component(affinity, p[start])
component_size = stop - start
assert_equal(component_1.sum(), component_size)
# We should retrieve the same component mask by starting by both ends
# of the group
component_2 = _graph_connected_component(affinity, p[stop - 1])
assert_equal(component_2.sum(), component_size)
assert_array_equal(component_1, component_2)
def test_spectral_embedding_two_components(seed=36):
# Test spectral embedding with two components
random_state = np.random.RandomState(seed)
n_sample = 100
affinity = np.zeros(shape=[n_sample * 2, n_sample * 2])
# first component
affinity[0:n_sample,
0:n_sample] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# second component
affinity[n_sample::,
n_sample::] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# Test of internal _graph_connected_component before connection
component = _graph_connected_component(affinity, 0)
assert_true(component[:n_sample].all())
assert_true(not component[n_sample:].any())
component = _graph_connected_component(affinity, -1)
assert_true(not component[:n_sample].any())
assert_true(component[n_sample:].all())
# connection
affinity[0, n_sample + 1] = 1
affinity[n_sample + 1, 0] = 1
affinity.flat[::2 * n_sample + 1] = 0
affinity = 0.5 * (affinity + affinity.T)
true_label = np.zeros(shape=2 * n_sample)
true_label[0:n_sample] = 1
se_precomp = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed))
embedded_coordinate = se_precomp.fit_transform(affinity)
# Some numpy versions are touchy with types
embedded_coordinate = \
se_precomp.fit_transform(affinity.astype(np.float32))
# thresholding on the first components using 0.
label_ = np.array(embedded_coordinate.ravel() < 0, dtype="float")
assert_equal(normalized_mutual_info_score(true_label, label_), 1.0)
def test_spectral_embedding_precomputed_affinity(seed=36):
# Test spectral embedding with precomputed kernel
gamma = 1.0
se_precomp = SpectralEmbedding(n_components=2, affinity="precomputed",
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_precomp = se_precomp.fit_transform(rbf_kernel(S, gamma=gamma))
embed_rbf = se_rbf.fit_transform(S)
assert_array_almost_equal(
se_precomp.affinity_matrix_, se_rbf.affinity_matrix_)
assert_true(_check_with_col_sign_flipping(embed_precomp, embed_rbf, 0.05))
def test_spectral_embedding_callable_affinity(seed=36):
# Test spectral embedding with callable affinity
gamma = 0.9
kern = rbf_kernel(S, gamma=gamma)
se_callable = SpectralEmbedding(n_components=2,
affinity=(
lambda x: rbf_kernel(x, gamma=gamma)),
gamma=gamma,
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_rbf = se_rbf.fit_transform(S)
embed_callable = se_callable.fit_transform(S)
assert_array_almost_equal(
se_callable.affinity_matrix_, se_rbf.affinity_matrix_)
assert_array_almost_equal(kern, se_rbf.affinity_matrix_)
assert_true(
_check_with_col_sign_flipping(embed_rbf, embed_callable, 0.05))
def test_spectral_embedding_amg_solver(seed=36):
# Test spectral embedding with amg solver
try:
from pyamg import smoothed_aggregation_solver # noqa
except ImportError:
raise SkipTest("pyamg not available.")
se_amg = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="amg", n_neighbors=5,
random_state=np.random.RandomState(seed))
se_arpack = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="arpack", n_neighbors=5,
random_state=np.random.RandomState(seed))
embed_amg = se_amg.fit_transform(S)
embed_arpack = se_arpack.fit_transform(S)
assert_true(_check_with_col_sign_flipping(embed_amg, embed_arpack, 0.05))
def test_pipeline_spectral_clustering(seed=36):
# Test using pipeline to do spectral clustering
random_state = np.random.RandomState(seed)
se_rbf = SpectralEmbedding(n_components=n_clusters,
affinity="rbf",
random_state=random_state)
se_knn = SpectralEmbedding(n_components=n_clusters,
affinity="nearest_neighbors",
n_neighbors=5,
random_state=random_state)
for se in [se_rbf, se_knn]:
km = KMeans(n_clusters=n_clusters, random_state=random_state)
km.fit(se.fit_transform(S))
assert_array_almost_equal(
normalized_mutual_info_score(
km.labels_,
true_labels), 1.0, 2)
def test_spectral_embedding_unknown_eigensolver(seed=36):
# Test that SpectralClustering fails with an unknown eigensolver
se = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed),
eigen_solver="<unknown>")
assert_raises(ValueError, se.fit, S)
def test_spectral_embedding_unknown_affinity(seed=36):
# Test that SpectralClustering fails with an unknown affinity type
se = SpectralEmbedding(n_components=1, affinity="<unknown>",
random_state=np.random.RandomState(seed))
assert_raises(ValueError, se.fit, S)
def test_connectivity(seed=36):
# Test that graph connectivity test works as expected
graph = np.array([[1, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), False)
assert_equal(_graph_is_connected(sparse.csr_matrix(graph)), False)
assert_equal(_graph_is_connected(sparse.csc_matrix(graph)), False)
graph = np.array([[1, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), True)
assert_equal(_graph_is_connected(sparse.csr_matrix(graph)), True)
assert_equal(_graph_is_connected(sparse.csc_matrix(graph)), True)
def test_spectral_embedding_deterministic():
# Test that Spectral Embedding is deterministic
random_state = np.random.RandomState(36)
data = random_state.randn(10, 30)
sims = rbf_kernel(data)
embedding_1 = spectral_embedding(sims)
embedding_2 = spectral_embedding(sims)
assert_array_almost_equal(embedding_1, embedding_2)
def test_spectral_embedding_unnormalized():
# Test that spectral_embedding is also processing unnormalized laplacian
# correctly
random_state = np.random.RandomState(36)
data = random_state.randn(10, 30)
sims = rbf_kernel(data)
n_components = 8
embedding_1 = spectral_embedding(sims,
norm_laplacian=False,
n_components=n_components,
drop_first=False)
# Verify using manual computation with dense eigh
laplacian, dd = csgraph.laplacian(sims, normed=False,
return_diag=True)
_, diffusion_map = eigh(laplacian)
embedding_2 = diffusion_map.T[:n_components] * dd
embedding_2 = _deterministic_vector_sign_flip(embedding_2).T
assert_array_almost_equal(embedding_1, embedding_2)
| bsd-3-clause |
schets/scikit-learn | sklearn/cross_decomposition/pls_.py | 14 | 28526 | """
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
"""
# Author: Edouard Duchesnay <edouard.duchesnay@cea.fr>
# License: BSD 3 clause
from ..base import BaseEstimator, RegressorMixin, TransformerMixin
from ..utils import check_array, check_consistent_length
from ..externals import six
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import linalg
from ..utils import arpack
from ..utils.validation import check_is_fitted
__all__ = ['PLSCanonical', 'PLSRegression', 'PLSSVD']
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
norm_y_weights=False):
"""Inner loop of the iterative NIPALS algorithm.
Provides an alternative to the svd(X'Y); returns the first left and right
singular vectors of X'Y. See PLS for the meaning of the parameters. It is
similar to the Power method for determining the eigenvectors and
    eigenvalues of X'Y.
"""
y_score = Y[:, [0]]
x_weights_old = 0
ite = 1
X_pinv = Y_pinv = None
# Inner loop of the Wold algo.
while True:
# 1.1 Update u: the X weights
if mode == "B":
if X_pinv is None:
X_pinv = linalg.pinv(X) # compute once pinv(X)
x_weights = np.dot(X_pinv, y_score)
else: # mode A
# Mode A regress each X column on y_score
x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
# 1.2 Normalize u
x_weights /= np.sqrt(np.dot(x_weights.T, x_weights))
# 1.3 Update x_score: the X latent scores
x_score = np.dot(X, x_weights)
# 2.1 Update y_weights
if mode == "B":
if Y_pinv is None:
Y_pinv = linalg.pinv(Y) # compute once pinv(Y)
y_weights = np.dot(Y_pinv, x_score)
else:
# Mode A regress each Y column on x_score
y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
## 2.2 Normalize y_weights
if norm_y_weights:
y_weights /= np.sqrt(np.dot(y_weights.T, y_weights))
# 2.3 Update y_score: the Y latent scores
y_score = np.dot(Y, y_weights) / np.dot(y_weights.T, y_weights)
## y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
x_weights_diff = x_weights - x_weights_old
if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
break
if ite == max_iter:
warnings.warn('Maximum number of iterations reached')
break
x_weights_old = x_weights
ite += 1
return x_weights, y_weights, ite
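# Illustrative sketch (not library code; X and Y are assumed to be centered):
# the inner loop above returns weight vectors approximating the dominant
# singular pair of X'Y, with x_weights normalized to unit norm, e.g.
#     rng = np.random.RandomState(0)
#     X, Y = rng.randn(20, 5), rng.randn(20, 3)
#     u, v, n_iter = _nipals_twoblocks_inner_loop(X - X.mean(0), Y - Y.mean(0))
#     # np.dot(u.T, u) is ~1.0 and u.shape == (5, 1), v.shape == (3, 1)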
def _svd_cross_product(X, Y):
C = np.dot(X.T, Y)
U, s, Vh = linalg.svd(C, full_matrices=False)
u = U[:, [0]]
v = Vh.T[:, [0]]
return u, v
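# Sketch: the helper above simply takes the first left/right singular vectors
# of C = X'Y, so u has shape (p, 1) and v has shape (q, 1); up to sign and
# tolerance they match the NIPALS inner-loop solution for mode A, e.g.
#     u, v = _svd_cross_product(X, Y)   # u: (p, 1), v: (q, 1)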
def _center_scale_xy(X, Y, scale=True):
""" Center X, Y and scale if the scale parameter==True
Returns
-------
X, Y, x_mean, y_mean, x_std, y_std
"""
# center
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
# scale
if scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
return X, Y, x_mean, y_mean, x_std, y_std
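# Sketch of the contract of _center_scale_xy (illustrative only): with
# scale=True every non-constant returned column has zero mean and unit
# (ddof=1) standard deviation, e.g.
#     Xc, Yc, x_mean, y_mean, x_std, y_std = _center_scale_xy(X.copy(), Y.copy())
#     # np.allclose(Xc.mean(axis=0), 0.0), np.allclose(Xc.std(axis=0, ddof=1), 1.0)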
class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin,
RegressorMixin):
"""Partial Least Squares (PLS)
    This class implements the generic PLS algorithm; the constructor's
    parameters allow one to obtain a specific implementation such as:
- PLS2 regression, i.e., PLS 2 blocks, mode A, with asymmetric deflation
and unnormalized y weights such as defined by [Tenenhaus 1998] p. 132.
With univariate response it implements PLS1.
- PLS canonical, i.e., PLS 2 blocks, mode A, with symmetric deflation and
normalized y weights such as defined by [Tenenhaus 1998] (p. 132) and
[Wegelin et al. 2000]. This parametrization implements the original Wold
algorithm.
We use the terminology defined by [Wegelin et al. 2000].
This implementation uses the PLS Wold 2 blocks algorithm based on two
nested loops:
        (i) The outer loop iterates over components.
        (ii) The inner loop estimates the weights vectors. This can be done
        with two algorithms: (a) the inner loop of the original NIPALS
        algorithm, or (b) an SVD on the residual cross-covariance matrices.
n_components : int, number of components to keep. (default 2).
scale : boolean, scale data? (default True)
deflation_mode : str, "canonical" or "regression". See notes.
mode : "A" classical PLS and "B" CCA. See notes.
norm_y_weights: boolean, normalize Y weights to one? (default False)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, the maximum number of iterations (default 500)
of the NIPALS inner loop (used only if algorithm="nipals")
tol : non-negative real, default 1e-06
The tolerance used in the iterative algorithm.
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value set to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm given is "svd".
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSRegression
CCA
PLS_SVD
"""
@abstractmethod
def __init__(self, n_components=2, scale=True, deflation_mode="regression",
mode="A", algorithm="nipals", norm_y_weights=False,
max_iter=500, tol=1e-06, copy=True):
self.n_components = n_components
self.deflation_mode = deflation_mode
self.mode = mode
self.norm_y_weights = norm_y_weights
self.scale = scale
self.algorithm = algorithm
self.max_iter = max_iter
self.tol = tol
self.copy = copy
def fit(self, X, Y):
"""Fit model to data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
Y : array-like of response, shape = [n_samples, n_targets]
            Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.
"""
# copy since this will contains the residuals (deflated) matrices
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
n = X.shape[0]
p = X.shape[1]
q = Y.shape[1]
if self.n_components < 1 or self.n_components > p:
raise ValueError('Invalid number of components: %d' %
self.n_components)
if self.algorithm not in ("svd", "nipals"):
raise ValueError("Got algorithm %s when only 'svd' "
"and 'nipals' are known" % self.algorithm)
if self.algorithm == "svd" and self.mode == "B":
raise ValueError('Incompatible configuration: mode B is not '
'implemented with svd algorithm')
if self.deflation_mode not in ["canonical", "regression"]:
raise ValueError('The deflation mode is unknown')
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_\
= _center_scale_xy(X, Y, self.scale)
# Residuals (deflated) matrices
Xk = X
Yk = Y
# Results matrices
self.x_scores_ = np.zeros((n, self.n_components))
self.y_scores_ = np.zeros((n, self.n_components))
self.x_weights_ = np.zeros((p, self.n_components))
self.y_weights_ = np.zeros((q, self.n_components))
self.x_loadings_ = np.zeros((p, self.n_components))
self.y_loadings_ = np.zeros((q, self.n_components))
self.n_iter_ = []
# NIPALS algo: outer loop, over components
for k in range(self.n_components):
if np.all(np.dot(Yk.T, Yk) < np.finfo(np.double).eps):
# Yk constant
warnings.warn('Y residual constant at iteration %s' % k)
break
#1) weights estimation (inner loop)
# -----------------------------------
if self.algorithm == "nipals":
x_weights, y_weights, n_iter_ = \
_nipals_twoblocks_inner_loop(
X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,
tol=self.tol, norm_y_weights=self.norm_y_weights)
self.n_iter_.append(n_iter_)
elif self.algorithm == "svd":
x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)
# compute scores
x_scores = np.dot(Xk, x_weights)
if self.norm_y_weights:
y_ss = 1
else:
y_ss = np.dot(y_weights.T, y_weights)
y_scores = np.dot(Yk, y_weights) / y_ss
# test for null variance
if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:
warnings.warn('X scores are null at iteration %s' % k)
break
#2) Deflation (in place)
# ----------------------
            # A possible memory footprint reduction may be done here: in order
            # to avoid the allocation of a data chunk for the rank-one
            # approximation matrix which is then subtracted from Xk, we suggest
            # performing a column-wise deflation.
#
# - regress Xk's on x_score
x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)
# - subtract rank-one approximations to obtain remainder matrix
Xk -= np.dot(x_scores, x_loadings.T)
if self.deflation_mode == "canonical":
# - regress Yk's on y_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, y_scores)
/ np.dot(y_scores.T, y_scores))
Yk -= np.dot(y_scores, y_loadings.T)
if self.deflation_mode == "regression":
# - regress Yk's on x_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, x_scores)
/ np.dot(x_scores.T, x_scores))
Yk -= np.dot(x_scores, y_loadings.T)
# 3) Store weights, scores and loadings # Notation:
self.x_scores_[:, k] = x_scores.ravel() # T
self.y_scores_[:, k] = y_scores.ravel() # U
self.x_weights_[:, k] = x_weights.ravel() # W
self.y_weights_[:, k] = y_weights.ravel() # C
self.x_loadings_[:, k] = x_loadings.ravel() # P
self.y_loadings_[:, k] = y_loadings.ravel() # Q
# Such that: X = TP' + Err and Y = UQ' + Err
# 4) rotations from input space to transformed space (scores)
# T = X W(P'W)^-1 = XW* (W* : p x k matrix)
        # U = Y C(Q'C)^-1 = YC* (C* : q x k matrix)
self.x_rotations_ = np.dot(
self.x_weights_,
linalg.pinv(np.dot(self.x_loadings_.T, self.x_weights_)))
if Y.shape[1] > 1:
self.y_rotations_ = np.dot(
self.y_weights_,
linalg.pinv(np.dot(self.y_loadings_.T, self.y_weights_)))
else:
self.y_rotations_ = np.ones(1)
if True or self.deflation_mode == "regression":
# FIXME what's with the if?
# Estimate regression coefficient
# Regress Y on T
# Y = TQ' + Err,
# Then express in function of X
# Y = X W(P'W)^-1Q' + Err = XB + Err
# => B = W*Q' (p x q)
self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)
self.coef_ = (1. / self.x_std_.reshape((p, 1)) * self.coef_ *
self.y_std_)
return self
def transform(self, X, Y=None, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
# Apply rotation
x_scores = np.dot(X, self.x_rotations_)
if Y is not None:
Y = check_array(Y, ensure_2d=False, copy=copy)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Y -= self.y_mean_
Y /= self.y_std_
y_scores = np.dot(Y, self.y_rotations_)
return x_scores, y_scores
return x_scores
def predict(self, X, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Notes
-----
This call requires the estimation of a p x q matrix, which may
be an issue in high dimensional space.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
Ypred = np.dot(X, self.coef_)
return Ypred + self.y_mean_
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
        return self.fit(X, y, **fit_params).transform(X, y)
class PLSRegression(_PLS):
"""PLS regression
PLSRegression implements the PLS 2 blocks regression known as PLS2 or PLS1
    in the case of a one-dimensional response.
This class inherits from _PLS with mode="A", deflation_mode="regression",
norm_y_weights=False and algorithm="nipals".
Parameters
----------
n_components : int, (default 2)
Number of components to keep.
scale : boolean, (default True)
whether to scale the data
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real
Tolerance used in the iterative algorithm default 1e-06.
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value set to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
    For each component k, find weights u, v that optimize:
    ``max corr(Xk u, Yk v) * var(Xk u) var(Yk v)``, such that ``|u| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on
the current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current X score. This performs the PLS regression known as PLS2. This
mode is prediction oriented.
    This implementation provides the same results as 3 PLS packages
    available in the R language (R-project):
- "mixOmics" with function pls(X, Y, mode = "regression")
- "plspm " with function plsreg2(X, Y)
- "pls" with function oscorespls.fit(X, Y)
Examples
--------
>>> from sklearn.cross_decomposition import PLSRegression
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> pls2 = PLSRegression(n_components=2)
>>> pls2.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSRegression(copy=True, max_iter=500, n_components=2, scale=True,
tol=1e-06)
>>> Y_pred = pls2.predict(X)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
    In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="regression", mode="A",
norm_y_weights=False, max_iter=max_iter, tol=tol,
copy=copy)
@property
def coefs(self):
check_is_fitted(self, 'coef_')
        warnings.warn("``coefs`` attribute has been deprecated and will be "
                      "removed in version 0.17. Use ``coef_`` instead",
                      DeprecationWarning)
return self.coef_
class PLSCanonical(_PLS):
""" PLSCanonical implements the 2 blocks canonical PLS of the original Wold
    algorithm [Tenenhaus 1998] p.204, referred to as PLS-C2A in [Wegelin 2000].
This class inherits from PLS with mode="A" and deflation_mode="canonical",
norm_y_weights=True and algorithm="nipals", but svd should provide similar
results up to numerical errors.
Parameters
----------
scale : boolean, scale data? (default True)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real, default 1e-06
the tolerance used in the iterative algorithm
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value set to True unless you don't care about side effects.
n_components : int, number of components to keep. (default 2).
Attributes
----------
x_weights_ : array, shape = [p, n_components]
X block weights vectors.
y_weights_ : array, shape = [q, n_components]
Y block weights vectors.
x_loadings_ : array, shape = [p, n_components]
X block loadings vectors.
y_loadings_ : array, shape = [q, n_components]
Y block loadings vectors.
x_scores_ : array, shape = [n_samples, n_components]
X scores.
y_scores_ : array, shape = [n_samples, n_components]
Y scores.
x_rotations_ : array, shape = [p, n_components]
X block to latents rotations.
y_rotations_ : array, shape = [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm provided is "svd".
Notes
-----
For each component k, find weights u, v that optimize::
        max corr(Xk u, Yk v) * var(Xk u) var(Yk v), such that ``|u| = |v| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
    current Y score. This performs a canonical, symmetric version of the PLS
    regression, which is slightly different from CCA. This is mostly used
    for modeling.
    This implementation provides the same results as the "plspm" package
    available in the R language (R-project), using the function plsca(X, Y).
Results are equal or collinear with the function
``pls(..., mode = "canonical")`` of the "mixOmics" package. The difference
    lies in the fact that the mixOmics implementation does not exactly implement
the Wold algorithm since it does not normalize y_weights to one.
Examples
--------
>>> from sklearn.cross_decomposition import PLSCanonical
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> plsca = PLSCanonical(n_components=2)
>>> plsca.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSCanonical(algorithm='nipals', copy=True, max_iter=500, n_components=2,
scale=True, tol=1e-06)
>>> X_c, Y_c = plsca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
CCA
PLSSVD
"""
def __init__(self, n_components=2, scale=True, algorithm="nipals",
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="A",
norm_y_weights=True, algorithm=algorithm,
max_iter=max_iter, tol=tol, copy=copy)
class PLSSVD(BaseEstimator, TransformerMixin):
"""Partial Least Square SVD
    Simply performs an SVD on the cross-covariance matrix X'Y.
    There is no iterative deflation here.
Parameters
----------
n_components : int, default 2
Number of components to keep.
scale : boolean, default True
Whether to scale X and Y.
copy : boolean, default True
Whether to copy X and Y, or perform in-place computations.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
See also
--------
PLSCanonical
CCA
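    Examples
    --------
    A minimal usage sketch (shapes only; numerical values depend on the data):
    >>> import numpy as np
    >>> from sklearn.cross_decomposition import PLSSVD
    >>> X = np.array([[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]])
    >>> Y = np.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]])
    >>> plssvd = PLSSVD(n_components=2).fit(X, Y)
    >>> X_c, Y_c = plssvd.transform(X, Y)
    >>> X_c.shape, Y_c.shape
    ((4, 2), (4, 2))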
"""
def __init__(self, n_components=2, scale=True, copy=True):
self.n_components = n_components
self.scale = scale
self.copy = copy
def fit(self, X, Y):
# copy since this will contains the centered data
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
if self.n_components > max(Y.shape[1], X.shape[1]):
raise ValueError("Invalid number of components n_components=%d with "
"X of shape %s and Y of shape %s."
% (self.n_components, str(X.shape), str(Y.shape)))
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ =\
_center_scale_xy(X, Y, self.scale)
# svd(X'Y)
C = np.dot(X.T, Y)
# The arpack svds solver only works if the number of extracted
# components is smaller than rank(X) - 1. Hence, if we want to extract
# all the components (C.shape[1]), we have to use another one. Else,
# let's use arpacks to compute only the interesting components.
if self.n_components >= np.min(C.shape):
U, s, V = linalg.svd(C, full_matrices=False)
else:
U, s, V = arpack.svds(C, k=self.n_components)
V = V.T
self.x_scores_ = np.dot(X, U)
self.y_scores_ = np.dot(Y, V)
self.x_weights_ = U
self.y_weights_ = V
return self
def transform(self, X, Y=None):
"""Apply the dimension reduction learned on the train data."""
check_is_fitted(self, 'x_mean_')
X = check_array(X, dtype=np.float64)
Xr = (X - self.x_mean_) / self.x_std_
x_scores = np.dot(Xr, self.x_weights_)
if Y is not None:
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Yr = (Y - self.y_mean_) / self.y_std_
y_scores = np.dot(Yr, self.y_weights_)
return x_scores, y_scores
return x_scores
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples is the number of samples and
q is the number of response variables.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
| bsd-3-clause |
boland1992/seissuite_iran | build/lib.linux-x86_64-2.7/seissuite/spectrum/heat_pickle.py | 8 | 25534 | # -*- coding: utf-8 -*-
"""
Created on Fri July 6 11:04:03 2015
@author: boland
"""
import os
import datetime
import numpy as np
import multiprocessing as mp
import matplotlib.pyplot as plt
import shapefile
from scipy import signal
from obspy import read
from scipy.signal import argrelextrema
from info_dataless import locs_from_dataless
from scipy import interpolate
from matplotlib.colors import LogNorm
import pickle
import fiona
from shapely import geometry
from shapely.geometry import asPolygon, Polygon
from math import sqrt, radians, cos, sin, asin
from info_dataless import locs_from_dataless
from descartes.patch import PolygonPatch
from matplotlib.colors import LogNorm
from scipy.spatial import ConvexHull
from scipy.cluster.vq import kmeans
from shapely.affinity import scale
from matplotlib.path import Path
import itertools
from scipy.interpolate import griddata
import random
from sklearn.cluster import DBSCAN
# Geodesic.fast_geodesic below relies on a global ``wgs84`` geodesic object;
# a pyproj Geod on the WGS84 ellipsoid is assumed here.
from pyproj import Geod
wgs84 = Geod(ellps='WGS84')
#------------------------------------------------------------------------------
# CLASSES
#------------------------------------------------------------------------------
class InShape:
"""
Class defined in order to define a shapefile boundary AND quickly check
if a given set of coordinates is contained within it. This class uses
the shapely module.
"""
def __init__(self, input_shape, coords=0.):
#initialise boundary shapefile location string input
self.boundary = input_shape
#initialise coords shape input
self.dots = coords
#initialise boundary polygon
self.polygon = 0.
#initialise output coordinates that are contained within the polygon
self.output = 0.
def shape_poly(self):
with fiona.open(self.boundary) as fiona_collection:
            # In this case, we'll assume the shapefile only has one layer
shapefile_record = fiona_collection.next()
# Use Shapely to create the polygon
self.polygon = geometry.asShape( shapefile_record['geometry'] )
return self.polygon
def point_check(self, coord):
"""
Function that takes a single (2,1) shape input, converts the points
into a shapely.geometry.Point object and then checks if the coord
is contained within the shapefile.
"""
self.polygon = self.shape_poly()
point = geometry.Point(coord[0], coord[1])
if self.polygon.contains(point):
return coord
def shape_bounds(self):
"""
Function that returns the bounding box coordinates xmin,xmax,ymin,ymax
"""
self.polygon = self.shape_poly()
return self.polygon.bounds
def shape_buffer(self, shape=None, size=1., res=1):
"""
Function that returns a new polygon of the larger buffered points.
Can import polygon into function if desired. Default is
self.shape_poly()
"""
if shape is None:
self.polygon = self.shape_poly()
return asPolygon(self.polygon.buffer(size, resolution=res)\
.exterior)
def extract_poly_coords(self, poly):
if poly.type == 'Polygon':
exterior_coords = poly.exterior.coords[:]
elif poly.type == 'MultiPolygon':
exterior_coords = []
for part in poly:
epc = np.asarray(self.extract_poly_coords(part)) # Recursive call
exterior_coords.append(epc)
else:
raise ValueError('Unhandled geometry type: ' + repr(poly.type))
return np.vstack(exterior_coords)
def external_coords(self, shape=None, buff=None, size=1., res=1):
"""
Function that returns the external coords of a buffered shapely
polygon. Note that shape variable input
MUST be a shapely Polygon object.
"""
if shape is not None and buff is not None:
poly = self.shape_buffer(shape=shape, size=size, res=res)
elif shape is not None:
poly = shape
else:
poly = self.shape_poly()
exterior_coords = self.extract_poly_coords(poly)
return exterior_coords
class InPoly:
"""
Class defined in order to define a shapefile boundary AND quickly check
if a given set of coordinates is contained within it. The class uses
the matplotlib Path class.
"""
def __init__(self, input_shape, coords=0.):
#initialise boundary shapefile location string input
self.boundary = input_shape
#initialise coords shape input
self.dots = coords
#initialise boundary polygon
self.polygon = 0.
#initialise boundary polygon nodes
self.nodes = 0.
#initialise output coordinates that are contained within the polygon
self.output = 0.
def poly_nodes(self):
"""
Function that returns the nodes of a shapefile as a (n,2) array.
"""
sf = shapefile.Reader(self.boundary)
poly = sf.shapes()[0]
#find polygon nodes lat lons
self.nodes = np.asarray(poly.points)
return self.nodes
def points_from_path(self, poly):
"""
Function that returns nodes from matplotlib Path object.
"""
return poly.vertices
def shapefile_poly(self):
"""
Function that imports a shapefile location path and returns
a matplotlib Path object representing this shape.
"""
self.nodes = self.poly_nodes()
#convert to a matplotlib path class!
self.polygon = Path(self.nodes)
return self.polygon
def node_poly(self, nodes):
"""
Function creates a matplotlib Path object from input nodes.
"""
#convert to a matplotlib path class!
polygon = Path(nodes)
return polygon
def points_in_shapefile_poly(self):
"""
Function that takes a single (2,1) coordinate input, and uses the
contains() function in class matplotlib Path to check if point is
in the polygon.
"""
self.polygon = self.shapefile_poly()
points_in = self.polygon.contains_points(self.dots)
self.output = self.dots[points_in == True]
return np.asarray(self.output)
def points_in(self, points, poly=None, IN=True, indices=False):
"""
Function that takes a many (2,N) points, and uses the
contains() function in class matplotlib Path to check if point is
in the polygon. If IN=True then the function will return points inside
the matplotlib Path object, else if IN=False then the function will
return the points outside the matplotlib Path object.
"""
if poly is None:
poly = self.shapefile_poly()
points_test = poly.contains_points(points)
if indices:
return points_test
else:
output = points[points_test == IN]
return np.asarray(output)
def bounds_poly(self, nodes=None):
"""
Function that returns boundaries of a shapefile polygon.
"""
if nodes is None:
nodes = self.poly_nodes()
xmin, xmax = np.min(nodes[:,0]), np.max(nodes[:,0])
ymin, ymax = np.min(nodes[:,1]), np.max(nodes[:,1])
return xmin, xmax, ymin, ymax
def poly_from_shape(self, shape=None, size=1., res=1):
"""
Function that returns a matplotlib Path object from
buffered shape points. if shape != None then the shape input
MUST be of type shapely polygon.
"""
SHAPE = InShape(self.boundary)
if shape is None:
# Generates shape object from shape_file input
shape = SHAPE
return self.node_poly(shape.external_coords(size=size, res=res))
else:
return self.node_poly(SHAPE.external_coords(shape=shape))
def rand_poly(self, poly=None, N=1e4, IN=True):
"""
Function that takes an input matplotlib Path object (or the default)
and generates N random points within the bounding box around it.
Then M unknown points are returned that ARE contained within the
Path object. This is done for speed. If IN=True then the function
will return points inside the matplotlib Path object, else if
IN=False then the function will return the points outside the
matplotlib Path object.
"""
if poly is None:
#poly = self.shapefile_poly()
xmin, xmax, ymin, ymax = self.bounds_poly()
else:
nodes = self.points_from_path(poly)
xmin, xmax, ymin, ymax = self.bounds_poly(nodes=nodes)
X = abs(xmax - xmin) * np.random.rand(N,1) + xmin
Y = abs(ymax - ymin) * np.random.rand(N,1) + ymin
many_points = np.column_stack((X,Y))
many_points = self.points_in(many_points, poly=poly, IN=IN)
return many_points
def rand_shape(self, shape=None, N=1e4, IN=True):
"""
Function that takes an input shapely Polygon object (or the default)
and generates N random points within the bounding box around it.
Then M unknown points are returned that ARE contained within the
Polygon object. This is done for speed. If IN=True then the function
will return points inside the matplotlib Path object, else
if IN=False then the function will return the points outside
the matplotlib Path object.
"""
        # Generate an InShape helper object from the boundary shapefile input
        INSHAPE = InShape(self.boundary)
        if shape is None:
            shape = self.node_poly(INSHAPE.external_coords())
            xmin, xmax, ymin, ymax = INSHAPE.shape_bounds()
        poly = self.node_poly(INSHAPE.external_coords(shape=shape))
points = self.rand_poly(poly=poly, N=N, IN=IN)
return points
class Geodesic:
"""
Class defined in order to create to process points, distances and
other related geodesic calculations and functions
"""
def __init__(self, period_range=[1, 40], km_point=20., max_dist=2e3):
# initialise period_range as [1,40] default for ambient noise
self.per_range = period_range
self.km = km_point
self.max_dist = max_dist
def remove_distance(self, period_range, max_dist=None):
"""
Function that returns a given possible resolvable ambient noise
structure distance range, given the maximum period range
        available to the study. The distance returned is in km.
Maximum distance default can be reassigned based on the cut-off found
by your time-lag plots for your study!
"""
if max_dist is None:
max_dist = self.max_dist
if type(period_range) == list:
min_dist = min(period_range) * 9
return [min_dist, max_dist]
elif type(period_range) == int or float:
return [period_range*9, max_dist]
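    # Sketch: with the default max_dist of 2000 km, a [1, 40] s period band
    # maps to a usable inter-station distance range of roughly [9, 2000] km:
    #     Geodesic().remove_distance([1, 40])   # -> [9, 2000.0]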
def haversine(self, lon1, lat1, lon2, lat2, R=6371):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees). R is radius of
spherical earth. Default is 6371km.
"""
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon, dlat = lon2 - lon1, lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
km = R * c
return km
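    # Rough sanity check (assuming the default spherical radius R=6371 km):
    # one degree of longitude along the equator is about 111 km, i.e.
    #     Geodesic().haversine(0., 0., 1., 0.)   # ~111.19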
def fast_geodesic(self, lon1, lat1, lon2, lat2, npts):
"""
Returns a list of *npts* points along the geodesic between
(and including) *coord1* and *coord2*, in an array of
shape (*npts*, 2).
@rtype: L{ndarray}
"""
if npts < 2:
raise Exception('nb of points must be at least 2')
path = wgs84.npts(lon1=lon1, lat1=lat1,
lon2=lon2, lat2=lat2,
npts=npts-2)
return np.array([[lon1,lat1]] + path + [[lon2,lat2]])
def paths_calc(self, path_info, km_points=None, per_lims=None):
"""
Function that returns an array of coordinates equidistant along
a great cricle path between two lat-lon coordinates if these points
lay within a certain distance range ... otherwise the points return
only a set of zeros the same size as the array. Default is 1.0km
distance per point.
"""
if per_lims is None:
# if no new default for period limits is defined, then set the
# limit to the default.
per_lims = self.per_range
if km_points is None:
km_points = self.km
lon1, lat1, lon2, lat2 = path_info[0], \
path_info[1], path_info[2], path_info[3]
# interpoint distance <= 1 km, and nb of points >= 100
dist = self.haversine(lon1, lat1, lon2, lat2)
npts = max(int((np.ceil(dist) + 1) / km_points), 100)
path = self.fast_geodesic(lon1, lat1, lon2, lat2, npts)
dist_range = self.remove_distance(per_lims)
if min(dist_range) < dist < max(dist_range):
#remove the closest points along this line that fall below the distance
#find the index of the first point that is above this distance away!
pts_km = npts / float((np.ceil(dist) + 1)) #this gives pts/km
#remove all points below this index in the paths list
            dist_index = int(pts_km * min(dist_range))
            path = path[dist_index:]
return path
else:
return np.zeros_like(path)
def fast_paths(self, coord_list):
"""
Function that takes many point coordinate combinations and quickly
passes them through the paths_calc function. coord_list MUST be
of the shape (4, N) whereby each coordinate combination is in a
(4,1) row [lon1,lat1,lon2,lat2].
"""
return map(self.paths_calc, coord_list)
def combine_paths(self, paths):
"""
Function that takes many paths (should be array of same length as
number of stations). This is automatically generated by parallelising
the fast_paths function above.
The output array should only contain unique, no repeating paths
and should be of the shape (2,N) where N is a large number of coords.
"""
#create a flattened numpy array of size 2xN from the paths created!
paths = list(itertools.chain(*paths))
paths = np.asarray(list(itertools.chain\
(*paths)))
#keep all but the repeated coordinates by keeping only unique whole rows!
b = np.ascontiguousarray(paths).view(np.dtype\
((np.void, paths.dtype.itemsize * \
paths.shape[1])))
_, idx = np.unique(b, return_index=True)
paths = np.unique(b).view(paths.dtype)\
.reshape(-1, paths.shape[1])
return paths
def remove_zeros(self, paths):
"""
Function that processes the flattened path output from combine_paths
and removes the zero paths created by paths_calc. Remove zeroes
from paths to ensure all paths that were NOT in the distance threshold
are removed from the path density calculation!
"""
path_lons, path_lats = paths[:,0], paths[:,1]
FIND_ZERO1 = np.where(paths[:,0]==0)[0]
FIND_ZERO2 = np.where(paths[:,1]==0)[0]
if len(FIND_ZERO1) != 0 and len(FIND_ZERO2) != 0:
path_lons = np.delete(path_lons, FIND_ZERO1)
path_lats = np.delete(path_lats, FIND_ZERO2)
return np.column_stack((path_lons, path_lats))
#------------------------------------------------------------------------------
# IMPORT PATHS TO MSEED FILES
#------------------------------------------------------------------------------
def spectrum(tr):
wave = tr.data #this is how to extract a data array from a mseed file
fs = tr.stats.sampling_rate
#hour = str(hour).zfill(2) #create correct format for eqstring
f, Pxx_spec = signal.welch(wave, fs, 'flattop', nperseg=1024, scaling='spectrum')
#plt.semilogy(f, np.sqrt(Pxx_spec))
if len(f) >= 256:
column = np.column_stack((f[:255], np.abs(np.sqrt(Pxx_spec)[:255])))
return column
else:
return 0.
# x = np.linspace(0, 10, 1000)
# f_interp = interp1d(np.sqrt(Pxx_spec),f, kind='cubic')
#x.reverse()
#y.reverse()
# print f_interp(x)
#f,np.sqrt(Pxx_spec),'o',
# plt.figure()
# plt.plot(x,f_interp(x),'-' )
# plt.show()
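# Sketch of how spectrum() is used (placeholder path; requires an obspy trace):
#     st = read('/path/to/day_file.mseed')
#     spec = spectrum(st[0])   # typically a (255, 2) array of [f, sqrt(PSD)]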
def paths_sort(path):
"""
Function defined for customised sorting of the abs_paths list
and will be used in conjunction with the sorted() built in python
function in order to produce file paths in chronological order.
"""
base_name = os.path.basename(path)
stat_name = base_name.split('.')[0]
date = base_name.split('.')[1]
try:
date = datetime.datetime.strptime(date, '%Y-%m-%d')
return date, stat_name
    except Exception as e:
        # if the filename does not contain a parsable date, leave it unsorted
        pass
def paths(folder_path, extension):
"""
Function that returns a list of desired absolute paths called abs_paths
of files that contains a given extension e.g. .txt should be entered as
folder_path, txt. This function will run recursively through and find
any and all files within this folder with that extension!
"""
abs_paths = []
for root, dirs, files in os.walk(folder_path):
for f in files:
fullpath = os.path.join(root, f)
if os.path.splitext(fullpath)[1] == '.{}'.format(extension):
abs_paths.append(fullpath)
abs_paths = sorted(abs_paths, key=paths_sort)
return abs_paths
GEODESIC = Geodesic()
# import background shapefile location
shape_path = "/home/boland/Dropbox/University/UniMelb\
/AGOS/PROGRAMS/ANT/Versions/26.04.2015/shapefiles/aus.shp"
INPOLY = InPoly(shape_path)
# generate shape object
# Generate InShape class
SHAPE = InShape(shape_path)
# Create shapely polygon from imported shapefile
UNIQUE_SHAPE = SHAPE.shape_poly()
# set plotting limits for shapefile boundaries
lonmin, latmin, lonmax, latmax = SHAPE.shape_bounds()
print lonmin, latmin, lonmax, latmax
#lonmin, lonmax, latmin, latmax = SHAPE.plot_lims()
dataless_path = 'ALL_AUSTRALIA.870093.dataless'
stat_locs = locs_from_dataless(dataless_path)
#folder_path = '/storage/ANT/INPUT/DATA/AU-2014'
folder_path = '/storage/ANT/INPUT/DATA/AU-2014'
extension = 'mseed'
paths_list = paths(folder_path, extension)
t0_total = datetime.datetime.now()
figs_counter = 0
pickle_file0 = '/storage/ANT/spectral_density/station_pds_maxima/\
AUSTRALIA 2014/noiseinfo_comb.pickle'
pickle_file0 = '/storage/ANT/spectral_density/station_pds_maxima/AUSTRALIA 2014/first_peak_dict_australia_2014.pickle'
pickle_file0 = '/storage/ANT/spectral_density/noise_info0.pickle'
comb_noise = '/storage/ANT/spectral_density/station_pds_maxima/total_noise_combination.pickle'
f = open(name=comb_noise, mode='rb')
noise_info0 = pickle.load(f)
f.close()
# sort the noise
noise_info0 = np.asarray(noise_info0)
#noise_info0 = noise_info0[np.argsort(noise_info0[:, 1])]
# Combine AU with S info
print len(noise_info0)
# find outliers
def reject_outliers(data, m=0.5):
return data[abs(data - np.mean(data)) < m * np.std(data)]
outliers = reject_outliers(noise_info0[:,2])
# remove outliers
noise_info0 = np.asarray([info for info in noise_info0 \
if info[2] in outliers])
# filter coordinates that are too close together.
min_dist = 1. #degrees
coords = np.column_stack((noise_info0[:,0], noise_info0[:,1]))
# next remove points outside of the given poly if applicable
coord_indices = INPOLY.points_in(coords, indices=True)
noise_info0 = noise_info0[coord_indices == True]
print noise_info0
coords = np.column_stack((noise_info0[:,0], noise_info0[:,1]))
coord_combs = np.asarray(list(itertools.combinations(coords,2)))
print len(coord_combs)
def coord_combinations(coord_combs):
lon1, lat1 = coord_combs[0][0], coord_combs[0][1]
lon2, lat2 = coord_combs[1][0], coord_combs[1][1]
return [coord_combs, GEODESIC.haversine(lon1, lat1, lon2, lat2)]
t0 = datetime.datetime.now()
pool = mp.Pool()
comb_dists = pool.map(coord_combinations, coord_combs)
pool.close()
pool.join()
t1 = datetime.datetime.now()
print t1-t0
comb_dists = np.asarray(comb_dists)
# sort by distance
comb_dists = comb_dists[np.argsort(comb_dists[:, 1])]
# find the indices of station pairs separated by more than min_dist
find_min = np.where(comb_dists[:,1]>min_dist)[0]
# remove those pairs, keeping only the pairs closer together than min_dist
comb_dists = np.delete(comb_dists, find_min, axis=0)
remaining_coords = comb_dists[:,0]
# get unique coordinates from remaining coords
#paths = list(itertools.chain(*paths))
remaining_coords = np.asarray(list(itertools.chain\
(*remaining_coords)))
#keep all but the repeated coordinates by keeping only unique whole rows!
b = np.ascontiguousarray(remaining_coords).view(np.dtype\
((np.void, remaining_coords.dtype.itemsize * \
remaining_coords.shape[1])))
_, idx = np.unique(b, return_index=True)
remaining_coords = np.unique(b).view(remaining_coords.dtype)\
.reshape(-1, remaining_coords.shape[1])
#scan for all points that are within a degree radius of one another!
db = DBSCAN(eps=min_dist).fit(coords)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
# Black removed and is used for noise instead.
unique_labels = set(labels)
clusters = []
cluster_keep = []
for k in unique_labels:
if k != -1:
class_member_mask = (labels == k)
cluster = coords[class_member_mask & core_samples_mask]
#xy = coords[class_member_mask & ~core_samples_mask]
# Select only 1 random point from each cluster to keep. Remove all others!
clusters.append(cluster)
cluster_keep.append(cluster[random.randint(0,len(cluster)-1)])
cluster_keep = np.asarray(cluster_keep)
# flatten clusters array
clusters = np.asarray(list(itertools.chain(*clusters)))
# remove all points in clusters from the overall coords array
coords = np.asarray([coord for coord in coords if coord not in clusters])
# place single representative point from cluster back into overall coord list
coords = np.append(coords, cluster_keep, axis=0)
print len(noise_info0)
# remove cluster coordinates from noise_info0
noise_info0 = np.asarray([info for info in noise_info0 \
if info[0] in coords[:,0]])
fig = plt.figure(figsize=(15,10), dpi=1000)
plt.title('Average Seismic Noise First Peak Maximum PDS\n Australian Network | 2014')
plt.xlabel('Longitude (degrees)')
plt.ylabel('Latitude (degrees)')
print "number of station points: ", len(noise_info0)
patch = PolygonPatch(UNIQUE_SHAPE, facecolor='white',\
edgecolor='k', zorder=1)
ax = fig.add_subplot(111)
ax.add_patch(patch)
x, y = noise_info0[:,0], noise_info0[:,1]
points = np.column_stack((x,y))
xmin, xmax = np.min(x), np.max(x)
ymin, ymax = np.min(y), np.max(y)
values = noise_info0[:,2]
#now we create a grid of values, interpolated from our random sample above
y = np.linspace(ymin, ymax, 200)
x = np.linspace(xmin, xmax, 200)
gridx, gridy = np.meshgrid(x, y)
heat_field = griddata(points, values, (gridx, gridy),
method='cubic',fill_value=0)
#heat_field = np.where(heat_field < 0, 1, heat_field)
heat_field = np.ma.masked_where(heat_field==0,heat_field)
print gridx
plt.pcolor(gridx, gridy, heat_field,
cmap='rainbow',alpha=0.5, norm=LogNorm(vmin=100, vmax=3e4),
zorder=2)
plt.scatter(noise_info0[:,0], noise_info0[:,1], c=noise_info0[:,2],
norm=LogNorm(vmin=100, vmax=3e4), s=35, cmap='rainbow', zorder=3)
#cmin, cmax = np.min(noise_info0[:,2]), np.max(noise_info0[:,2])
#sc = plt.scatter(noise_info0[:,0], noise_info0[:,1], c=noise_info0[:,2],
# norm=LogNorm(vmin=100, vmax=3e4), s=50, cmap=cm, zorder=2)
col = plt.colorbar()
col.ax.set_ylabel('Maximum Power Density Spectrum (V RMS)')
ax.set_xlim(lonmin-0.05*abs(lonmax-lonmin), \
lonmax+0.05*abs(lonmax-lonmin))
ax.set_ylim(latmin-0.05*abs(latmax-latmin), \
latmax+0.05*abs(latmax-latmin))
fig.savefig('station_pds_maxima/noise_map_all.svg',
format='SVG')
| gpl-3.0 |
jkeung/yellowbrick | tests/test_text/test_base.py | 2 | 1473 | # tests.test_text.test_base
# Tests for the text visualization base classes
#
# Author: Benjamin Bengfort <bbengfort@districtdatalabs.com>
# Created: Mon Feb 20 06:34:50 2017 -0500
#
# Copyright (C) 2016 District Data Labs
# For license information, see LICENSE.txt
#
# ID: test_base.py [] benjamin@bengfort.com $
"""
Tests for the text visualization base classes
"""
##########################################################################
## Imports
##########################################################################
import unittest
from yellowbrick.base import *
from yellowbrick.text.base import *
from sklearn.base import BaseEstimator, TransformerMixin
##########################################################################
## TextVisualizer Base Tests
##########################################################################
class TextVisualizerBaseTests(unittest.TestCase):
def test_subclass(self):
"""
Assert the text visualizer is subclassed correctly
"""
visualizer = TextVisualizer()
self.assertIsInstance(visualizer, TransformerMixin)
self.assertIsInstance(visualizer, BaseEstimator)
self.assertIsInstance(visualizer, Visualizer)
# def test_interface(self):
# """
# Test the feature visualizer interface
# """
#
# visualizer = TextVisualizer()
# with self.assertRaises(NotImplementedError):
# visualizer.poof()
| apache-2.0 |
schets/scikit-learn | examples/plot_kernel_ridge_regression.py | 229 | 6222 | """
=============================================
Comparison of kernel ridge regression and SVR
=============================================
Both kernel ridge regression (KRR) and SVR learn a non-linear function by
employing the kernel trick, i.e., they learn a linear function in the space
induced by the respective kernel which corresponds to a non-linear function in
the original space. They differ in the loss functions (ridge versus
epsilon-insensitive loss). In contrast to SVR, fitting a KRR can be done in
closed-form and is typically faster for medium-sized datasets. On the other
hand, the learned model is non-sparse and thus slower than SVR at
prediction-time.
This example illustrates both methods on an artificial dataset, which
consists of a sinusoidal target function and strong noise added to every fifth
datapoint. The first figure compares the learned model of KRR and SVR when both
complexity/regularization and bandwidth of the RBF kernel are optimized using
grid-search. The learned functions are very similar; however, fitting KRR is
approx. seven times faster than fitting SVR (both with grid-search). However,
prediction of 100000 target values is more than three times faster with SVR
since it has learned a sparse model using only approx. 1/3 of the 100 training
datapoints as support vectors.
The next figure compares the time for fitting and prediction of KRR and SVR for
different sizes of the training set. Fitting KRR is faster than SVR for medium-
sized training sets (less than 1000 samples); however, for larger training sets
SVR scales better. With regard to prediction time, SVR is faster than
KRR for all sizes of the training set because of the learned sparse
solution. Note that the degree of sparsity and thus the prediction time depends
on the parameters epsilon and C of the SVR.
"""
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
from __future__ import division
import time
import numpy as np
from sklearn.svm import SVR
from sklearn.grid_search import GridSearchCV
from sklearn.learning_curve import learning_curve
from sklearn.kernel_ridge import KernelRidge
import matplotlib.pyplot as plt
rng = np.random.RandomState(0)
#############################################################################
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
X_plot = np.linspace(0, 5, 100000)[:, None]
#############################################################################
# Fit regression model
train_size = 100
svr = GridSearchCV(SVR(kernel='rbf', gamma=0.1), cv=5,
param_grid={"C": [1e0, 1e1, 1e2, 1e3],
"gamma": np.logspace(-2, 2, 5)})
kr = GridSearchCV(KernelRidge(kernel='rbf', gamma=0.1), cv=5,
param_grid={"alpha": [1e0, 0.1, 1e-2, 1e-3],
"gamma": np.logspace(-2, 2, 5)})
t0 = time.time()
svr.fit(X[:train_size], y[:train_size])
svr_fit = time.time() - t0
print("SVR complexity and bandwidth selected and model fitted in %.3f s"
% svr_fit)
t0 = time.time()
kr.fit(X[:train_size], y[:train_size])
kr_fit = time.time() - t0
print("KRR complexity and bandwidth selected and model fitted in %.3f s"
% kr_fit)
sv_ratio = svr.best_estimator_.support_.shape[0] / train_size
print("Support vector ratio: %.3f" % sv_ratio)
t0 = time.time()
y_svr = svr.predict(X_plot)
svr_predict = time.time() - t0
print("SVR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], svr_predict))
t0 = time.time()
y_kr = kr.predict(X_plot)
kr_predict = time.time() - t0
print("KRR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], kr_predict))
#############################################################################
# look at the results
sv_ind = svr.best_estimator_.support_
plt.scatter(X[sv_ind], y[sv_ind], c='r', s=50, label='SVR support vectors')
plt.scatter(X[:100], y[:100], c='k', label='data')
plt.hold('on')
plt.plot(X_plot, y_svr, c='r',
label='SVR (fit: %.3fs, predict: %.3fs)' % (svr_fit, svr_predict))
plt.plot(X_plot, y_kr, c='g',
label='KRR (fit: %.3fs, predict: %.3fs)' % (kr_fit, kr_predict))
plt.xlabel('data')
plt.ylabel('target')
plt.title('SVR versus Kernel Ridge')
plt.legend()
# Visualize training and prediction time
plt.figure()
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
sizes = np.logspace(1, 4, 7).astype(int)
for name, estimator in {"KRR": KernelRidge(kernel='rbf', alpha=0.1,
gamma=10),
"SVR": SVR(kernel='rbf', C=1e1, gamma=10)}.items():
train_time = []
test_time = []
for train_test_size in sizes:
t0 = time.time()
estimator.fit(X[:train_test_size], y[:train_test_size])
train_time.append(time.time() - t0)
t0 = time.time()
estimator.predict(X_plot[:1000])
test_time.append(time.time() - t0)
plt.plot(sizes, train_time, 'o-', color="r" if name == "SVR" else "g",
label="%s (train)" % name)
plt.plot(sizes, test_time, 'o--', color="r" if name == "SVR" else "g",
label="%s (test)" % name)
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Train size")
plt.ylabel("Time (seconds)")
plt.title('Execution Time')
plt.legend(loc="best")
# Visualize learning curves
plt.figure()
svr = SVR(kernel='rbf', C=1e1, gamma=0.1)
kr = KernelRidge(kernel='rbf', alpha=0.1, gamma=0.1)
train_sizes, train_scores_svr, test_scores_svr = \
learning_curve(svr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
train_sizes_abs, train_scores_kr, test_scores_kr = \
learning_curve(kr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
plt.plot(train_sizes, test_scores_svr.mean(1), 'o-', color="r",
label="SVR")
plt.plot(train_sizes, test_scores_kr.mean(1), 'o-', color="g",
label="KRR")
plt.xlabel("Train size")
plt.ylabel("Mean Squared Error")
plt.title('Learning curves')
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
sangwook236/general-development-and-testing | sw_dev/python/rnd/test/machine_learning/tensorflow2/tensorflow2_saving_and_loading.py | 2 | 6119 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import tensorflow as tf
# Define a simple sequential model
def create_model():
model = tf.keras.models.Sequential([
tf.keras.layers.Dense(512, activation='relu', input_shape=(784,)),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
return model
# REF [site] >> https://www.tensorflow.org/tutorials/keras/save_and_load
def keras_example():
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
train_labels = train_labels[:1000]
test_labels = test_labels[:1000]
train_images = train_images[:1000].reshape(-1, 28 * 28) / 255.0
test_images = test_images[:1000].reshape(-1, 28 * 28) / 255.0
#----------------------------------------
# Save checkpoints during training.
# Checkpoint callback usage.
# Create a basic model instance.
model = create_model()
# Display the model's architecture.
model.summary()
# Create a callback that saves the model's weights.
checkpoint_filepath = 'training_1/ckpt'
#os.makedirs(checkpoint_filepath + '/variables') # When save_weights_only = False.
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_filepath,
save_weights_only=True,
verbose=1)
callbacks = [checkpoint_callback]
# Train the model with the new callback.
model.fit(train_images,
train_labels,
epochs=10,
validation_data=(test_images, test_labels),
callbacks=callbacks) # Pass callback to training.
# This may generate warnings related to saving the state of the optimizer.
# These warnings (and similar warnings throughout this notebook)
# are in place to discourage outdated usage, and can be ignored.
#--------------------
# Create a basic model instance.
model = create_model()
# Evaluate the model.
loss, acc = model.evaluate(test_images, test_labels, verbose=2)
print('Untrained model, accuracy: {:5.2f}%'.format(100 * acc))
# Loads the weights.
model.load_weights(checkpoint_filepath)
# Re-evaluate the model.
loss, acc = model.evaluate(test_images, test_labels, verbose=2)
print('Restored model, accuracy: {:5.2f}%'.format(100 * acc))
#----------------------------------------
# Checkpoint callback options.
# Include the epoch in the file name (uses 'str.format').
checkpoint_filepath = 'training_2/ckpt.{epoch:04d}'
#checkpoint_filepath = 'training_2/ckpt.{epoch:04d}-{val_loss:.5f}'
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_filepath,
#monitor='val_loss',
verbose=1,
save_best_only=True,
save_weights_only=True,
mode='auto',
period=5)
callbacks = [checkpoint_callback]
# Create a new model instance.
model = create_model()
# Save the weights using the 'checkpoint_filepath' format.
model.save_weights(checkpoint_filepath.format(epoch=0))
# Train the model with the new callback.
model.fit(train_images,
train_labels,
epochs=50,
callbacks=callbacks,
validation_data=(test_images, test_labels),
verbose=0)
#--------------------
checkpoint_dir_path = os.path.dirname(checkpoint_filepath)
latest_checkpoint_filepath = tf.train.latest_checkpoint(checkpoint_dir_path)
print('Latest checkpoint filepath = {}.'.format(latest_checkpoint_filepath))
# Create a new model instance.
model = create_model()
# Load the previously saved weights.
model.load_weights(latest_checkpoint_filepath)
# Re-evaluate the model.
loss, acc = model.evaluate(test_images, test_labels, verbose=2)
print('Restored model, accuracy: {:5.2f}%.'.format(100 * acc))
#----------------------------------------
# Manually save weights.
# Save the weights.
model.save_weights('./checkpoints/my_ckpt')
# Create a new model instance.
model = create_model()
# Restore the weights.
model.load_weights('./checkpoints/my_ckpt')
# Evaluate the model.
loss,acc = model.evaluate(test_images, test_labels, verbose=2)
print('Restored model, accuracy: {:5.2f}%'.format(100 * acc))
class MyModel(tf.keras.Model):
"""A simple linear model."""
def __init__(self):
super(MyModel, self).__init__()
self.l1 = tf.keras.layers.Dense(5)
def call(self, x):
return self.l1(x)
# REF [site] >> https://www.tensorflow.org/guide/checkpoint
def checkpoint_example():
def toy_dataset():
inputs = tf.range(10.)[:, None]
labels = inputs * 5. + tf.range(5.)[None, :]
return tf.data.Dataset.from_tensor_slices(dict(x=inputs, y=labels)).repeat(10).batch(2)
def train_step(model, example, optimizer):
"""Trains 'model' on 'example' using 'optimizer'."""
with tf.GradientTape() as tape:
output = model(example['x'])
loss = tf.reduce_mean(tf.abs(output - example['y']))
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
optimizer.apply_gradients(zip(gradients, variables))
return loss
#--------------------
model = MyModel()
optimizer = tf.keras.optimizers.Adam(0.1)
# Create the checkpoint objects.
ckpt = tf.train.Checkpoint(step=tf.Variable(1), optimizer=optimizer, net=model)
manager = tf.train.CheckpointManager(ckpt, './tf_ckpts', max_to_keep=3)
# Train and checkpoint the model.
ckpt.restore(manager.latest_checkpoint)
if manager.latest_checkpoint:
print('Restored from {}.'.format(manager.latest_checkpoint))
else:
print('Initializing from scratch.')
for example in toy_dataset():
loss = train_step(model, example, optimizer)
ckpt.step.assign_add(1)
if int(ckpt.step) % 10 == 0:
save_path = manager.save()
print('Saved checkpoint for step {}: {}.'.format(int(ckpt.step), save_path))
print('Loss: {:1.2f}.'.format(loss.numpy()))
# REF [site] >> https://www.tensorflow.org/guide/saved_model
def saved_model_example():
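    # Minimal sketch of what this example could cover (illustrative only; the
    # export path below is a placeholder):
    #     model = create_model()
    #     tf.saved_model.save(model, './my_saved_model')
    #     restored = tf.saved_model.load('./my_saved_model')
    #     print(list(restored.signatures.keys()))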
raise NotImplementedError
def main():
keras_example()
#checkpoint_example()
#saved_model_example() # Not yet implemented.
#--------------------------------------------------------------------
if '__main__' == __name__:
main()
| gpl-2.0 |
yejingxin/PyKrige | pykrige/uk3d.py | 1 | 44478 | __doc__ = """Code by Benjamin S. Murphy
bscott.murphy@gmail.com
Dependencies:
numpy
scipy
matplotlib
Classes:
UniversalKriging3D: Support for 3D Universal Kriging.
References:
    P.K. Kitanidis, Introduction to Geostatistics: Applications in Hydrogeology,
(Cambridge University Press, 1997) 272 p.
Copyright (c) 2015 Benjamin S. Murphy
"""
import numpy as np
import scipy.linalg
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
import variogram_models
import core
class UniversalKriging3D:
"""class UniversalKriging3D
Three-dimensional universal kriging
Dependencies:
numpy
scipy
matplotlib
Inputs:
X (array-like): X-coordinates of data points.
Y (array-like): Y-coordinates of data points.
Z (array-like): Z-coordinates of data points.
Val (array-like): Values at data points.
        variogram_model (string, optional): Specifies which variogram model to use;
may be one of the following: linear, power, gaussian, spherical,
            exponential. Default is linear variogram model. To utilize a custom variogram
model, specify 'custom'; you must also provide variogram_parameters and
variogram_function.
variogram_parameters (list, optional): Parameters that define the
specified variogram model. If not provided, parameters will be automatically
calculated such that the root-mean-square error for the fit variogram
function is minimized.
linear - [slope, nugget]
power - [scale, exponent, nugget]
gaussian - [sill, range, nugget]
spherical - [sill, range, nugget]
exponential - [sill, range, nugget]
For a custom variogram model, the parameters are required, as custom variogram
models currently will not automatically be fit to the data. The code does not
check that the provided list contains the appropriate number of parameters for
the custom variogram model, so an incorrect parameter list in such a case will
probably trigger an esoteric exception someplace deep in the code.
variogram_function (callable, optional): A callable function that must be provided
if variogram_model is specified as 'custom'. The function must take only two
arguments: first, a list of parameters for the variogram model; second, the
distances at which to calculate the variogram model. The list provided in
variogram_parameters will be passed to the function as the first argument.
nlags (int, optional): Number of averaging bins for the semivariogram.
Default is 6.
weight (boolean, optional): Flag that specifies if semivariance at smaller lags
should be weighted more heavily when automatically calculating variogram model.
True indicates that weights will be applied. Default is False.
(Kitanidis suggests that the values at smaller lags are more important in
fitting a variogram model, so the option is provided to enable such weighting.)
anisotropy_scaling_y (float, optional): Scalar stretching value to take
into account anisotropy in the y direction. Default is 1 (effectively no stretching).
Scaling is applied in the y direction in the rotated data frame
(i.e., after adjusting for the anisotropy_angle_x/y/z, if anisotropy_angle_x/y/z
is/are not 0).
anisotropy_scaling_z (float, optional): Scalar stretching value to take
into account anisotropy in the z direction. Default is 1 (effectively no stretching).
Scaling is applied in the z direction in the rotated data frame
(i.e., after adjusting for the anisotropy_angle_x/y/z, if anisotropy_angle_x/y/z
is/are not 0).
anisotropy_angle_x (float, optional): CCW angle (in degrees) by which to
rotate coordinate system about the x axis in order to take into account anisotropy.
Default is 0 (no rotation). Note that the coordinate system is rotated. X rotation
is applied first, then y rotation, then z rotation. Scaling is applied after rotation.
anisotropy_angle_y (float, optional): CCW angle (in degrees) by which to
rotate coordinate system about the y axis in order to take into account anisotropy.
Default is 0 (no rotation). Note that the coordinate system is rotated. X rotation
is applied first, then y rotation, then z rotation. Scaling is applied after rotation.
anisotropy_angle_z (float, optional): CCW angle (in degrees) by which to
rotate coordinate system about the z axis in order to take into account anisotropy.
Default is 0 (no rotation). Note that the coordinate system is rotated. X rotation
is applied first, then y rotation, then z rotation. Scaling is applied after rotation.
drift_terms (list of strings, optional): List of drift terms to include in three-dimensional
universal kriging. Supported drift terms are currently 'regional_linear',
'specified', and 'functional'.
specified_drift (list of array-like objects, optional): List of arrays that contain
the drift values at data points. The arrays must be dim N, where N is the number
of data points. Any number of specified-drift terms may be used.
functional_drift (list of callable objects, optional): List of callable functions that
will be used to evaluate drift terms. The function must be a function of only the
three spatial coordinates and must return a single value for each coordinate triplet.
It must be set up to be called with only three arguments, first an array of x values,
the second an array of y values, and the third an array of z values. If the problem
involves anisotropy, the drift values are calculated in the adjusted data frame.
verbose (Boolean, optional): Enables program text output to monitor
kriging process. Default is False (off).
enable_plotting (Boolean, optional): Enables plotting to display
variogram. Default is False (off).
Callable Methods:
display_variogram_model(): Displays semivariogram and variogram model.
        update_variogram_model(variogram_model, variogram_parameters=None, variogram_function=None,
            nlags=6, weight=False, anisotropy_scaling_y=1.0, anisotropy_scaling_z=1.0,
            anisotropy_angle_x=0.0, anisotropy_angle_y=0.0, anisotropy_angle_z=0.0):
Changes the variogram model and variogram parameters for
the kriging system.
Inputs:
variogram_model (string): May be any of the variogram models
listed above. May also be 'custom', in which case variogram_parameters
and variogram_function must be specified.
variogram_parameters (list, optional): List of variogram model
parameters, as listed above. If not provided, a best fit model
will be calculated as described above.
variogram_function (callable, optional): A callable function that must be
provided if variogram_model is specified as 'custom'. See above for
more information.
nlags (int, optional): Number of averaging bins for the semivariogram.
                    Default is 6.
weight (boolean, optional): Flag that specifies if semivariance at smaller lags
should be weighted more heavily when automatically calculating variogram model.
True indicates that weights will be applied. Default is False.
                anisotropy_scaling_y, anisotropy_scaling_z (floats, optional): Scalar
                    stretching values to take into account anisotropy in the y and z
                    directions. Default is 1 (effectively no stretching).
                anisotropy_angle_x, anisotropy_angle_y, anisotropy_angle_z (floats, optional):
                    CCW angles (in degrees) by which to rotate the coordinate system about
                    the x, y, and z axes in order to take into account anisotropy.
                    Default is 0 (no rotation).
switch_verbose(): Enables/disables program text output. No arguments.
        switch_plotting(): Enables/disables variogram plot display. No arguments.
get_epsilon_residuals(): Returns the epsilon residuals of the
variogram fit. No arguments.
plot_epsilon_residuals(): Plots the epsilon residuals of the variogram
fit in the order in which they were calculated. No arguments.
get_statistics(): Returns the Q1, Q2, and cR statistics for the
variogram fit (in that order). No arguments.
print_statistics(): Prints out the Q1, Q2, and cR statistics for
the variogram fit. NOTE that ideally Q1 is close to zero,
Q2 is close to 1, and cR is as small as possible.
execute(style, xpoints, ypoints, mask=None): Calculates a kriged grid.
Inputs:
style (string): Specifies how to treat input kriging points.
Specifying 'grid' treats xpoints, ypoints, and zpoints as
                arrays of x, y, z coordinates that define a rectangular grid.
Specifying 'points' treats xpoints, ypoints, and zpoints as arrays
that provide coordinates at which to solve the kriging system.
Specifying 'masked' treats xpoints, ypoints, zpoints as arrays of
x, y, z coordinates that define a rectangular grid and uses mask
to only evaluate specific points in the grid.
            xpoints (array-like, dim N): If style is specified as 'grid' or 'masked',
x-coordinates of LxMxN grid. If style is specified as 'points',
x-coordinates of specific points at which to solve kriging system.
ypoints (array-like, dim M): If style is specified as 'grid' or 'masked',
y-coordinates of LxMxN grid. If style is specified as 'points',
y-coordinates of specific points at which to solve kriging system.
Note that in this case, xpoints, ypoints, and zpoints must have the
same dimensions (i.e., L = M = N).
zpoints (array-like, dim L): If style is specified as 'grid' or 'masked',
z-coordinates of LxMxN grid. If style is specified as 'points',
z-coordinates of specific points at which to solve kriging system.
Note that in this case, xpoints, ypoints, and zpoints must have the
same dimensions (i.e., L = M = N).
mask (boolean array, dim LxMxN, optional): Specifies the points in the rectangular
grid defined by xpoints, ypoints, and zpoints that are to be excluded in the
kriging calculations. Must be provided if style is specified as 'masked'.
False indicates that the point should not be masked; True indicates that
the point should be masked.
backend (string, optional): Specifies which approach to use in kriging.
Specifying 'vectorized' will solve the entire kriging problem at once in a
vectorized operation. This approach is faster but also can consume a
significant amount of memory for large grids and/or large datasets.
Specifying 'loop' will loop through each point at which the kriging system
is to be solved. This approach is slower but also less memory-intensive.
Default is 'vectorized'.
specified_drift_arrays (list of array-like objects, optional): Specifies the drift
values at the points at which the kriging system is to be evaluated. Required if
'specified' drift provided in the list of drift terms when instantiating the
UniversalKriging3D class. Must be a list of arrays in the same order as the list
provided when instantiating the kriging object. Array(s) must be the same dimension
as the specified grid or have the same number of points as the specified points;
i.e., the arrays either must be dim LxMxN, where L is the number of z grid-points,
M is the number of y grid-points, and N is the number of x grid-points,
or dim N, where N is the number of points at which to evaluate the kriging system.
Outputs:
kvalues (numpy array, dim LxMxN or dim Nx1): Interpolated values of specified grid
or at the specified set of points. If style was specified as 'masked',
kvalues will be a numpy masked array.
sigmasq (numpy array, dim LxMxN or dim Nx1): Variance at specified grid points or
at the specified set of points. If style was specified as 'masked', sigmasq
will be a numpy masked array.
References:
        P.K. Kitanidis, Introduction to Geostatistics: Applications in Hydrogeology,
(Cambridge University Press, 1997) 272 p.
"""
UNBIAS = True # This can be changed to remove the unbiasedness condition
# Really for testing purposes only...
eps = 1.e-10 # Cutoff for comparison to zero
variogram_dict = {'linear': variogram_models.linear_variogram_model,
'power': variogram_models.power_variogram_model,
'gaussian': variogram_models.gaussian_variogram_model,
'spherical': variogram_models.spherical_variogram_model,
'exponential': variogram_models.exponential_variogram_model}
def __init__(self, x, y, z, val, variogram_model='linear', variogram_parameters=None,
variogram_function=None, nlags=6, weight=False, anisotropy_scaling_y=1.0,
anisotropy_scaling_z=1.0, anisotropy_angle_x=0.0, anisotropy_angle_y=0.0,
anisotropy_angle_z=0.0, drift_terms=None, specified_drift=None,
functional_drift=None, verbose=False, enable_plotting=False):
# Deal with mutable default argument
if drift_terms is None:
drift_terms = []
if specified_drift is None:
specified_drift = []
if functional_drift is None:
functional_drift = []
# Code assumes 1D input arrays. Ensures that any extraneous dimensions
# don't get in the way. Copies are created to avoid any problems with
# referencing the original passed arguments.
self.X_ORIG = np.atleast_1d(np.squeeze(np.array(x, copy=True)))
self.Y_ORIG = np.atleast_1d(np.squeeze(np.array(y, copy=True)))
self.Z_ORIG = np.atleast_1d(np.squeeze(np.array(z, copy=True)))
self.VALUES = np.atleast_1d(np.squeeze(np.array(val, copy=True)))
self.verbose = verbose
self.enable_plotting = enable_plotting
if self.enable_plotting and self.verbose:
print "Plotting Enabled\n"
self.XCENTER = (np.amax(self.X_ORIG) + np.amin(self.X_ORIG))/2.0
self.YCENTER = (np.amax(self.Y_ORIG) + np.amin(self.Y_ORIG))/2.0
self.ZCENTER = (np.amax(self.Z_ORIG) + np.amin(self.Z_ORIG))/2.0
self.anisotropy_scaling_y = anisotropy_scaling_y
self.anisotropy_scaling_z = anisotropy_scaling_z
self.anisotropy_angle_x = anisotropy_angle_x
self.anisotropy_angle_y = anisotropy_angle_y
self.anisotropy_angle_z = anisotropy_angle_z
if self.verbose:
print "Adjusting data for anisotropy..."
self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED = \
core.adjust_for_anisotropy_3d(np.copy(self.X_ORIG), np.copy(self.Y_ORIG), np.copy(self.Z_ORIG),
self.XCENTER, self.YCENTER, self.ZCENTER, self.anisotropy_scaling_y,
self.anisotropy_scaling_z, self.anisotropy_angle_x, self.anisotropy_angle_y,
self.anisotropy_angle_z)
self.variogram_model = variogram_model
if self.variogram_model not in self.variogram_dict.keys() and self.variogram_model != 'custom':
raise ValueError("Specified variogram model '%s' is not supported." % variogram_model)
elif self.variogram_model == 'custom':
if variogram_function is None or not callable(variogram_function):
raise ValueError("Must specify callable function for custom variogram model.")
else:
self.variogram_function = variogram_function
else:
self.variogram_function = self.variogram_dict[self.variogram_model]
if self.verbose:
print "Initializing variogram model..."
self.lags, self.semivariance, self.variogram_model_parameters = \
core.initialize_variogram_model_3d(self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED, self.VALUES,
self.variogram_model, variogram_parameters, self.variogram_function,
nlags, weight)
if self.verbose:
if self.variogram_model == 'linear':
print "Using '%s' Variogram Model" % 'linear'
print "Slope:", self.variogram_model_parameters[0]
print "Nugget:", self.variogram_model_parameters[1], '\n'
elif self.variogram_model == 'power':
print "Using '%s' Variogram Model" % 'power'
print "Scale:", self.variogram_model_parameters[0]
print "Exponent:", self.variogram_model_parameters[1]
print "Nugget:", self.variogram_model_parameters[2], '\n'
elif self.variogram_model == 'custom':
print "Using Custom Variogram Model"
else:
print "Using '%s' Variogram Model" % self.variogram_model
print "Sill:", self.variogram_model_parameters[0]
print "Range:", self.variogram_model_parameters[1]
print "Nugget:", self.variogram_model_parameters[2], '\n'
if self.enable_plotting:
self.display_variogram_model()
if self.verbose:
print "Calculating statistics on variogram model fit..."
self.delta, self.sigma, self.epsilon = core.find_statistics_3d(self.X_ADJUSTED, self.Y_ADJUSTED,
self.Z_ADJUSTED, self.VALUES,
self.variogram_function,
self.variogram_model_parameters)
self.Q1 = core.calcQ1(self.epsilon)
self.Q2 = core.calcQ2(self.epsilon)
self.cR = core.calc_cR(self.Q2, self.sigma)
if self.verbose:
print "Q1 =", self.Q1
print "Q2 =", self.Q2
print "cR =", self.cR, '\n'
if self.verbose:
print "Initializing drift terms..."
# Note that the regional linear drift values will be based on the adjusted coordinate system.
# Really, it doesn't actually matter which coordinate system is used here.
if 'regional_linear' in drift_terms:
self.regional_linear_drift = True
if self.verbose:
print "Implementing regional linear drift."
else:
self.regional_linear_drift = False
if 'specified' in drift_terms:
if type(specified_drift) is not list:
raise TypeError("Arrays for specified drift terms must be encapsulated in a list.")
if len(specified_drift) == 0:
raise ValueError("Must provide at least one drift-value array when using the "
"'specified' drift capability.")
self.specified_drift = True
self.specified_drift_data_arrays = []
for term in specified_drift:
specified = np.squeeze(np.array(term, copy=True))
if specified.size != self.X_ORIG.size:
raise ValueError("Must specify the drift values for each data point when using the "
"'specified' drift capability.")
self.specified_drift_data_arrays.append(specified)
else:
self.specified_drift = False
# The provided callable functions will be evaluated using the adjusted coordinates.
if 'functional' in drift_terms:
if type(functional_drift) is not list:
raise TypeError("Callables for functional drift terms must be encapsulated in a list.")
if len(functional_drift) == 0:
raise ValueError("Must provide at least one callable object when using the "
"'functional' drift capability.")
self.functional_drift = True
self.functional_drift_terms = functional_drift
else:
self.functional_drift = False
def update_variogram_model(self, variogram_model, variogram_parameters=None, variogram_function=None,
nlags=6, weight=False, anisotropy_scaling_y=1.0, anisotropy_scaling_z=1.0,
anisotropy_angle_x=0.0, anisotropy_angle_y=0.0, anisotropy_angle_z=0.0):
"""Allows user to update variogram type and/or variogram model parameters."""
if anisotropy_scaling_y != self.anisotropy_scaling_y or anisotropy_scaling_z != self.anisotropy_scaling_z or \
anisotropy_angle_x != self.anisotropy_angle_x or anisotropy_angle_y != self.anisotropy_angle_y or \
anisotropy_angle_z != self.anisotropy_angle_z:
if self.verbose:
print "Adjusting data for anisotropy..."
self.anisotropy_scaling_y = anisotropy_scaling_y
self.anisotropy_scaling_z = anisotropy_scaling_z
self.anisotropy_angle_x = anisotropy_angle_x
self.anisotropy_angle_y = anisotropy_angle_y
self.anisotropy_angle_z = anisotropy_angle_z
self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED = \
core.adjust_for_anisotropy_3d(np.copy(self.X_ORIG), np.copy(self.Y_ORIG), np.copy(self.Z_ORIG),
self.XCENTER, self.YCENTER, self.ZCENTER, self.anisotropy_scaling_y,
self.anisotropy_scaling_z, self.anisotropy_angle_x,
self.anisotropy_angle_y, self.anisotropy_angle_z)
self.variogram_model = variogram_model
if self.variogram_model not in self.variogram_dict.keys() and self.variogram_model != 'custom':
raise ValueError("Specified variogram model '%s' is not supported." % variogram_model)
elif self.variogram_model == 'custom':
if variogram_function is None or not callable(variogram_function):
raise ValueError("Must specify callable function for custom variogram model.")
else:
self.variogram_function = variogram_function
else:
self.variogram_function = self.variogram_dict[self.variogram_model]
if self.verbose:
print "Updating variogram mode..."
self.lags, self.semivariance, self.variogram_model_parameters = \
core.initialize_variogram_model_3d(self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED, self.VALUES,
self.variogram_model, variogram_parameters, self.variogram_function,
nlags, weight)
if self.verbose:
if self.variogram_model == 'linear':
print "Using '%s' Variogram Model" % 'linear'
print "Slope:", self.variogram_model_parameters[0]
print "Nugget:", self.variogram_model_parameters[1], '\n'
elif self.variogram_model == 'power':
print "Using '%s' Variogram Model" % 'power'
print "Scale:", self.variogram_model_parameters[0]
print "Exponent:", self.variogram_model_parameters[1]
print "Nugget:", self.variogram_model_parameters[2], '\n'
elif self.variogram_model == 'custom':
print "Using Custom Variogram Model"
else:
print "Using '%s' Variogram Model" % self.variogram_model
print "Sill:", self.variogram_model_parameters[0]
print "Range:", self.variogram_model_parameters[1]
print "Nugget:", self.variogram_model_parameters[2], '\n'
if self.enable_plotting:
self.display_variogram_model()
if self.verbose:
print "Calculating statistics on variogram model fit..."
self.delta, self.sigma, self.epsilon = core.find_statistics_3d(self.X_ADJUSTED, self.Y_ADJUSTED,
self.Z_ADJUSTED, self.VALUES,
self.variogram_function,
self.variogram_model_parameters)
self.Q1 = core.calcQ1(self.epsilon)
self.Q2 = core.calcQ2(self.epsilon)
self.cR = core.calc_cR(self.Q2, self.sigma)
if self.verbose:
print "Q1 =", self.Q1
print "Q2 =", self.Q2
print "cR =", self.cR, '\n'
def display_variogram_model(self):
"""Displays variogram model with the actual binned data"""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(self.lags, self.semivariance, 'r*')
ax.plot(self.lags,
self.variogram_function(self.variogram_model_parameters, self.lags), 'k-')
plt.show()
def switch_verbose(self):
"""Allows user to switch code talk-back on/off. Takes no arguments."""
self.verbose = not self.verbose
def switch_plotting(self):
"""Allows user to switch plot display on/off. Takes no arguments."""
self.enable_plotting = not self.enable_plotting
def get_epsilon_residuals(self):
"""Returns the epsilon residuals for the variogram fit."""
return self.epsilon
def plot_epsilon_residuals(self):
"""Plots the epsilon residuals for the variogram fit."""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(range(self.epsilon.size), self.epsilon, c='k', marker='*')
ax.axhline(y=0.0)
plt.show()
def get_statistics(self):
return self.Q1, self.Q2, self.cR
def print_statistics(self):
print "Q1 =", self.Q1
print "Q2 =", self.Q2
print "cR =", self.cR
def _get_kriging_matrix(self, n, n_withdrifts):
"""Assembles the kriging matrix."""
xyz = np.concatenate((self.X_ADJUSTED[:, np.newaxis], self.Y_ADJUSTED[:, np.newaxis],
self.Z_ADJUSTED[:, np.newaxis]), axis=1)
d = cdist(xyz, xyz, 'euclidean')
if self.UNBIAS:
a = np.zeros((n_withdrifts+1, n_withdrifts+1))
else:
a = np.zeros((n_withdrifts, n_withdrifts))
a[:n, :n] = - self.variogram_function(self.variogram_model_parameters, d)
np.fill_diagonal(a, 0.)
i = n
if self.regional_linear_drift:
a[:n, i] = self.X_ADJUSTED
a[i, :n] = self.X_ADJUSTED
i += 1
a[:n, i] = self.Y_ADJUSTED
a[i, :n] = self.Y_ADJUSTED
i += 1
a[:n, i] = self.Z_ADJUSTED
a[i, :n] = self.Z_ADJUSTED
i += 1
if self.specified_drift:
for arr in self.specified_drift_data_arrays:
a[:n, i] = arr
a[i, :n] = arr
i += 1
if self.functional_drift:
for func in self.functional_drift_terms:
a[:n, i] = func(self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED)
a[i, :n] = func(self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED)
i += 1
if i != n_withdrifts:
print "WARNING: Error in creating kriging matrix. Kriging may fail."
if self.UNBIAS:
a[n_withdrifts, :n] = 1.0
a[:n, n_withdrifts] = 1.0
a[n:n_withdrifts + 1, n:n_withdrifts + 1] = 0.0
return a
def _exec_vector(self, a, bd, xyz, mask, n_withdrifts, spec_drift_grids):
"""Solves the kriging system as a vectorized operation. This method
can take a lot of memory for large grids and/or large datasets."""
npt = bd.shape[0]
n = self.X_ADJUSTED.shape[0]
zero_index = None
zero_value = False
a_inv = scipy.linalg.inv(a)
if np.any(np.absolute(bd) <= self.eps):
zero_value = True
zero_index = np.where(np.absolute(bd) <= self.eps)
if self.UNBIAS:
b = np.zeros((npt, n_withdrifts+1, 1))
else:
b = np.zeros((npt, n_withdrifts, 1))
b[:, :n, 0] = - self.variogram_function(self.variogram_model_parameters, bd)
if zero_value:
b[zero_index[0], zero_index[1], 0] = 0.0
i = n
if self.regional_linear_drift:
b[:, i, 0] = xyz[:, 2]
i += 1
b[:, i, 0] = xyz[:, 1]
i += 1
b[:, i, 0] = xyz[:, 0]
i += 1
if self.specified_drift:
for spec_vals in spec_drift_grids:
b[:, i, 0] = spec_vals.flatten()
i += 1
if self.functional_drift:
for func in self.functional_drift_terms:
b[:, i, 0] = func(xyz[:, 2], xyz[:, 1], xyz[:, 0])
i += 1
if i != n_withdrifts:
print "WARNING: Error in setting up kriging system. Kriging may fail."
if self.UNBIAS:
b[:, n_withdrifts, 0] = 1.0
if (~mask).any():
mask_b = np.repeat(mask[:, np.newaxis, np.newaxis], n_withdrifts+1, axis=1)
b = np.ma.array(b, mask=mask_b)
if self.UNBIAS:
x = np.dot(a_inv, b.reshape((npt, n_withdrifts+1)).T).reshape((1, n_withdrifts+1, npt)).T
else:
x = np.dot(a_inv, b.reshape((npt, n_withdrifts)).T).reshape((1, n_withdrifts, npt)).T
kvalues = np.sum(x[:, :n, 0] * self.VALUES, axis=1)
sigmasq = np.sum(x[:, :, 0] * -b[:, :, 0], axis=1)
return kvalues, sigmasq
def _exec_loop(self, a, bd_all, xyz, mask, n_withdrifts, spec_drift_grids):
"""Solves the kriging system by looping over all specified points.
Less memory-intensive, but involves a Python-level loop."""
npt = bd_all.shape[0]
n = self.X_ADJUSTED.shape[0]
kvalues = np.zeros(npt)
sigmasq = np.zeros(npt)
a_inv = scipy.linalg.inv(a)
for j in np.nonzero(~mask)[0]: # Note that this is the same thing as range(npt) if mask is not defined,
bd = bd_all[j] # otherwise it takes the non-masked elements.
if np.any(np.absolute(bd) <= self.eps):
zero_value = True
zero_index = np.where(np.absolute(bd) <= self.eps)
else:
zero_value = False
zero_index = None
if self.UNBIAS:
b = np.zeros((n_withdrifts+1, 1))
else:
b = np.zeros((n_withdrifts, 1))
b[:n, 0] = - self.variogram_function(self.variogram_model_parameters, bd)
if zero_value:
b[zero_index[0], 0] = 0.0
i = n
if self.regional_linear_drift:
b[i, 0] = xyz[j, 2]
i += 1
b[i, 0] = xyz[j, 1]
i += 1
b[i, 0] = xyz[j, 0]
i += 1
if self.specified_drift:
for spec_vals in spec_drift_grids:
                    b[i, 0] = spec_vals.flatten()[j]  # drift value at the j-th kriging point
i += 1
if self.functional_drift:
for func in self.functional_drift_terms:
b[i, 0] = func(xyz[j, 2], xyz[j, 1], xyz[j, 0])
i += 1
if i != n_withdrifts:
print "WARNING: Error in setting up kriging system. Kriging may fail."
if self.UNBIAS:
b[n_withdrifts, 0] = 1.0
x = np.dot(a_inv, b)
kvalues[j] = np.sum(x[:n, 0] * self.VALUES)
sigmasq[j] = np.sum(x[:, 0] * -b[:, 0])
return kvalues, sigmasq
def execute(self, style, xpoints, ypoints, zpoints, mask=None, backend='vectorized', specified_drift_arrays=None):
"""Calculates a kriged grid and the associated variance.
This is now the method that performs the main kriging calculation. Note that currently
        measurements (i.e., 'val' values) are considered 'exact'. This means that, when a specified
coordinate for interpolation is exactly the same as one of the data points, the variogram
evaluated at the point is forced to be zero. Also, the diagonal of the kriging matrix is
also always forced to be zero. In forcing the variogram evaluated at data points to be zero,
we are effectively saying that there is no variance at that point (no uncertainty,
so the value is 'exact').
In the future, the code may include an extra 'exact_values' boolean flag that can be
adjusted to specify whether to treat the measurements as 'exact'. Setting the flag
to false would indicate that the variogram should not be forced to be zero at zero distance
(i.e., when evaluated at data points). Instead, the uncertainty in the point will be
equal to the nugget. This would mean that the diagonal of the kriging matrix would be set to
the nugget instead of to zero.
Inputs:
style (string): Specifies how to treat input kriging points.
Specifying 'grid' treats xpoints, ypoints, and zpoints as arrays of
x, y, and z coordinates that define a rectangular grid.
Specifying 'points' treats xpoints, ypoints, and zpoints as arrays
that provide coordinates at which to solve the kriging system.
Specifying 'masked' treats xpoints, ypoints, and zpoints as arrays of
x, y, and z coordinates that define a rectangular grid and uses mask
to only evaluate specific points in the grid.
            xpoints (array-like, dim N): If style is specified as 'grid' or 'masked',
                x-coordinates of LxMxN grid. If style is specified as 'points',
x-coordinates of specific points at which to solve kriging system.
ypoints (array-like, dim M): If style is specified as 'grid' or 'masked',
y-coordinates of LxMxN grid. If style is specified as 'points',
y-coordinates of specific points at which to solve kriging system.
Note that in this case, xpoints, ypoints, and zpoints must have the
same dimensions (i.e., L = M = N).
zpoints (array-like, dim L): If style is specified as 'grid' or 'masked',
z-coordinates of LxMxN grid. If style is specified as 'points',
z-coordinates of specific points at which to solve kriging system.
Note that in this case, xpoints, ypoints, and zpoints must have the
same dimensions (i.e., L = M = N).
mask (boolean array, dim LxMxN, optional): Specifies the points in the rectangular
grid defined by xpoints, ypoints, zpoints that are to be excluded in the
kriging calculations. Must be provided if style is specified as 'masked'.
False indicates that the point should not be masked, so the kriging system
will be solved at the point.
                True indicates that the point should be masked, so the kriging system
will not be solved at the point.
backend (string, optional): Specifies which approach to use in kriging.
Specifying 'vectorized' will solve the entire kriging problem at once in a
vectorized operation. This approach is faster but also can consume a
significant amount of memory for large grids and/or large datasets.
Specifying 'loop' will loop through each point at which the kriging system
is to be solved. This approach is slower but also less memory-intensive.
Default is 'vectorized'.
specified_drift_arrays (list of array-like objects, optional): Specifies the drift
values at the points at which the kriging system is to be evaluated. Required if
'specified' drift provided in the list of drift terms when instantiating the
UniversalKriging3D class. Must be a list of arrays in the same order as the list
provided when instantiating the kriging object. Array(s) must be the same dimension
as the specified grid or have the same number of points as the specified points;
i.e., the arrays either must be dim LxMxN, where L is the number of z grid-points,
M is the number of y grid-points, and N is the number of x grid-points,
or dim N, where N is the number of points at which to evaluate the kriging system.
Outputs:
kvalues (numpy array, dim LxMxN or dim N): Interpolated values of specified grid
or at the specified set of points. If style was specified as 'masked',
kvalues will be a numpy masked array.
sigmasq (numpy array, dim LxMxN or dim N): Variance at specified grid points or
at the specified set of points. If style was specified as 'masked', sigmasq
will be a numpy masked array.
"""
if self.verbose:
print "Executing Ordinary Kriging...\n"
if style != 'grid' and style != 'masked' and style != 'points':
raise ValueError("style argument must be 'grid', 'points', or 'masked'")
xpts = np.atleast_1d(np.squeeze(np.array(xpoints, copy=True)))
ypts = np.atleast_1d(np.squeeze(np.array(ypoints, copy=True)))
zpts = np.atleast_1d(np.squeeze(np.array(zpoints, copy=True)))
n = self.X_ADJUSTED.shape[0]
n_withdrifts = n
if self.regional_linear_drift:
n_withdrifts += 3
if self.specified_drift:
n_withdrifts += len(self.specified_drift_data_arrays)
if self.functional_drift:
n_withdrifts += len(self.functional_drift_terms)
nx = xpts.size
ny = ypts.size
nz = zpts.size
a = self._get_kriging_matrix(n, n_withdrifts)
if style in ['grid', 'masked']:
if style == 'masked':
if mask is None:
raise IOError("Must specify boolean masking array when style is 'masked'.")
if mask.ndim != 3:
raise ValueError("Mask is not three-dimensional.")
if mask.shape[0] != nz or mask.shape[1] != ny or mask.shape[2] != nx:
if mask.shape[0] == nx and mask.shape[2] == nz and mask.shape[1] == ny:
mask = mask.swapaxes(0, 2)
else:
raise ValueError("Mask dimensions do not match specified grid dimensions.")
mask = mask.flatten()
npt = nz * ny * nx
grid_z, grid_y, grid_x = np.meshgrid(zpts, ypts, xpts, indexing='ij')
xpts = grid_x.flatten()
ypts = grid_y.flatten()
zpts = grid_z.flatten()
elif style == 'points':
            if xpts.size != ypts.size or ypts.size != zpts.size:
                raise ValueError("xpoints, ypoints, and zpoints must have same dimensions "
"when treated as listing discrete points.")
npt = nx
else:
raise ValueError("style argument must be 'grid', 'points', or 'masked'")
if specified_drift_arrays is None:
specified_drift_arrays = []
spec_drift_grids = []
if self.specified_drift:
if len(specified_drift_arrays) == 0:
raise ValueError("Must provide drift values for kriging points when using "
"'specified' drift capability.")
if type(specified_drift_arrays) is not list:
raise TypeError("Arrays for specified drift terms must be encapsulated in a list.")
for spec in specified_drift_arrays:
if style in ['grid', 'masked']:
if spec.ndim < 3:
raise ValueError("Dimensions of drift values array do not match specified grid dimensions.")
elif spec.shape[0] != nz or spec.shape[1] != ny or spec.shape[2] != nx:
if spec.shape[0] == nx and spec.shape[2] == nz and spec.shape[1] == ny:
spec_drift_grids.append(np.squeeze(spec.swapaxes(0, 2)))
else:
raise ValueError("Dimensions of drift values array do not match specified grid dimensions.")
else:
spec_drift_grids.append(np.squeeze(spec))
elif style == 'points':
if spec.ndim != 1:
raise ValueError("Dimensions of drift values array do not match specified grid dimensions.")
elif spec.shape[0] != xpts.size:
raise ValueError("Number of supplied drift values in array do not match "
"specified number of kriging points.")
else:
spec_drift_grids.append(np.squeeze(spec))
if len(spec_drift_grids) != len(self.specified_drift_data_arrays):
raise ValueError("Inconsistent number of specified drift terms supplied.")
else:
if len(specified_drift_arrays) != 0:
print "WARNING: Provided specified drift values, but 'specified' drift was not initialized during " \
"instantiation of UniversalKriging3D class."
xpts, ypts, zpts = core.adjust_for_anisotropy_3d(xpts, ypts, zpts, self.XCENTER, self.YCENTER, self.ZCENTER,
self.anisotropy_scaling_y, self.anisotropy_scaling_z,
self.anisotropy_angle_x, self.anisotropy_angle_y,
self.anisotropy_angle_z)
if style != 'masked':
mask = np.zeros(npt, dtype='bool')
xyz_points = np.concatenate((zpts[:, np.newaxis], ypts[:, np.newaxis], xpts[:, np.newaxis]), axis=1)
xyz_data = np.concatenate((self.Z_ADJUSTED[:, np.newaxis], self.Y_ADJUSTED[:, np.newaxis],
self.X_ADJUSTED[:, np.newaxis]), axis=1)
bd = cdist(xyz_points, xyz_data, 'euclidean')
if backend == 'vectorized':
kvalues, sigmasq = self._exec_vector(a, bd, xyz_points, mask, n_withdrifts, spec_drift_grids)
elif backend == 'loop':
kvalues, sigmasq = self._exec_loop(a, bd, xyz_points, mask, n_withdrifts, spec_drift_grids)
else:
            raise ValueError('Specified backend {} is not supported for 3D universal kriging.'.format(backend))
if style == 'masked':
kvalues = np.ma.array(kvalues, mask=mask)
sigmasq = np.ma.array(sigmasq, mask=mask)
if style in ['masked', 'grid']:
kvalues = kvalues.reshape((nz, ny, nx))
sigmasq = sigmasq.reshape((nz, ny, nx))
        return kvalues, sigmasq
| bsd-3-clause |
yantrabuddhi/opencog | opencog/nlp/sentiment/basic_sentiment_analysis.py | 11 | 6919 | # coding: utf-8
"""
basic_sentiment_analysis
~~~~~~~~~~~~~~~~~~~~~~~~
This module contains the code and examples described in
http://fjavieralba.com/basic-sentiment-analysis-with-python.html
Modified by Ruiting Lian, 2016/7
"""
import nltk
import yaml
import sys
import os
import re
class Splitter(object):
def __init__(self):
self.nltk_splitter = nltk.data.load('tokenizers/punkt/english.pickle')
self.nltk_tokenizer = nltk.tokenize.TreebankWordTokenizer()
def split(self, text):
"""
input format: a paragraph of text
output format: a list of lists of words.
e.g.: [['this', 'is', 'a', 'sentence'], ['this', 'is', 'another', 'one']]
"""
sentences = self.nltk_splitter.tokenize(text)
tokenized_sentences = [self.nltk_tokenizer.tokenize(sent) for sent in sentences]
return tokenized_sentences
class POSTagger(object):
def __init__(self):
pass
def pos_tag(self, sentences):
"""
input format: list of lists of words
e.g.: [['this', 'is', 'a', 'sentence'], ['this', 'is', 'another', 'one']]
output format: list of lists of tagged tokens. Each tagged tokens has a
form, a lemma, and a list of tags
e.g: [[('this', 'this', ['DT']), ('is', 'be', ['VB']), ('a', 'a', ['DT']), ('sentence', 'sentence', ['NN'])],
[('this', 'this', ['DT']), ('is', 'be', ['VB']), ('another', 'another', ['DT']), ('one', 'one', ['CARD'])]]
"""
pos = [nltk.pos_tag(sentence) for sentence in sentences]
#adapt format
pos = [[(word, word, [postag]) for (word, postag) in sentence] for sentence in pos]
return pos
class DictionaryTagger(object):
def __init__(self, dictionary_paths):
files = [open(path, 'r') for path in dictionary_paths]
dictionaries = [yaml.safe_load(dict_file) for dict_file in files]
        for dict_file in files:
            dict_file.close()
self.dictionary = {}
self.max_key_size = 0
for curr_dict in dictionaries:
for key in curr_dict:
if key in self.dictionary:
self.dictionary[key].extend(curr_dict[key])
elif key is not False and key is not True:
self.dictionary[key] = curr_dict[key]
self.max_key_size = max(self.max_key_size, len(key))
elif key is False:
# print curr_dict[key]
key = "false"
                    self.dictionary[key] = curr_dict[False]
self.max_key_size = max(self.max_key_size, len(key))
else:
key = "true"
                    self.dictionary[key] = curr_dict[True]
self.max_key_size = max(self.max_key_size, len(key))
def tag(self, postagged_sentences):
return [self.tag_sentence(sentence) for sentence in postagged_sentences]
def tag_sentence(self, sentence, tag_with_lemmas=False):
"""
the result is only one tagging of all the possible ones.
The resulting tagging is determined by these two priority rules:
- longest matches have higher priority
- search is made from left to right
"""
tag_sentence = []
N = len(sentence)
if self.max_key_size == 0:
self.max_key_size = N
i = 0
while (i < N):
j = min(i + self.max_key_size, N) #avoid overflow
tagged = False
while (j > i):
expression_form = ' '.join([word[0] for word in sentence[i:j]]).lower()
expression_lemma = ' '.join([word[1] for word in sentence[i:j]]).lower()
if tag_with_lemmas:
literal = expression_lemma
else:
literal = expression_form
if literal in self.dictionary:
#self.logger.debug("found: %s" % literal)
is_single_token = j - i == 1
original_position = i
i = j
taggings = [tag for tag in self.dictionary[literal]]
tagged_expression = (expression_form, expression_lemma, taggings)
if is_single_token: #if the tagged literal is a single token, conserve its previous taggings:
original_token_tagging = sentence[original_position][2]
tagged_expression[2].extend(original_token_tagging)
tag_sentence.append(tagged_expression)
tagged = True
else:
j = j - 1
if not tagged:
tag_sentence.append(sentence[i])
i += 1
return tag_sentence
def value_of(sentiment):
if sentiment == 'positive': return 1
if sentiment == 'negative': return -1
return 0
def sentence_score(sentence_tokens, previous_token, acum_score, neg_num):
if not sentence_tokens:
if(neg_num % 2 == 0):
return acum_score
else:
acum_score *= -1.0
return acum_score
else:
current_token = sentence_tokens[0]
tags = current_token[2]
token_score = sum([value_of(tag) for tag in tags])
if previous_token is not None:
previous_tags = previous_token[2]
if 'inc' in previous_tags:
token_score *= 2.0
elif 'dec' in previous_tags:
token_score /= 2.0
elif 'inv' in previous_tags:
neg_num += 1
return sentence_score(sentence_tokens[1:], current_token, acum_score + token_score, neg_num)
def sentiment_score(review):
return sum([sentence_score(sentence, None, 0.0, 0) for sentence in review])
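# Worked examples of the scoring rules above (they assume that 'good' is tagged
# positive (+1), 'bad' negative (-1), 'very' is an 'inc' modifier and 'not' an
# 'inv' modifier in the YAML dictionaries):
#   "very good" -> score of 'good' doubled by the preceding 'inc'       -> +2.0
#   "not good"  -> 'inv' increments neg_num, so the sign is flipped     -> -1.0
#   "not bad"   -> -1 flipped by the single 'inv'                       -> +1.0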
configpath = '/usr/local/etc/'
path = os.path.join(configpath, 'opencog/dicts');
dictfilenames = ['positive.yml', 'negative.yml', 'inc.yml', 'dec.yml', 'inv.yml']
dicttagger = DictionaryTagger([os.path.join(path, d) for d in dictfilenames])
def sentiment_parse(plain_text):
splitter = Splitter()
postagger = POSTagger()
splitted_sentences = splitter.split(plain_text)
pos_tagged_sentences = postagger.pos_tag(splitted_sentences)
dict_tagged_sentences = dicttagger.tag(pos_tagged_sentences)
score = sentiment_score(dict_tagged_sentences)
return score
if __name__ == "__main__":
#text = """What can I say about this place. The staff of the restaurant is
#nice and the eggplant is not bad. Apart from that, very uninspired food,
#lack of atmosphere and too expensive. I am a staunch vegetarian and was
#sorely dissapointed with the veggie options on the menu. Will be the last
#time I visit, I recommend others to avoid."""
text = """His statement is false. So he is a dishonest guy."""
score = sentiment_parse(text)
print(score)
| agpl-3.0 |
Yingmin-Li/keras | examples/cifar10_cnn.py | 35 | 4479 | from __future__ import absolute_import
from __future__ import print_function
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.optimizers import SGD, Adadelta, Adagrad
from keras.utils import np_utils, generic_utils
from six.moves import range
'''
Train a (fairly simple) deep CNN on the CIFAR10 small images dataset.
GPU run command:
THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python cifar10_cnn.py
It gets down to 0.65 test logloss in 25 epochs, and down to 0.55 after 50 epochs.
(it's still underfitting at that point, though).
Note: the data was pickled with Python 2, and some encoding issues might prevent you
from loading it in Python 3. You might have to load it in Python 2,
save it in a different format, load it in Python 3 and repickle it.
'''
batch_size = 32
nb_classes = 10
nb_epoch = 200
data_augmentation = True
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Convolution2D(32, 3, 3, 3, border_mode='full'))
model.add(Activation('relu'))
model.add(Convolution2D(32, 32, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(64, 32, 3, 3, border_mode='full'))
model.add(Activation('relu'))
model.add(Convolution2D(64, 64, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(64*8*8, 512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(512, nb_classes))
model.add(Activation('softmax'))
# let's train the model using SGD + momentum (how original).
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd)
if not data_augmentation:
print("Not using data augmentation or normalization")
X_train = X_train.astype("float32")
X_test = X_test.astype("float32")
X_train /= 255
X_test /= 255
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch)
score = model.evaluate(X_test, Y_test, batch_size=batch_size)
print('Test score:', score)
else:
print("Using real time data augmentation")
# this will do preprocessing and realtime data augmentation
datagen = ImageDataGenerator(
featurewise_center=True, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=True, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=20, # randomly rotate images in the range (degrees, 0 to 180)
width_shift_range=0.2, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.2, # randomly shift images vertically (fraction of total height)
horizontal_flip=True, # randomly flip images
vertical_flip=False) # randomly flip images
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(X_train)
for e in range(nb_epoch):
print('-'*40)
print('Epoch', e)
print('-'*40)
print("Training...")
# batch train with realtime data augmentation
progbar = generic_utils.Progbar(X_train.shape[0])
for X_batch, Y_batch in datagen.flow(X_train, Y_train):
loss = model.train_on_batch(X_batch, Y_batch)
progbar.add(X_batch.shape[0], values=[("train loss", loss)])
print("Testing...")
# test time!
progbar = generic_utils.Progbar(X_test.shape[0])
for X_batch, Y_batch in datagen.flow(X_test, Y_test):
score = model.test_on_batch(X_batch, Y_batch)
progbar.add(X_batch.shape[0], values=[("test loss", score)])
| mit |
herilalaina/scikit-learn | sklearn/ensemble/forest.py | 6 | 79027 | """Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from warnings import warn
import threading
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from scipy.sparse import hstack as sparse_hstack
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..exceptions import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import parallel_helper
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_is_fitted
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
def _generate_sample_indices(random_state, n_samples):
"""Private function used to _parallel_build_trees function."""
random_instance = check_random_state(random_state)
sample_indices = random_instance.randint(0, n_samples, n_samples)
return sample_indices
def _generate_unsampled_indices(random_state, n_samples):
"""Private function used to forest._set_oob_score function."""
sample_indices = _generate_sample_indices(random_state, n_samples)
sample_counts = np.bincount(sample_indices, minlength=n_samples)
unsampled_mask = sample_counts == 0
indices_range = np.arange(n_samples)
unsampled_indices = indices_range[unsampled_mask]
return unsampled_indices
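# A minimal sketch (hypothetical helper, not used by the library) showing how
# the two index generators above relate: every sample index is either drawn
# into the bootstrap sample or left out as an out-of-bag (OOB) index.
def _example_bootstrap_partition(random_state=0, n_samples=10):
    sampled = _generate_sample_indices(random_state, n_samples)
    unsampled = _generate_unsampled_indices(random_state, n_samples)
    # The union of both sets always covers all indices; the OOB set holds
    # roughly exp(-1) ~ 37% of the samples for large n_samples.
    assert set(sampled) | set(unsampled) == set(range(n_samples))
    return sampled, unsampled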
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, n_samples)
sample_counts = np.bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
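# Illustrative sketch (hypothetical helper): _parallel_build_trees emulates
# bootstrap resampling by re-weighting samples with their draw counts instead
# of materialising the resampled matrix, which keeps memory usage flat.
def _example_bootstrap_weights(random_state=0, n_samples=5):
    indices = _generate_sample_indices(random_state, n_samples)
    # A sample drawn k times receives weight k; samples never drawn get
    # weight 0 and therefore do not influence the fitted tree.
    return np.bincount(indices, minlength=n_samples).astype(np.float64)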
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(BaseForest, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.class_weight = class_weight
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = self._validate_X_predict(X)
results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(tree, 'apply', X, check_input=False)
for tree in self.estimators_)
return np.array(results).T
def decision_path(self, X):
"""Return the decision path in the forest
.. versionadded:: 0.18
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
            Return a node indicator matrix in which non-zero elements
            indicate that the corresponding samples go through the nodes.
n_nodes_ptr : array of size (n_estimators + 1, )
The columns from indicator[n_nodes_ptr[i]:n_nodes_ptr[i+1]]
gives the indicator value for the i-th estimator.
"""
X = self._validate_X_predict(X)
indicators = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(tree, 'decision_path', X,
check_input=False)
for tree in self.estimators_)
n_nodes = [0]
n_nodes.extend([i.shape[1] for i in indicators])
n_nodes_ptr = np.array(n_nodes).cumsum()
return sparse_hstack(indicators).tocsr(), n_nodes_ptr
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
# Validate or convert input data
X = check_array(X, accept_sparse="csc", dtype=DTYPE)
y = check_array(y, accept_sparse='csc', ensure_2d=False, dtype=None)
if sample_weight is not None:
sample_weight = check_array(sample_weight, ensure_2d=False)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
# Remap output
n_samples, self.n_features_ = X.shape
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples,), for example using ravel().",
DataConversionWarning, stacklevel=2)
if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity,
            # unlike [:, np.newaxis], which does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y, expanded_class_weight = self._validate_y_class_weight(y)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Check parameters
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
random_state = check_random_state(self.random_state)
if not self.warm_start or not hasattr(self, "estimators_"):
# Free allocated memory, if any
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError('n_estimators=%d must be larger or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
else:
if self.warm_start and len(self.estimators_) > 0:
# We draw from the random state to get the random state we
# would have got if we hadn't used a warm_start.
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = []
for i in range(n_more_estimators):
tree = self._make_estimator(append=False,
random_state=random_state)
trees.append(tree)
# Parallel loop: we use the threading backend as the Cython code
# for fitting the trees is internally releasing the Python GIL
# making threading always more efficient than multiprocessing in
# that case.
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_build_trees)(
t, self, X, y, sample_weight, i, len(trees),
verbose=self.verbose, class_weight=self.class_weight)
for i, t in enumerate(trees))
# Collect newly grown trees
self.estimators_.extend(trees)
if self.oob_score:
self._set_oob_score(X, y)
# Decapsulate classes_ attributes
if hasattr(self, "classes_") and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
@abstractmethod
def _set_oob_score(self, X, y):
"""Calculate out of bag predictions and score."""
def _validate_y_class_weight(self, y):
# Default implementation
return y, None
def _validate_X_predict(self, X):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
return self.estimators_[0]._validate_X_predict(X, check_input=True)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
check_is_fitted(self, 'estimators_')
all_importances = Parallel(n_jobs=self.n_jobs,
backend="threading")(
delayed(getattr)(tree, 'feature_importances_')
for tree in self.estimators_)
return sum(all_importances) / len(self.estimators_)
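# Usage sketch (hypothetical helper, assumes an already fitted forest): the
# feature_importances_ property above averages the per-tree impurity-based
# importances, so ranking features only takes an argsort.
def _example_rank_features(fitted_forest):
    importances = fitted_forest.feature_importances_
    return np.argsort(importances)[::-1]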
# This is a utility function for joblib's Parallel. It can't go locally in
# ForestClassifier or ForestRegressor, because joblib complains that it cannot
# pickle it when placed there.
def accumulate_prediction(predict, X, out, lock):
prediction = predict(X, check_input=False)
with lock:
if len(out) == 1:
out[0] += prediction
else:
for i in range(len(out)):
out[i] += prediction[i]
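# Minimal sketch (hypothetical helper, assumes classifier sub-estimators and a
# validated X): the in-place accumulation above is what lets predict_proba sum
# per-tree probabilities under a lock instead of keeping one output array per
# estimator.
def _example_accumulate_proba(estimators, X, n_classes):
    out = [np.zeros((X.shape[0], n_classes), dtype=np.float64)]
    lock = threading.Lock()
    for est in estimators:
        accumulate_prediction(est.predict_proba, X, out, lock)
    out[0] /= len(estimators)
    return out[0]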
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
ClassifierMixin)):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ForestClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
def _set_oob_score(self, X, y):
"""Compute out-of-bag score"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_classes_ = self.n_classes_
n_samples = y.shape[0]
oob_decision_function = []
oob_score = 0.0
predictions = []
for k in range(self.n_outputs_):
predictions.append(np.zeros((n_samples, n_classes_[k])))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict_proba(X[unsampled_indices, :],
check_input=False)
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in range(self.n_outputs_):
predictions[k][unsampled_indices, :] += p_estimator[k]
for k in range(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
decision = (predictions[k] /
predictions[k].sum(axis=1)[:, np.newaxis])
oob_decision_function.append(decision)
oob_score += np.mean(y[:, k] ==
np.argmax(predictions[k], axis=1), axis=0)
if self.n_outputs_ == 1:
self.oob_decision_function_ = oob_decision_function[0]
else:
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score / self.n_outputs_
def _validate_y_class_weight(self, y):
check_classification_targets(y)
y = np.copy(y)
expanded_class_weight = None
if self.class_weight is not None:
y_original = np.copy(y)
self.classes_ = []
self.n_classes_ = []
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
valid_presets = ('balanced', 'balanced_subsample')
if isinstance(self.class_weight, six.string_types):
if self.class_weight not in valid_presets:
raise ValueError('Valid presets for class_weight include '
'"balanced" and "balanced_subsample". Given "%s".'
% self.class_weight)
if self.warm_start:
warn('class_weight presets "balanced" or "balanced_subsample" are '
'not recommended for warm_start if the fitted data '
'differs from the full dataset. In order to use '
'"balanced" weights, use compute_class_weight("balanced", '
'classes, y). In place of y you can use a large '
'enough sample of the full training set target to '
'properly estimate the class frequency '
'distributions. Pass the resulting weights as the '
'class_weight parameter.')
if (self.class_weight != 'balanced_subsample' or
not self.bootstrap):
if self.class_weight == "balanced_subsample":
class_weight = "balanced"
else:
class_weight = self.class_weight
expanded_class_weight = compute_sample_weight(class_weight,
y_original)
return y, expanded_class_weight
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the mean predicted class probabilities of the trees in the forest. The
class probability of a single tree is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
check_is_fitted(self, 'estimators_')
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# avoid storing the output of every estimator by summing them here
all_proba = [np.zeros((X.shape[0], j), dtype=np.float64)
for j in np.atleast_1d(self.n_classes_)]
lock = threading.Lock()
Parallel(n_jobs=n_jobs, verbose=self.verbose, backend="threading")(
delayed(accumulate_prediction)(e.predict_proba, X, all_proba, lock)
for e in self.estimators_)
for proba in all_proba:
proba /= len(self.estimators_)
if len(all_proba) == 1:
return all_proba[0]
else:
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample is computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ForestRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
check_is_fitted(self, 'estimators_')
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# avoid storing the output of every estimator by summing them here
if self.n_outputs_ > 1:
y_hat = np.zeros((X.shape[0], self.n_outputs_), dtype=np.float64)
else:
y_hat = np.zeros((X.shape[0]), dtype=np.float64)
# Parallel loop
lock = threading.Lock()
Parallel(n_jobs=n_jobs, verbose=self.verbose, backend="threading")(
delayed(accumulate_prediction)(e.predict, X, [y_hat], lock)
for e in self.estimators_)
y_hat /= len(self.estimators_)
return y_hat
def _set_oob_score(self, X, y):
"""Compute out-of-bag scores"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_samples = y.shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict(
X[unsampled_indices, :], check_input=False)
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[unsampled_indices, :] += p_estimator
n_predictions[unsampled_indices, :] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = \
self.oob_prediction_.reshape((n_samples, ))
self.oob_score_ = 0.0
for k in range(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k],
predictions[:, k])
self.oob_score_ /= self.n_outputs_
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
A random forest is a meta estimator that fits a number of decision tree
    classifiers on various sub-samples of the dataset and uses averaging to
improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float,
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19 and will be removed in 0.21.
Use ``min_impurity_decrease`` instead.
min_impurity_decrease : float, optional (default=0.)
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced",
"balanced_subsample" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
Note that for multioutput (including multilabel) weights should be
defined for each class of every column in its own dict. For example,
for four-class multilabel classification weights should be
[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of
[{1:1}, {2:5}, {3:1}, {4:1}].
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that
weights are computed based on the bootstrap sample for every tree
grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
Examples
--------
>>> from sklearn.ensemble import RandomForestClassifier
>>> from sklearn.datasets import make_classification
>>>
>>> X, y = make_classification(n_samples=1000, n_features=4,
... n_informative=2, n_redundant=0,
... random_state=0, shuffle=False)
>>> clf = RandomForestClassifier(max_depth=2, random_state=0)
>>> clf.fit(X, y)
RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',
max_depth=2, max_features='auto', max_leaf_nodes=None,
min_impurity_decrease=0.0, min_impurity_split=None,
min_samples_leaf=1, min_samples_split=2,
min_weight_fraction_leaf=0.0, n_estimators=10, n_jobs=1,
oob_score=False, random_state=0, verbose=0, warm_start=False)
>>> print(clf.feature_importances_)
[ 0.17287856 0.80608704 0.01884792 0.00218648]
>>> print(clf.predict([[0, 0, 0, 0]]))
[1]
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
The features are always randomly permuted at each split. Therefore,
the best found split may vary, even with the same training data,
``max_features=n_features`` and ``bootstrap=False``, if the improvement
of the criterion is identical for several splits enumerated during the
search of the best split. To obtain a deterministic behaviour during
fitting, ``random_state`` has to be fixed.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(RandomForestClassifier, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"min_impurity_decrease", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_decrease = min_impurity_decrease
self.min_impurity_split = min_impurity_split
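# Sketch (hypothetical helper, not part of the public API): with the default
# bootstrap=True, setting oob_score=True scores each sample only with the
# trees that did not see it, giving a free estimate of generalization
# accuracy in oob_score_.
def _example_rf_oob_score(X, y):
    clf = RandomForestClassifier(n_estimators=100, oob_score=True,
                                 random_state=0)
    clf.fit(X, y)
    return clf.oob_score_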
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
    A random forest is a meta estimator that fits a number of decision tree
    regressors on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float,
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19 and will be removed in 0.21.
Use ``min_impurity_decrease`` instead.
min_impurity_decrease : float, optional (default=0.)
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
        Whether to use out-of-bag samples to estimate
the R^2 on unseen data.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
Examples
--------
>>> from sklearn.ensemble import RandomForestRegressor
>>> from sklearn.datasets import make_regression
>>>
>>> X, y = make_regression(n_features=4, n_informative=2,
... random_state=0, shuffle=False)
>>> regr = RandomForestRegressor(max_depth=2, random_state=0)
>>> regr.fit(X, y)
RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=2,
max_features='auto', max_leaf_nodes=None,
min_impurity_decrease=0.0, min_impurity_split=None,
min_samples_leaf=1, min_samples_split=2,
min_weight_fraction_leaf=0.0, n_estimators=10, n_jobs=1,
oob_score=False, random_state=0, verbose=0, warm_start=False)
>>> print(regr.feature_importances_)
[ 0.17339552 0.81594114 0. 0.01066333]
>>> print(regr.predict([[0, 0, 0, 0]]))
[-2.50699856]
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
The features are always randomly permuted at each split. Therefore,
the best found split may vary, even with the same training data,
``max_features=n_features`` and ``bootstrap=False``, if the improvement
of the criterion is identical for several splits enumerated during the
search of the best split. To obtain a deterministic behaviour during
fitting, ``random_state`` has to be fixed.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomForestRegressor, self).__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"min_impurity_decrease", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_decrease = min_impurity_decrease
self.min_impurity_split = min_impurity_split
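# Sketch (hypothetical helper): with warm_start=True a second call to fit
# only grows the additional trees instead of refitting the whole forest.
def _example_warm_start_regressor(X, y):
    reg = RandomForestRegressor(n_estimators=10, warm_start=True,
                                random_state=0)
    reg.fit(X, y)
    reg.set_params(n_estimators=20)  # request 10 more trees
    reg.fit(X, y)                    # only the new trees are fitted
    return len(reg.estimators_)      # 20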
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float,
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19 and will be removed in 0.21.
Use ``min_impurity_decrease`` instead.
min_impurity_decrease : float, optional (default=0.)
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
Note that for multioutput (including multilabel) weights should be
defined for each class of every column in its own dict. For example,
for four-class multilabel classification weights should be
[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of
[{1:1}, {2:5}, {3:1}, {4:1}].
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"min_impurity_decrease", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_decrease = min_impurity_decrease
self.min_impurity_split = min_impurity_split
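# Sketch (hypothetical helper): extra-trees draw split thresholds at random
# and default to bootstrap=False, so every tree sees the full training set;
# out-of-bag scoring therefore additionally requires bootstrap=True.
def _example_extra_trees_oob(X, y):
    clf = ExtraTreesClassifier(n_estimators=100, bootstrap=True,
                               oob_score=True, random_state=0)
    clf.fit(X, y)
    return clf.oob_score_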
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float,
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19 and will be removed in 0.21.
Use ``min_impurity_decrease`` instead.
min_impurity_decrease : float, optional (default=0.)
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate the R^2 on unseen data.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features.
n_outputs_ : int
The number of outputs.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
RandomForestRegressor: Ensemble regressor using trees with optimal splits.
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"min_impurity_decrease", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_decrease = min_impurity_decrease
self.min_impurity_split = min_impurity_split
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as there are trees in
the forest.
The dimensionality of the resulting representation is
``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
Read more in the :ref:`User Guide <random_trees_embedding>`.
Parameters
----------
n_estimators : integer, optional (default=10)
Number of trees in the forest.
max_depth : integer, optional (default=5)
The maximum depth of each tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
        - If float, then `min_samples_split` is a fraction and
`ceil(min_samples_split * n_samples)` is the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
        - If float, then `min_samples_leaf` is a fraction and
`ceil(min_samples_leaf * n_samples)` is the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float,
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19 and will be removed in 0.21.
Use ``min_impurity_decrease`` instead.
min_impurity_decrease : float, optional (default=0.)
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
sparse_output : bool, optional (default=True)
Whether or not to return a sparse CSR matrix, as default behavior,
or to return a dense array compatible with dense pipeline operators.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
sparse_output=True,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"min_impurity_decrease", "min_impurity_split",
"random_state"),
bootstrap=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = 1
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_decrease = min_impurity_decrease
self.min_impurity_split = min_impurity_split
self.sparse_output = sparse_output
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by tree embedding")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
self.fit_transform(X, y, sample_weight=sample_weight)
return self
def fit_transform(self, X, y=None, sample_weight=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data used to build forests. Use ``dtype=np.float32`` for
maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
X = check_array(X, accept_sparse=['csc'])
if issparse(X):
            # Pre-sort indices so that each individual tree of the ensemble
            # does not have to sort them again.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
super(RandomTreesEmbedding, self).fit(X, y,
sample_weight=sample_weight)
self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""Transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data to be transformed. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csr_matrix`` for maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
return self.one_hot_encoder_.transform(self.apply(X))
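# A minimal usage sketch of the embedding described in the class docstring
# above. The helper name and the toy data are illustrative only: each sample
# is mapped to a sparse one-hot indicator of the leaf it reaches in every
# tree, so the output has one column per observed leaf across all trees.
def _random_trees_embedding_example():
    rng = np.random.RandomState(0)
    X = rng.rand(20, 5)
    embedder = RandomTreesEmbedding(n_estimators=3, max_depth=2,
                                    random_state=0)
    X_sparse = embedder.fit_transform(X)
    # At most n_estimators * 2 ** max_depth = 12 columns here, and exactly
    # one non-zero entry per sample and per tree.
    assert X_sparse.shape[0] == 20
    assert X_sparse.shape[1] <= 3 * 2 ** 2
    assert np.all(X_sparse.sum(axis=1) == 3)
    return X_sparse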
| bsd-3-clause |
herilalaina/scikit-learn | sklearn/tests/test_docstring_parameters.py | 22 | 5738 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Raghav RV <rvraghav93@gmail.com>
# License: BSD 3 clause
import inspect
import sys
import warnings
import importlib
from pkgutil import walk_packages
from inspect import getsource, isabstract
import sklearn
from sklearn.base import signature
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import check_docstring_parameters
from sklearn.utils.testing import _get_func_name
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.deprecation import _is_deprecated
PUBLIC_MODULES = set([pckg[1] for pckg in walk_packages(prefix='sklearn.',
path=sklearn.__path__)
if not ("._" in pckg[1] or ".tests." in pckg[1])])
# TODO Uncomment all modules and fix doc inconsistencies everywhere
# The list of modules that are not tested for now
IGNORED_MODULES = (
'cross_decomposition',
'covariance',
'cluster',
'datasets',
'decomposition',
'feature_extraction',
'gaussian_process',
'linear_model',
'manifold',
'metrics',
'discriminant_analysis',
'ensemble',
'feature_selection',
'kernel_approximation',
'model_selection',
'multioutput',
'random_projection',
'setup',
'svm',
'utils',
'neighbors',
# Deprecated modules
'cross_validation',
'grid_search',
'learning_curve',
)
# functions to ignore args / docstring of
_DOCSTRING_IGNORES = [
'sklearn.utils.deprecation.load_mlcomp',
'sklearn.pipeline.make_pipeline',
'sklearn.pipeline.make_union',
'sklearn.utils.extmath.safe_sparse_dot',
]
# Methods where y param should be ignored if y=None by default
_METHODS_IGNORE_NONE_Y = [
'fit',
'score',
'fit_predict',
'fit_transform',
'partial_fit',
'predict'
]
def test_docstring_parameters():
# Test module docstring formatting
# Skip test if numpydoc is not found or if python version is < 3.5
try:
import numpydoc # noqa
assert sys.version_info >= (3, 5)
except (ImportError, AssertionError):
raise SkipTest("numpydoc is required to test the docstrings, "
"as well as python version >= 3.5")
from numpydoc import docscrape
incorrect = []
for name in PUBLIC_MODULES:
if name.startswith('_') or name.split(".")[1] in IGNORED_MODULES:
continue
with warnings.catch_warnings(record=True):
module = importlib.import_module(name)
classes = inspect.getmembers(module, inspect.isclass)
# Exclude imported classes
classes = [cls for cls in classes if cls[1].__module__ == name]
for cname, cls in classes:
this_incorrect = []
if cname in _DOCSTRING_IGNORES or cname.startswith('_'):
continue
if isabstract(cls):
continue
with warnings.catch_warnings(record=True) as w:
cdoc = docscrape.ClassDoc(cls)
if len(w):
raise RuntimeError('Error for __init__ of %s in %s:\n%s'
% (cls, name, w[0]))
cls_init = getattr(cls, '__init__', None)
if _is_deprecated(cls_init):
continue
elif cls_init is not None:
this_incorrect += check_docstring_parameters(
cls.__init__, cdoc, class_name=cname)
for method_name in cdoc.methods:
method = getattr(cls, method_name)
if _is_deprecated(method):
continue
param_ignore = None
# Now skip docstring test for y when y is None
# by default for API reason
if method_name in _METHODS_IGNORE_NONE_Y:
sig = signature(method)
if ('y' in sig.parameters and
sig.parameters['y'].default is None):
param_ignore = ['y'] # ignore y for fit and score
result = check_docstring_parameters(
method, ignore=param_ignore, class_name=cname)
this_incorrect += result
incorrect += this_incorrect
functions = inspect.getmembers(module, inspect.isfunction)
# Exclude imported functions
functions = [fn for fn in functions if fn[1].__module__ == name]
for fname, func in functions:
# Don't test private methods / functions
if fname.startswith('_'):
continue
if fname == "configuration" and name.endswith("setup"):
continue
name_ = _get_func_name(func)
if (not any(d in name_ for d in _DOCSTRING_IGNORES) and
not _is_deprecated(func)):
incorrect += check_docstring_parameters(func)
msg = '\n' + '\n'.join(sorted(list(set(incorrect))))
if len(incorrect) > 0:
raise AssertionError("Docstring Error: " + msg)
@ignore_warnings(category=DeprecationWarning)
def test_tabs():
# Test that there are no tabs in our source files
for importer, modname, ispkg in walk_packages(sklearn.__path__,
prefix='sklearn.'):
# because we don't import
mod = importlib.import_module(modname)
try:
source = getsource(mod)
except IOError: # user probably should have run "make clean"
continue
        assert '\t' not in source, ('"%s" has tabs, please remove them '
                                    'or add it to the ignore list'
                                    % modname)
| bsd-3-clause |
rflamary/POT | examples/plot_otda_semi_supervised.py | 2 | 4643 | # -*- coding: utf-8 -*-
"""
============================================
OTDA unsupervised vs semi-supervised setting
============================================
This example introduces a semi supervised domain adaptation in a 2D setting.
It explicits the problem of semi supervised domain adaptation and introduces
some optimal transport approaches to solve it.
Quantities such as optimal couplings, greater coupling coefficients and
transported samples are represented in order to give a visual understanding
of what the transport methods are doing.
"""
# Authors: Remi Flamary <remi.flamary@unice.fr>
# Stanislas Chambon <stan.chambon@gmail.com>
#
# License: MIT License
import matplotlib.pylab as pl
import ot
##############################################################################
# Generate data
# -------------
n_samples_source = 150
n_samples_target = 150
Xs, ys = ot.datasets.make_data_classif('3gauss', n_samples_source)
Xt, yt = ot.datasets.make_data_classif('3gauss2', n_samples_target)
##############################################################################
# Transport source samples onto target samples
# --------------------------------------------
# unsupervised domain adaptation
ot_sinkhorn_un = ot.da.SinkhornTransport(reg_e=1e-1)
ot_sinkhorn_un.fit(Xs=Xs, Xt=Xt)
transp_Xs_sinkhorn_un = ot_sinkhorn_un.transform(Xs=Xs)
# semi-supervised domain adaptation
ot_sinkhorn_semi = ot.da.SinkhornTransport(reg_e=1e-1)
ot_sinkhorn_semi.fit(Xs=Xs, Xt=Xt, ys=ys, yt=yt)
transp_Xs_sinkhorn_semi = ot_sinkhorn_semi.transform(Xs=Xs)
# semi-supervised DA uses available labeled target samples to modify the cost
# matrix involved in the OT problem. The cost of transporting a source sample
# of class A onto a target sample of class B != A is set to infinite, or a
# very large value
# note that in the present case we consider that all the target samples are
# labeled. For daily applications, some target sample might not have labels,
# in this case the element of yt corresponding to these samples should be
# filled with -1.
# Warning: we recall that -1 cannot be used as a class label
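# A rough numpy sketch of the cost modification described above (for
# illustration only -- roughly what SinkhornTransport does internally when
# ys/yt are passed to fit): pairwise squared Euclidean costs are computed,
# then entries that would map a labeled source sample onto a labeled target
# sample of a different class are set to a very large value.
M = ot.dist(Xs, Xt)
M_semi = M.copy()
forbidden = (ys[:, None] != yt[None, :]) & (yt[None, :] != -1)
M_semi[forbidden] = 1e10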
##############################################################################
# Fig 1 : plots source and target samples + matrix of pairwise distance
# ---------------------------------------------------------------------
pl.figure(1, figsize=(10, 10))
pl.subplot(2, 2, 1)
pl.scatter(Xs[:, 0], Xs[:, 1], c=ys, marker='+', label='Source samples')
pl.xticks([])
pl.yticks([])
pl.legend(loc=0)
pl.title('Source samples')
pl.subplot(2, 2, 2)
pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o', label='Target samples')
pl.xticks([])
pl.yticks([])
pl.legend(loc=0)
pl.title('Target samples')
pl.subplot(2, 2, 3)
pl.imshow(ot_sinkhorn_un.cost_, interpolation='nearest')
pl.xticks([])
pl.yticks([])
pl.title('Cost matrix - unsupervised DA')
pl.subplot(2, 2, 4)
pl.imshow(ot_sinkhorn_semi.cost_, interpolation='nearest')
pl.xticks([])
pl.yticks([])
pl.title('Cost matrix - semisupervised DA')
pl.tight_layout()
# the optimal coupling in the semi-supervised DA case will exhibit a shape
# similar to the cost matrix (block diagonal structure)
##############################################################################
# Fig 2 : plots optimal couplings for the different methods
# ---------------------------------------------------------
pl.figure(2, figsize=(8, 4))
pl.subplot(1, 2, 1)
pl.imshow(ot_sinkhorn_un.coupling_, interpolation='nearest')
pl.xticks([])
pl.yticks([])
pl.title('Optimal coupling\nUnsupervised DA')
pl.subplot(1, 2, 2)
pl.imshow(ot_sinkhorn_semi.coupling_, interpolation='nearest')
pl.xticks([])
pl.yticks([])
pl.title('Optimal coupling\nSemi-supervised DA')
pl.tight_layout()
##############################################################################
# Fig 3 : plot transported samples
# --------------------------------
# display transported samples
pl.figure(4, figsize=(8, 4))
pl.subplot(1, 2, 1)
pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o',
label='Target samples', alpha=0.5)
pl.scatter(transp_Xs_sinkhorn_un[:, 0], transp_Xs_sinkhorn_un[:, 1], c=ys,
marker='+', label='Transp samples', s=30)
pl.title('Transported samples\nUnsupervised DA')
pl.legend(loc=0)
pl.xticks([])
pl.yticks([])
pl.subplot(1, 2, 2)
pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o',
label='Target samples', alpha=0.5)
pl.scatter(transp_Xs_sinkhorn_semi[:, 0], transp_Xs_sinkhorn_semi[:, 1], c=ys,
marker='+', label='Transp samples', s=30)
pl.title('Transported samples\nSemi-supervised DA')
pl.xticks([])
pl.yticks([])
pl.tight_layout()
pl.show()
| mit |
shareactorIO/pipeline | source.ml/jupyterhub.ml/notebooks/zz_old/TensorFlow/GoogleTraining/workshop_sections/mnist_series/the_hard_way/mnist_onehlayer.py | 3 | 9561 | #!/usr/bin/env python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse
import math
import os
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets
# Define some constants.
# The MNIST dataset has 10 classes, representing the digits 0 through 9.
NUM_CLASSES = 10
# The MNIST images are always 28x28 pixels.
IMAGE_SIZE = 28
IMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE
# Batch size. Must divide evenly into the dataset sizes.
BATCH_SIZE = 100
EVAL_BATCH_SIZE = 3
# Number of units in hidden layers.
HIDDEN1_UNITS = 128
FLAGS = None
# Build inference graph.
def mnist_inference(images, hidden1_units):
"""Build the MNIST model up to where it may be used for inference.
Args:
images: Images placeholder.
hidden1_units: Size of the first hidden layer.
Returns:
logits: Output tensor with the computed logits.
"""
# Hidden 1
with tf.name_scope('hidden1'):
weights = tf.Variable(
tf.truncated_normal([IMAGE_PIXELS, hidden1_units],
stddev=1.0 / math.sqrt(float(IMAGE_PIXELS))),
name='weights')
biases = tf.Variable(tf.zeros([hidden1_units]),
name='biases')
hidden1 = tf.nn.relu(tf.matmul(images, weights) + biases)
# Linear
with tf.name_scope('softmax_linear'):
weights = tf.Variable(
tf.truncated_normal([hidden1_units, NUM_CLASSES],
stddev=1.0 / math.sqrt(float(hidden1_units))),
name='weights')
biases = tf.Variable(tf.zeros([NUM_CLASSES]),
name='biases')
logits = tf.matmul(hidden1, weights) + biases
# Uncomment the following line to see what we have constructed.
# tf.train.write_graph(tf.get_default_graph().as_graph_def(),
# "/tmp", "inference.pbtxt", as_text=True)
return logits
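# Shape walk-through for the defaults above (illustrative comment only):
# images [BATCH_SIZE, 784] -> hidden1 [BATCH_SIZE, 128] via relu(x.W + b)
# -> logits [BATCH_SIZE, 10], one unnormalized score per digit class.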
# Build training graph.
def mnist_training(logits, labels, learning_rate):
"""Build the training graph.
Args:
logits: Logits tensor, float - [BATCH_SIZE, NUM_CLASSES].
labels: Labels tensor, int32 - [BATCH_SIZE], with values in the
range [0, NUM_CLASSES).
learning_rate: The learning rate to use for gradient descent.
Returns:
train_op: The Op for training.
loss: The Op for calculating loss.
"""
# Create an operation that calculates loss.
labels = tf.to_int64(labels)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits, labels, name='xentropy')
loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
# Create the gradient descent optimizer with the given learning rate.
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
# Create a variable to track the global step.
global_step = tf.Variable(0, name='global_step', trainable=False)
# Use the optimizer to apply the gradients that minimize the loss
# (and also increment the global step counter) as a single training step.
train_op = optimizer.minimize(loss, global_step=global_step)
return train_op, loss
def main(_):
"""Build the full graph for feeding inputs, training, and
saving checkpoints. Run the training. Then, load the saved graph and
run some predictions."""
# Get input data: get the sets of images and labels for training,
# validation, and test on MNIST.
data_sets = read_data_sets(FLAGS.data_dir, False)
mnist_graph = tf.Graph()
with mnist_graph.as_default():
# Generate placeholders for the images and labels.
images_placeholder = tf.placeholder(tf.float32)
labels_placeholder = tf.placeholder(tf.int32)
tf.add_to_collection("images", images_placeholder) # Remember this Op.
tf.add_to_collection("labels", labels_placeholder) # Remember this Op.
# Build a Graph that computes predictions from the inference model.
logits = mnist_inference(images_placeholder,
HIDDEN1_UNITS)
tf.add_to_collection("logits", logits) # Remember this Op.
# Add to the Graph the Ops that calculate and apply gradients.
train_op, loss = mnist_training(
logits, labels_placeholder, 0.01)
# prediction accuracy
_, indices_op = tf.nn.top_k(logits)
flattened = tf.reshape(indices_op, [-1])
correct_prediction = tf.cast(
tf.equal(labels_placeholder, flattened), tf.float32)
accuracy = tf.reduce_mean(correct_prediction)
# Define info to be used by the SummaryWriter. This will let
# TensorBoard plot values during the training process.
loss_summary = tf.scalar_summary("loss", loss)
train_summary_op = tf.merge_summary([loss_summary])
# Add the variable initializer Op.
init = tf.initialize_all_variables()
# Create a saver for writing training checkpoints.
saver = tf.train.Saver()
# Create a summary writer.
print("Writing Summaries to %s" % FLAGS.model_dir)
train_summary_writer = tf.train.SummaryWriter(FLAGS.model_dir)
# Run training and save checkpoint at the end.
with tf.Session(graph=mnist_graph) as sess:
# Run the Op to initialize the variables.
sess.run(init)
# Start the training loop.
for step in xrange(FLAGS.num_steps):
# Read a batch of images and labels.
images_feed, labels_feed = data_sets.train.next_batch(BATCH_SIZE)
# Run one step of the model. The return values are the activations
# from the `train_op` (which is discarded) and the `loss` Op. To
# inspect the values of your Ops or variables, you may include them
# in the list passed to sess.run() and the value tensors will be
# returned in the tuple from the call.
_, loss_value, tsummary, acc = sess.run(
[train_op, loss, train_summary_op, accuracy],
feed_dict={images_placeholder: images_feed,
labels_placeholder: labels_feed})
if step % 100 == 0:
# Write summary info
train_summary_writer.add_summary(tsummary, step)
if step % 1000 == 0:
# Print loss/accuracy info
print('----Step %d: loss = %.4f' % (step, loss_value))
print("accuracy: %s" % acc)
print("\nWriting checkpoint file.")
checkpoint_file = os.path.join(FLAGS.model_dir, 'checkpoint')
saver.save(sess, checkpoint_file, global_step=step)
_, loss_value = sess.run(
[train_op, loss],
feed_dict={images_placeholder: data_sets.test.images,
labels_placeholder: data_sets.test.labels})
print("Test set loss: %s" % loss_value)
# Run evaluation based on the saved checkpoint.
with tf.Session(graph=tf.Graph()) as sess:
checkpoint_file = tf.train.latest_checkpoint(FLAGS.model_dir)
print("\nRunning predictions based on saved checkpoint.")
print("checkpoint file: {}".format(checkpoint_file))
# Load the saved meta graph and restore variables
saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
# Retrieve the Ops we 'remembered'.
logits = tf.get_collection("logits")[0]
images_placeholder = tf.get_collection("images")[0]
labels_placeholder = tf.get_collection("labels")[0]
# Add an Op that chooses the top k predictions.
eval_op = tf.nn.top_k(logits)
# Run evaluation.
images_feed, labels_feed = data_sets.validation.next_batch(
EVAL_BATCH_SIZE)
prediction = sess.run(eval_op,
feed_dict={images_placeholder: images_feed,
labels_placeholder: labels_feed})
for i in range(len(labels_feed)):
print("Ground truth: %d\nPrediction: %d" %
(labels_feed[i], prediction.indices[i][0]))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='/tmp/MNIST_data',
help='Directory for storing data')
parser.add_argument('--num_steps', type=int,
default=25000,
help='Number of training steps to run')
parser.add_argument('--model_dir', type=str,
default=os.path.join(
"/tmp/tfmodels/mnist_onehlayer",
str(int(time.time()))),
help='Directory for storing model info')
FLAGS = parser.parse_args()
tf.app.run()
| apache-2.0 |
marmarko/ml101 | tensorflow/examples/skflow/multioutput_regression.py | 9 | 2552 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This example uses the same data as the one here:
http://scikit-learn.org/stable/auto_examples/tree/plot_tree_regression_multioutput.html
Instead of a DecisionTree, deep neural networks with tanh activations and
varying hidden layer sizes are used.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
from tensorflow.contrib import learn
# Create random dataset.
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
# Fit regression DNN models.
regressors = []
options = [[2], [10, 10], [20, 20]]
for hidden_units in options:
def tanh_dnn(X, y):
features = learn.ops.dnn(X, hidden_units=hidden_units,
activation=learn.tf.tanh)
return learn.models.linear_regression(features, y)
regressor = learn.TensorFlowEstimator(model_fn=tanh_dnn, n_classes=0,
steps=500, learning_rate=0.1, batch_size=100)
regressor.fit(X, y)
score = mean_squared_error(regressor.predict(X), y)
print("Mean Squared Error for {0}: {1:f}".format(str(hidden_units), score))
regressors.append(regressor)
# Predict on new random Xs.
X_test = np.arange(-100.0, 100.0, 0.1)[:, np.newaxis]
y_1 = regressors[0].predict(X_test)
y_2 = regressors[1].predict(X_test)
y_3 = regressors[2].predict(X_test)
# Plot the results
plt.figure()
plt.scatter(y[:, 0], y[:, 1], c="k", label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="g",
label="hidden_units{}".format(str(options[0])))
plt.scatter(y_2[:, 0], y_2[:, 1], c="r",
label="hidden_units{}".format(str(options[1])))
plt.scatter(y_3[:, 0], y_3[:, 1], c="b",
label="hidden_units{}".format(str(options[2])))
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output DNN Regression")
plt.legend()
plt.show()
| bsd-2-clause |
herilalaina/scikit-learn | sklearn/linear_model/base.py | 28 | 21031 | """
Generalized Linear models.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Vincent Michel <vincent.michel@inria.fr>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck
# Maryan Morel <maryan.morel@polytechnique.edu>
# Giorgio Patrini <giorgio.patrini@anu.edu.au>
# License: BSD 3 clause
from __future__ import division
from abc import ABCMeta, abstractmethod
import numbers
import warnings
import numpy as np
import scipy.sparse as sp
from scipy import linalg
from scipy import sparse
from ..externals import six
from ..externals.joblib import Parallel, delayed
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..utils import check_array, check_X_y, deprecated, as_float_array
from ..utils.validation import FLOAT_DTYPES
from ..utils import check_random_state
from ..utils.extmath import safe_sparse_dot
from ..utils.sparsefuncs import mean_variance_axis, inplace_column_scale
from ..utils.fixes import sparse_lsqr
from ..utils.seq_dataset import ArrayDataset, CSRDataset
from ..utils.validation import check_is_fitted
from ..exceptions import NotFittedError
from ..preprocessing.data import normalize as f_normalize
# TODO: bayesian_ridge_regression and bayesian_regression_ard
# should be squashed into its respective objects.
SPARSE_INTERCEPT_DECAY = 0.01
# For sparse data intercept updates are scaled by this decay factor to avoid
# intercept oscillation.
def make_dataset(X, y, sample_weight, random_state=None):
"""Create ``Dataset`` abstraction for sparse and dense inputs.
This also returns the ``intercept_decay`` which is different
for sparse datasets.
"""
rng = check_random_state(random_state)
# seed should never be 0 in SequentialDataset
seed = rng.randint(1, np.iinfo(np.int32).max)
if sp.issparse(X):
dataset = CSRDataset(X.data, X.indptr, X.indices, y, sample_weight,
seed=seed)
intercept_decay = SPARSE_INTERCEPT_DECAY
else:
dataset = ArrayDataset(X, y, sample_weight, seed=seed)
intercept_decay = 1.0
return dataset, intercept_decay
@deprecated("sparse_center_data was deprecated in version 0.18 and will be "
"removed in 0.20. Use utilities in preprocessing.data instead")
def sparse_center_data(X, y, fit_intercept, normalize=False):
"""
Compute information needed to center data to have mean zero along
axis 0. Be aware that X will not be centered since it would break
the sparsity, but will be normalized if asked so.
"""
if fit_intercept:
# we might require not to change the csr matrix sometimes
# store a copy if normalize is True.
# Change dtype to float64 since mean_variance_axis accepts
# it that way.
if sp.isspmatrix(X) and X.getformat() == 'csr':
X = sp.csr_matrix(X, copy=normalize, dtype=np.float64)
else:
X = sp.csc_matrix(X, copy=normalize, dtype=np.float64)
X_offset, X_var = mean_variance_axis(X, axis=0)
if normalize:
# transform variance to std in-place
X_var *= X.shape[0]
X_std = np.sqrt(X_var, X_var)
del X_var
X_std[X_std == 0] = 1
inplace_column_scale(X, 1. / X_std)
else:
X_std = np.ones(X.shape[1])
y_offset = y.mean(axis=0)
y = y - y_offset
else:
X_offset = np.zeros(X.shape[1])
X_std = np.ones(X.shape[1])
y_offset = 0. if y.ndim == 1 else np.zeros(y.shape[1], dtype=X.dtype)
return X, y, X_offset, y_offset, X_std
@deprecated("center_data was deprecated in version 0.18 and will be removed "
"in 0.20. Use utilities in preprocessing.data instead")
def center_data(X, y, fit_intercept, normalize=False, copy=True,
sample_weight=None):
"""
Centers data to have mean zero along axis 0. This is here because
nearly all linear models will want their data to be centered.
If sample_weight is not None, then the weighted mean of X and y
is zero, and not the mean itself
"""
X = as_float_array(X, copy)
if fit_intercept:
if isinstance(sample_weight, numbers.Number):
sample_weight = None
if sp.issparse(X):
X_offset = np.zeros(X.shape[1])
X_std = np.ones(X.shape[1])
else:
X_offset = np.average(X, axis=0, weights=sample_weight)
X -= X_offset
# XXX: currently scaled to variance=n_samples
if normalize:
X_std = np.sqrt(np.sum(X ** 2, axis=0))
X_std[X_std == 0] = 1
X /= X_std
else:
X_std = np.ones(X.shape[1])
y_offset = np.average(y, axis=0, weights=sample_weight)
y = y - y_offset
else:
X_offset = np.zeros(X.shape[1])
X_std = np.ones(X.shape[1])
y_offset = 0. if y.ndim == 1 else np.zeros(y.shape[1], dtype=X.dtype)
return X, y, X_offset, y_offset, X_std
def _preprocess_data(X, y, fit_intercept, normalize=False, copy=True,
sample_weight=None, return_mean=False):
"""
Centers data to have mean zero along axis 0. If fit_intercept=False or if
the X is a sparse matrix, no centering is done, but normalization can still
be applied. The function returns the statistics necessary to reconstruct
the input data, which are X_offset, y_offset, X_scale, such that the output
X = (X - X_offset) / X_scale
X_scale is the L2 norm of X - X_offset. If sample_weight is not None,
then the weighted mean of X and y is zero, and not the mean itself. If
return_mean=True, the mean, eventually weighted, is returned, independently
of whether X was centered (option used for optimization with sparse data in
coordinate_descend).
This is here because nearly all linear models will want their data to be
centered. This function also systematically makes y consistent with X.dtype
"""
if isinstance(sample_weight, numbers.Number):
sample_weight = None
X = check_array(X, copy=copy, accept_sparse=['csr', 'csc'],
dtype=FLOAT_DTYPES)
y = np.asarray(y, dtype=X.dtype)
if fit_intercept:
if sp.issparse(X):
X_offset, X_var = mean_variance_axis(X, axis=0)
if not return_mean:
X_offset[:] = X.dtype.type(0)
if normalize:
# TODO: f_normalize could be used here as well but the function
# inplace_csr_row_normalize_l2 must be changed such that it
                # can also return the norms computed internally
# transform variance to norm in-place
X_var *= X.shape[0]
X_scale = np.sqrt(X_var, X_var)
del X_var
X_scale[X_scale == 0] = 1
inplace_column_scale(X, 1. / X_scale)
else:
X_scale = np.ones(X.shape[1], dtype=X.dtype)
else:
X_offset = np.average(X, axis=0, weights=sample_weight)
X -= X_offset
if normalize:
X, X_scale = f_normalize(X, axis=0, copy=False,
return_norm=True)
else:
X_scale = np.ones(X.shape[1], dtype=X.dtype)
y_offset = np.average(y, axis=0, weights=sample_weight)
y = y - y_offset
else:
X_offset = np.zeros(X.shape[1], dtype=X.dtype)
X_scale = np.ones(X.shape[1], dtype=X.dtype)
if y.ndim == 1:
y_offset = X.dtype.type(0)
else:
y_offset = np.zeros(y.shape[1], dtype=X.dtype)
return X, y, X_offset, y_offset, X_scale
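# A small self-contained sketch of the contract documented above; the helper
# name and the toy data are illustrative and not part of the module's API.
# With fit_intercept=True and normalize=True on dense input, the returned X
# equals (X_original - X_offset) / X_scale and y equals y_original - y_offset.
def _preprocess_data_example():
    rng = np.random.RandomState(0)
    X = rng.rand(6, 3)
    y = rng.rand(6)
    Xp, yp, X_offset, y_offset, X_scale = _preprocess_data(
        X.copy(), y.copy(), fit_intercept=True, normalize=True)
    assert np.allclose(X_offset, X.mean(axis=0))
    assert np.allclose(Xp, (X - X_offset) / X_scale)
    assert np.allclose(yp, y - y_offset)
    return Xp, yp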
# TODO: _rescale_data should be factored into _preprocess_data.
# Currently, the fact that sag implements its own way to deal with
# sample_weight makes the refactoring tricky.
def _rescale_data(X, y, sample_weight):
"""Rescale data so as to support sample_weight"""
n_samples = X.shape[0]
sample_weight = sample_weight * np.ones(n_samples)
sample_weight = np.sqrt(sample_weight)
sw_matrix = sparse.dia_matrix((sample_weight, 0),
shape=(n_samples, n_samples))
X = safe_sparse_dot(sw_matrix, X)
y = safe_sparse_dot(sw_matrix, y)
return X, y
class LinearModel(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for Linear Models"""
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _decision_function(self, X):
check_is_fitted(self, "coef_")
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
return safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Samples.
Returns
-------
C : array, shape = (n_samples,)
Returns predicted values.
"""
return self._decision_function(X)
_preprocess_data = staticmethod(_preprocess_data)
def _set_intercept(self, X_offset, y_offset, X_scale):
"""Set the intercept_
"""
if self.fit_intercept:
self.coef_ = self.coef_ / X_scale
self.intercept_ = y_offset - np.dot(X_offset, self.coef_.T)
else:
self.intercept_ = 0.
# XXX Should this derive from LinearModel? It should be a mixin, not an ABC.
# Maybe the n_features checking can be moved to LinearModel.
class LinearClassifierMixin(ClassifierMixin):
"""Mixin for linear classifiers.
Handles prediction for sparse and dense X.
"""
def decision_function(self, X):
"""Predict confidence scores for samples.
The confidence score for a sample is the signed distance of that
sample to the hyperplane.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Samples.
Returns
-------
array, shape=(n_samples,) if n_classes == 2 else (n_samples, n_classes)
Confidence scores per (sample, class) combination. In the binary
case, confidence score for self.classes_[1] where >0 means this
class would be predicted.
"""
if not hasattr(self, 'coef_') or self.coef_ is None:
raise NotFittedError("This %(name)s instance is not fitted "
"yet" % {'name': type(self).__name__})
X = check_array(X, accept_sparse='csr')
n_features = self.coef_.shape[1]
if X.shape[1] != n_features:
raise ValueError("X has %d features per sample; expecting %d"
% (X.shape[1], n_features))
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel() if scores.shape[1] == 1 else scores
def predict(self, X):
"""Predict class labels for samples in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples]
Predicted class label per sample.
"""
scores = self.decision_function(X)
if len(scores.shape) == 1:
indices = (scores > 0).astype(np.int)
else:
indices = scores.argmax(axis=1)
return self.classes_[indices]
def _predict_proba_lr(self, X):
"""Probability estimation for OvR logistic regression.
Positive class probabilities are computed as
1. / (1. + np.exp(-self.decision_function(X)));
multiclass is handled by normalizing that over all classes.
"""
prob = self.decision_function(X)
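        # The in-place operations below compute 1. / (1. + np.exp(-prob))
        # without allocating intermediate arrays.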
prob *= -1
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
if prob.ndim == 1:
return np.vstack([1 - prob, prob]).T
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
class SparseCoefMixin(object):
"""Mixin for converting coef_ to and from CSR format.
L1-regularizing estimators should inherit this.
"""
def densify(self):
"""Convert coefficient matrix to dense array format.
Converts the ``coef_`` member (back) to a numpy.ndarray. This is the
default format of ``coef_`` and is required for fitting, so calling
this method is only required on models that have previously been
sparsified; otherwise, it is a no-op.
Returns
-------
self : estimator
"""
msg = "Estimator, %(name)s, must be fitted before densifying."
check_is_fitted(self, "coef_", msg=msg)
if sp.issparse(self.coef_):
self.coef_ = self.coef_.toarray()
return self
def sparsify(self):
"""Convert coefficient matrix to sparse format.
Converts the ``coef_`` member to a scipy.sparse matrix, which for
L1-regularized models can be much more memory- and storage-efficient
than the usual numpy.ndarray representation.
The ``intercept_`` member is not converted.
Notes
-----
For non-sparse models, i.e. when there are not many zeros in ``coef_``,
this may actually *increase* memory usage, so use this method with
care. A rule of thumb is that the number of zero elements, which can
be computed with ``(coef_ == 0).sum()``, must be more than 50% for this
to provide significant benefits.
After calling this method, further fitting with the partial_fit
method (if any) will not work until you call densify.
Returns
-------
self : estimator
"""
msg = "Estimator, %(name)s, must be fitted before sparsifying."
check_is_fitted(self, "coef_", msg=msg)
self.coef_ = sp.csr_matrix(self.coef_)
return self
class LinearRegression(LinearModel, RegressorMixin):
"""
Ordinary least squares Linear Regression.
Parameters
----------
fit_intercept : boolean, optional, default True
whether to calculate the intercept for this model. If set
to False, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit`` on
an estimator with ``normalize=False``.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
n_jobs : int, optional, default 1
The number of jobs to use for the computation.
If -1 all CPUs are used. This will only provide speedup for
n_targets > 1 and sufficient large problems.
Attributes
----------
coef_ : array, shape (n_features, ) or (n_targets, n_features)
Estimated coefficients for the linear regression problem.
If multiple targets are passed during the fit (y 2D), this
is a 2D array of shape (n_targets, n_features), while if only
one target is passed, this is a 1D array of length n_features.
intercept_ : array
Independent term in the linear model.
Notes
-----
From the implementation point of view, this is just plain Ordinary
Least Squares (scipy.linalg.lstsq) wrapped as a predictor object.
"""
def __init__(self, fit_intercept=True, normalize=False, copy_X=True,
n_jobs=1):
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.n_jobs = n_jobs
def fit(self, X, y, sample_weight=None):
"""
Fit linear model.
Parameters
----------
X : numpy array or sparse matrix of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples, n_targets]
Target values. Will be cast to X's dtype if necessary
sample_weight : numpy array of shape [n_samples]
Individual weights for each sample
.. versionadded:: 0.17
parameter *sample_weight* support to LinearRegression.
Returns
-------
self : returns an instance of self.
"""
n_jobs_ = self.n_jobs
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'],
y_numeric=True, multi_output=True)
if sample_weight is not None and np.atleast_1d(sample_weight).ndim > 1:
raise ValueError("Sample weights must be 1D array or scalar")
X, y, X_offset, y_offset, X_scale = self._preprocess_data(
X, y, fit_intercept=self.fit_intercept, normalize=self.normalize,
copy=self.copy_X, sample_weight=sample_weight)
if sample_weight is not None:
# Sample weight can be implemented via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
if sp.issparse(X):
if y.ndim < 2:
out = sparse_lsqr(X, y)
self.coef_ = out[0]
self._residues = out[3]
else:
                # sparse_lsqr cannot handle y with shape (M, K)
outs = Parallel(n_jobs=n_jobs_)(
delayed(sparse_lsqr)(X, y[:, j].ravel())
for j in range(y.shape[1]))
self.coef_ = np.vstack(out[0] for out in outs)
self._residues = np.vstack(out[3] for out in outs)
else:
self.coef_, self._residues, self.rank_, self.singular_ = \
linalg.lstsq(X, y)
self.coef_ = self.coef_.T
if y.ndim == 1:
self.coef_ = np.ravel(self.coef_)
self._set_intercept(X_offset, y_offset, X_scale)
return self
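# A minimal usage sketch of the estimator above; the helper name and toy data
# are illustrative only. On a noiseless linear target, ordinary least squares
# recovers the coefficients and the intercept exactly (up to rounding).
def _linear_regression_example():
    rng = np.random.RandomState(0)
    X = rng.rand(50, 2)
    y = 3. * X[:, 0] - 2. * X[:, 1] + 1.
    reg = LinearRegression().fit(X, y)
    assert np.allclose(reg.coef_, [3., -2.])
    assert np.allclose(reg.intercept_, 1.)
    return reg.predict(X[:5])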
def _pre_fit(X, y, Xy, precompute, normalize, fit_intercept, copy):
"""Aux function used at beginning of fit in linear models"""
n_samples, n_features = X.shape
if sparse.isspmatrix(X):
# copy is not needed here as X is not modified inplace when X is sparse
precompute = False
X, y, X_offset, y_offset, X_scale = _preprocess_data(
X, y, fit_intercept=fit_intercept, normalize=normalize,
copy=False, return_mean=True)
else:
# copy was done in fit if necessary
X, y, X_offset, y_offset, X_scale = _preprocess_data(
X, y, fit_intercept=fit_intercept, normalize=normalize, copy=copy)
if hasattr(precompute, '__array__') and (
fit_intercept and not np.allclose(X_offset, np.zeros(n_features)) or
normalize and not np.allclose(X_scale, np.ones(n_features))):
warnings.warn("Gram matrix was provided but X was centered"
" to fit intercept, "
"or X was normalized : recomputing Gram matrix.",
UserWarning)
# recompute Gram
precompute = 'auto'
Xy = None
# precompute if n_samples > n_features
if isinstance(precompute, six.string_types) and precompute == 'auto':
precompute = (n_samples > n_features)
if precompute is True:
# make sure that the 'precompute' array is contiguous.
precompute = np.empty(shape=(n_features, n_features), dtype=X.dtype,
order='C')
np.dot(X.T, X, out=precompute)
if not hasattr(precompute, '__array__'):
Xy = None # cannot use Xy if precompute is not Gram
if hasattr(precompute, '__array__') and Xy is None:
common_dtype = np.find_common_type([X.dtype, y.dtype], [])
if y.ndim == 1:
# Xy is 1d, make sure it is contiguous.
Xy = np.empty(shape=n_features, dtype=common_dtype, order='C')
np.dot(X.T, y, out=Xy)
else:
# Make sure that Xy is always F contiguous even if X or y are not
# contiguous: the goal is to make it fast to extract the data for a
# specific target.
n_targets = y.shape[1]
Xy = np.empty(shape=(n_features, n_targets), dtype=common_dtype,
order='F')
np.dot(y.T, X, out=Xy.T)
return X, y, X_offset, y_offset, X_scale, precompute, Xy
| bsd-3-clause |
sangwook236/general-development-and-testing | sw_dev/python/ext/test/file_format/hdf_test.py | 2 | 3531 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import numpy as np
import h5py
# REF [site] >> https://docs.h5py.org/en/stable/quick.html
def quick_start_guide():
# An HDF5 file is a container for two kinds of objects:
# datasets, which are array-like collections of data, and groups, which are folder-like containers that hold datasets and other groups.
# The most fundamental thing to remember when using h5py is:
# Groups work like dictionaries, and datasets work like NumPy arrays.
hdf5_filepath = "./mytestfile.hdf5"
# Create a file.
with h5py.File(hdf5_filepath, "w") as f:
dset = f.create_dataset("mydataset", shape=(100,), dtype="i")
print("f.name = {}.".format(f.name))
print("dset.name = {}.".format(dset.name))
# Attribute.
# The official way to store metadata in HDF5.
dset.attrs["temperature"] = 99.5
print('dset.attrs["temperature"] = {}.'.format(dset.attrs["temperature"]))
print('"temperature" in dset.attrs = {}.'.format("temperature" in dset.attrs))
with h5py.File(hdf5_filepath, "a") as f:
grp = f.create_group("subgroup")
dset2 = grp.create_dataset("another_dataset", shape=(50,), dtype="f")
print("dset2.name = {}.".format(dset2.name))
dset3 = f.create_dataset("subgroup2/dataset_three", shape=(10,), dtype="i")
print("dset3.name = {}.".format(dset3.name))
for name in f:
print(name)
print('"mydataset" in f = {}.'.format("mydataset" in f))
print('"somethingelse" in f = {}.'.format("somethingelse" in f))
print('"subgroup/another_dataset" in f = {}.'.format("subgroup/another_dataset" in f))
print("f.keys() = {}.".format(f.keys()))
print("f.values() = {}.".format(f.values()))
print("f.items() = {}.".format(f.items()))
#print("f.iter() = {}.".format(f.iter())) # AttributeError: 'File' object has no attribute 'iter'.
print('f.get("subgroup/another_dataset") = {}.'.format(f.get("subgroup/another_dataset")))
print('f.get("another_dataset") = {}.'.format(f.get("another_dataset")))
print('f["subgroup/another_dataset"] = {}.'.format(f["subgroup/another_dataset"]))
#print('f["another_dataset"] = {}.'.format(f["another_dataset"])) # KeyError: "Unable to open object (object 'another_dataset' doesn't exist)".
dataset_three = f["subgroup2/dataset_three"]
print("grp.keys() = {}.".format(grp.keys()))
print("grp.values() = {}.".format(grp.values()))
print("grp.items() = {}.".format(grp.items()))
#print("grp.iter() = {}.".format(grp.iter())) # AttributeError: 'Group' object has no attribute 'iter'.
print('grp.get("another_dataset") = {}.'.format(grp.get("another_dataset")))
print('grp.get("subgroup/another_dataset") = {}.'.format(grp.get("subgroup/another_dataset")))
print('grp["another_dataset"] = {}.'.format(grp["another_dataset"]))
#print('grp["subgroup/another_dataset"] = {}.'.format(grp["subgroup/another_dataset"])) # KeyError: 'Unable to open object (component not found)'.
del grp["another_dataset"]
def print_name(name):
print(name)
f.visit(print_name)
def print_item(name, obj):
print(name, obj)
f.visititems(print_item)
with h5py.File(hdf5_filepath, "r+") as f:
dset = f["mydataset"]
print("dset.shape = {}, dset.dtype= {}.".format(dset.shape, dset.dtype))
dset[...] = np.arange(100)
print("dset[0] = {}.".format(dset[0]))
print("dset[10] = {}.".format(dset[10]))
print("dset[0:100:10] = {}.".format(dset[0:100:10]))
def main():
quick_start_guide()
#--------------------------------------------------------------------
if "__main__" == __name__:
main()
| gpl-2.0 |
marctc/django | tests/gis_tests/test_geoip.py | 73 | 5275 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import unittest
import warnings
from unittest import skipUnless
from django.conf import settings
from django.contrib.gis.geoip import HAS_GEOIP
from django.contrib.gis.geos import HAS_GEOS, GEOSGeometry
from django.test import ignore_warnings
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
if HAS_GEOIP:
from django.contrib.gis.geoip import GeoIP, GeoIPException
# Note: Requires use of both the GeoIP country and city datasets.
# The GEOIP_PATH setting should be the only setting set (the directory
# should contain links or the actual database files 'GeoIP.dat' and
# 'GeoLiteCity.dat').
@skipUnless(HAS_GEOIP and getattr(settings, "GEOIP_PATH", None),
"GeoIP is required along with the GEOIP_PATH setting.")
@ignore_warnings(category=RemovedInDjango20Warning)
class GeoIPTest(unittest.TestCase):
addr = '128.249.1.1'
fqdn = 'tmc.edu'
def test01_init(self):
"Testing GeoIP initialization."
g1 = GeoIP() # Everything inferred from GeoIP path
path = settings.GEOIP_PATH
g2 = GeoIP(path, 0) # Passing in data path explicitly.
g3 = GeoIP.open(path, 0) # MaxMind Python API syntax.
for g in (g1, g2, g3):
self.assertTrue(g._country)
self.assertTrue(g._city)
# Only passing in the location of one database.
city = os.path.join(path, 'GeoLiteCity.dat')
cntry = os.path.join(path, 'GeoIP.dat')
g4 = GeoIP(city, country='')
self.assertIsNone(g4._country)
g5 = GeoIP(cntry, city='')
self.assertIsNone(g5._city)
# Improper parameters.
bad_params = (23, 'foo', 15.23)
for bad in bad_params:
self.assertRaises(GeoIPException, GeoIP, cache=bad)
if isinstance(bad, six.string_types):
e = GeoIPException
else:
e = TypeError
self.assertRaises(e, GeoIP, bad, 0)
def test02_bad_query(self):
"Testing GeoIP query parameter checking."
cntry_g = GeoIP(city='<foo>')
# No city database available, these calls should fail.
self.assertRaises(GeoIPException, cntry_g.city, 'google.com')
self.assertRaises(GeoIPException, cntry_g.coords, 'yahoo.com')
# Non-string query should raise TypeError
self.assertRaises(TypeError, cntry_g.country_code, 17)
self.assertRaises(TypeError, cntry_g.country_name, GeoIP)
def test03_country(self):
"Testing GeoIP country querying methods."
g = GeoIP(city='<foo>')
for query in (self.fqdn, self.addr):
for func in (g.country_code, g.country_code_by_addr, g.country_code_by_name):
self.assertEqual('US', func(query), 'Failed for func %s and query %s' % (func, query))
for func in (g.country_name, g.country_name_by_addr, g.country_name_by_name):
self.assertEqual('United States', func(query), 'Failed for func %s and query %s' % (func, query))
self.assertEqual({'country_code': 'US', 'country_name': 'United States'},
g.country(query))
@skipUnless(HAS_GEOS, "Geos is required")
def test04_city(self):
"Testing GeoIP city querying methods."
g = GeoIP(country='<foo>')
for query in (self.fqdn, self.addr):
# Country queries should still work.
for func in (g.country_code, g.country_code_by_addr, g.country_code_by_name):
self.assertEqual('US', func(query))
for func in (g.country_name, g.country_name_by_addr, g.country_name_by_name):
self.assertEqual('United States', func(query))
self.assertEqual({'country_code': 'US', 'country_name': 'United States'},
g.country(query))
# City information dictionary.
d = g.city(query)
self.assertEqual('USA', d['country_code3'])
self.assertEqual('Houston', d['city'])
self.assertEqual('TX', d['region'])
self.assertEqual(713, d['area_code'])
geom = g.geos(query)
self.assertIsInstance(geom, GEOSGeometry)
lon, lat = (-95.4010, 29.7079)
lat_lon = g.lat_lon(query)
lat_lon = (lat_lon[1], lat_lon[0])
for tup in (geom.tuple, g.coords(query), g.lon_lat(query), lat_lon):
self.assertAlmostEqual(lon, tup[0], 4)
self.assertAlmostEqual(lat, tup[1], 4)
def test05_unicode_response(self):
"Testing that GeoIP strings are properly encoded, see #16553."
g = GeoIP()
d = g.city("duesseldorf.de")
self.assertEqual('Düsseldorf', d['city'])
d = g.country('200.26.205.1')
# Some databases have only unaccented countries
self.assertIn(d['country_name'], ('Curaçao', 'Curacao'))
def test_deprecation_warning(self):
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always')
GeoIP()
self.assertEqual(len(warns), 1)
msg = str(warns[0].message)
self.assertIn('django.contrib.gis.geoip is deprecated', msg)
| bsd-3-clause |
moonbury/notebooks | github/MasteringPandas/2060_11_Code/display_iris_dimensions.py | 3 | 1180 | %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from IPython.core.pylabtools import figsize
iris_data=load_iris() # Load the iris dataset
figsize(12.5, 10)
fig = plt.figure()
fig.suptitle('Plots of Iris Dimensions', fontsize=14)
fig.subplots_adjust(wspace=0.35,hspace=0.5)
colors=('r','g','b')
cols=[colors[i] for i in iris_data.target]
def get_legend_data(clrs):
leg_data = []
for clr in clrs:
line=plt.Line2D(range(1),range(1),marker='o', color=clr)
leg_data.append(line)
return tuple(leg_data)
def display_iris_dimensions(fig,x_idx, y_idx,sp_idx):
ax = fig.add_subplot(3,2,sp_idx)
ax.scatter(iris_data.data[:, x_idx], iris_data.data[:,y_idx],c=cols)
ax.set_xlabel(iris_data.feature_names[x_idx])
ax.set_ylabel(iris_data.feature_names[y_idx])
leg_data = get_legend_data(colors)
ax.legend(leg_data,iris_data.target_names, numpoints=1,
bbox_to_anchor=(1.265,1.0),prop={'size':8.5})
idx = 1
pairs = [(x,y) for x in range(0,4) for y in range(0,4) if x < y]
for (x,y) in pairs:
display_iris_dimensions(fig,x,y,idx);
idx += 1
| gpl-3.0 |
yhpeng-git/mxnet | python/mxnet/model.py | 1 | 38915 | # pylint: disable=fixme, invalid-name, too-many-arguments, too-many-locals, too-many-lines
# pylint: disable=too-many-branches, too-many-statements
"""MXNet model module"""
from __future__ import absolute_import, print_function
import time
import logging
import warnings
from collections import namedtuple
import numpy as np
from . import io
from . import nd
from . import symbol as sym
from . import optimizer as opt
from . import metric
from . import kvstore as kvs
from .context import Context, cpu
from .initializer import Uniform
from .optimizer import get_updater
from .executor_manager import DataParallelExecutorManager, _check_arguments, _load_data
from .io import DataDesc
from .base import mx_real_t
BASE_ESTIMATOR = object
try:
from sklearn.base import BaseEstimator
BASE_ESTIMATOR = BaseEstimator
except ImportError:
SKLEARN_INSTALLED = False
# Parameter to pass to batch_end_callback
BatchEndParam = namedtuple('BatchEndParams',
['epoch',
'nbatch',
'eval_metric',
'locals'])
def _create_kvstore(kvstore, num_device, arg_params):
"""Create kvstore
    This function selects and creates a proper kvstore given the kvstore type.
Parameters
----------
kvstore : KVStore or str
The kvstore.
num_device : int
The number of devices
arg_params : dict of str to `NDArray`.
Model parameter, dict of name to `NDArray` of net's weights.
"""
update_on_kvstore = True
if kvstore is None:
kv = None
elif isinstance(kvstore, kvs.KVStore):
kv = kvstore
elif isinstance(kvstore, str):
# create kvstore using the string type
        if num_device == 1 and 'dist' not in kvstore:
# no need to use kv for single device and single machine
kv = None
else:
kv = kvs.create(kvstore)
        if kvstore == 'local':
            # for a local kvstore, decide automatically whether to update on
            # the kvstore based on the size of the largest parameter
max_size = max(np.prod(param.shape) for param in
arg_params.values())
if max_size > 1024 * 1024 * 16:
update_on_kvstore = False
else:
raise TypeError('kvstore must be KVStore, str or None')
if kv is None:
update_on_kvstore = False
return (kv, update_on_kvstore)
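# A quick sketch of the 'local' heuristic above; the parameter name, shape and
# helper name are illustrative only. With a single weight matrix of more than
# 16 * 1024 * 1024 elements, updating falls back to the workers instead of
# being performed on the kvstore.
def _create_kvstore_example():
    arg_params = {'fc1_weight': nd.zeros((8192, 4096))}  # ~33.5M elements
    kv, update_on_kvstore = _create_kvstore('local', 2, arg_params)
    assert not update_on_kvstore
    return kv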
def _initialize_kvstore(kvstore, param_arrays, arg_params, param_names,
update_on_kvstore):
"""Initialize kvstore"""
for idx, param_on_devs in enumerate(param_arrays):
kvstore.init(idx, arg_params[param_names[idx]])
if update_on_kvstore:
kvstore.pull(idx, param_on_devs, priority=-idx)
def _update_params_on_kvstore(param_arrays, grad_arrays, kvstore):
"""Perform update of param_arrays from grad_arrays on kvstore."""
for index, pair in enumerate(zip(param_arrays, grad_arrays)):
arg_list, grad_list = pair
if grad_list[0] is None:
continue
# push gradient, priority is negative index
kvstore.push(index, grad_list, priority=-index)
# pull back the weights
kvstore.pull(index, arg_list, priority=-index)
def _update_params(param_arrays, grad_arrays, updater, num_device,
kvstore=None):
"""Perform update of param_arrays from grad_arrays not on kvstore."""
for index, pair in enumerate(zip(param_arrays, grad_arrays)):
arg_list, grad_list = pair
if grad_list[0] is None:
continue
if kvstore:
# push gradient, priority is negative index
kvstore.push(index, grad_list, priority=-index)
            # pull back the summed gradients, to the same locations.
kvstore.pull(index, grad_list, priority=-index)
for k, p in enumerate(zip(arg_list, grad_list)):
# faked an index here, to make optimizer create diff
# state for the same index but on diff devs, TODO(mli)
# use a better solution latter
w, g = p
updater(index*num_device+k, g, w)
def _multiple_callbacks(callbacks, *args, **kwargs):
"""Sends args and kwargs to any configured callbacks.
This handles the cases where the 'callbacks' variable
is ``None``, a single function, or a list.
"""
if isinstance(callbacks, list):
for cb in callbacks:
cb(*args, **kwargs)
return
if callbacks:
callbacks(*args, **kwargs)
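# A hedged illustration (assumed, not from the original source):
# _multiple_callbacks treats None, a single callable, and a list of callables
# uniformly, which is why the training loop below can pass callbacks through
# without type checks.
def _example_multiple_callbacks():
    seen = []
    _multiple_callbacks(None, 'ignored')                   # None: nothing happens
    _multiple_callbacks(seen.append, 'a')                  # single callable
    _multiple_callbacks([seen.append, seen.append], 'b')   # list of callables
    return seen                                            # ['a', 'b', 'b']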
def _train_multi_device(symbol, ctx, arg_names, param_names, aux_names,
arg_params, aux_params,
begin_epoch, end_epoch, epoch_size, optimizer,
kvstore, update_on_kvstore,
train_data, eval_data=None, eval_metric=None,
epoch_end_callback=None, batch_end_callback=None,
logger=None, work_load_list=None, monitor=None,
eval_end_callback=None,
eval_batch_end_callback=None, sym_gen=None):
"""Internal training function on multiple devices.
    This function also works for a single device.
Parameters
----------
symbol : Symbol
The network configuration.
ctx : list of Context
The training devices.
arg_names: list of str
Name of all arguments of the network.
param_names: list of str
Name of all trainable parameters of the network.
aux_names: list of str
Name of all auxiliary states of the network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
begin_epoch : int
        The beginning training epoch.
end_epoch : int
The end training epoch.
epoch_size : int, optional
        Number of batches in an epoch. By default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : Optimizer
The optimization algorithm
train_data : DataIter
Training data iterator.
eval_data : DataIter
Validation data iterator.
eval_metric : EvalMetric
An evaluation function or a list of evaluation functions.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback : callable(BatchEndParams)
A callback that is invoked at end of each batch.
        This can be used to measure speed, get results from the evaluation metric, etc.
kvstore : KVStore
The KVStore.
update_on_kvstore : bool
        Whether or not to perform weight updates on the kvstore.
logger : logging logger
When not specified, default logger will be used.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as ``ctx``.
monitor : Monitor, optional
Monitor installed to executor,
for monitoring outputs, weights, and gradients for debugging.
Notes
-----
    - This function updates the NDArrays in `arg_params` and `aux_params` in place.
"""
if logger is None:
logger = logging
executor_manager = DataParallelExecutorManager(symbol=symbol,
sym_gen=sym_gen,
ctx=ctx,
train_data=train_data,
param_names=param_names,
arg_names=arg_names,
aux_names=aux_names,
work_load_list=work_load_list,
logger=logger)
if monitor:
executor_manager.install_monitor(monitor)
executor_manager.set_params(arg_params, aux_params)
if not update_on_kvstore:
updater = get_updater(optimizer)
if kvstore:
_initialize_kvstore(kvstore=kvstore,
param_arrays=executor_manager.param_arrays,
arg_params=arg_params,
param_names=executor_manager.param_names,
update_on_kvstore=update_on_kvstore)
if update_on_kvstore:
kvstore.set_optimizer(optimizer)
# Now start training
train_data.reset()
for epoch in range(begin_epoch, end_epoch):
# Training phase
tic = time.time()
eval_metric.reset()
nbatch = 0
# Iterate over training data.
while True:
do_reset = True
for data_batch in train_data:
executor_manager.load_data_batch(data_batch)
if monitor is not None:
monitor.tic()
executor_manager.forward(is_train=True)
executor_manager.backward()
if update_on_kvstore:
_update_params_on_kvstore(executor_manager.param_arrays,
executor_manager.grad_arrays,
kvstore)
else:
_update_params(executor_manager.param_arrays,
executor_manager.grad_arrays,
updater=updater,
num_device=len(ctx),
kvstore=kvstore)
if monitor is not None:
monitor.toc_print()
# evaluate at end, so we can lazy copy
executor_manager.update_metric(eval_metric, data_batch.label)
nbatch += 1
# batch callback (for print purpose)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=nbatch,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(batch_end_callback, batch_end_params)
# this epoch is done possibly earlier
if epoch_size is not None and nbatch >= epoch_size:
do_reset = False
break
if do_reset:
logger.info('Epoch[%d] Resetting Data Iterator', epoch)
train_data.reset()
# this epoch is done
if epoch_size is None or nbatch >= epoch_size:
break
toc = time.time()
logger.info('Epoch[%d] Time cost=%.3f', epoch, (toc - tic))
if epoch_end_callback or epoch + 1 == end_epoch:
executor_manager.copy_to(arg_params, aux_params)
_multiple_callbacks(epoch_end_callback, epoch, symbol, arg_params, aux_params)
# evaluation
if eval_data:
eval_metric.reset()
eval_data.reset()
total_num_batch = 0
for i, eval_batch in enumerate(eval_data):
executor_manager.load_data_batch(eval_batch)
executor_manager.forward(is_train=False)
executor_manager.update_metric(eval_metric, eval_batch.label)
if eval_batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=i,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(eval_batch_end_callback, batch_end_params)
total_num_batch += 1
if eval_end_callback is not None:
eval_end_params = BatchEndParam(epoch=epoch,
nbatch=total_num_batch,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(eval_end_callback, eval_end_params)
eval_data.reset()
# end of all epochs
return
def save_checkpoint(prefix, epoch, symbol, arg_params, aux_params):
"""Checkpoint the model data into file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
The epoch number of the model.
symbol : Symbol
The input Symbol.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
"""
if symbol is not None:
symbol.save('%s-symbol.json' % prefix)
save_dict = {('arg:%s' % k) : v.as_in_context(cpu()) for k, v in arg_params.items()}
save_dict.update({('aux:%s' % k) : v.as_in_context(cpu()) for k, v in aux_params.items()})
param_name = '%s-%04d.params' % (prefix, epoch)
nd.save(param_name, save_dict)
logging.info('Saved checkpoint to \"%s\"', param_name)
def load_checkpoint(prefix, epoch):
"""Load model checkpoint from file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
        Epoch number of the model we would like to load.
Returns
-------
symbol : Symbol
The symbol configuration of computation network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- Symbol will be loaded from ``prefix-symbol.json``.
- Parameters will be loaded from ``prefix-epoch.params``.
"""
symbol = sym.load('%s-symbol.json' % prefix)
save_dict = nd.load('%s-%04d.params' % (prefix, epoch))
arg_params = {}
aux_params = {}
for k, v in save_dict.items():
tp, name = k.split(':', 1)
if tp == 'arg':
arg_params[name] = v
if tp == 'aux':
aux_params[name] = v
return (symbol, arg_params, aux_params)
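# A hedged usage sketch (assumed, not in the original file): a save/load round
# trip with the two helpers above. The prefix, epoch and tiny network are
# hypothetical.
def _example_checkpoint_roundtrip(prefix='/tmp/example-model', epoch=3):
    net = sym.FullyConnected(data=sym.Variable('data'), name='fc1', num_hidden=10)
    arg_params = {'fc1_weight': nd.zeros((10, 4)), 'fc1_bias': nd.zeros((10,))}
    # writes prefix-symbol.json and prefix-0003.params
    save_checkpoint(prefix, epoch, net, arg_params, aux_params={})
    # reads (symbol, arg_params, aux_params) back from those files
    return load_checkpoint(prefix, epoch)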
from .callback import LogValidationMetricsCallback # pylint: disable=wrong-import-position
class FeedForward(BASE_ESTIMATOR):
"""Model class of MXNet for training and predicting feedforward nets.
    This class is designed for a single-data, single-output supervised network.
Parameters
----------
symbol : Symbol
The symbol configuration of computation network.
ctx : Context or list of Context, optional
The device context of training and prediction.
To use multi GPU training, pass in a list of gpu contexts.
num_epoch : int, optional
        Training parameter, the number of training epochs.
epoch_size : int, optional
        Number of batches in an epoch. By default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : str or Optimizer, optional
Training parameter, name or optimizer object for training.
initializer : initializer function, optional
Training parameter, the initialization scheme used.
numpy_batch_size : int, optional
The batch size of training data.
        Only needed when the input array is a numpy array.
arg_params : dict of str to NDArray, optional
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray, optional
Model parameter, dict of name to NDArray of net's auxiliary states.
allow_extra_params : boolean, optional
        Whether to allow extra parameters that are not needed by the symbol
        to be passed in ``arg_params`` and ``aux_params``.
        If this is True, no error will be thrown when ``arg_params`` and ``aux_params``
        contain more parameters than needed.
begin_epoch : int, optional
        The beginning training epoch.
kwargs : dict
The additional keyword arguments passed to optimizer.
"""
def __init__(self, symbol, ctx=None,
num_epoch=None, epoch_size=None, optimizer='sgd',
initializer=Uniform(0.01),
numpy_batch_size=128,
arg_params=None, aux_params=None,
allow_extra_params=False,
begin_epoch=0,
**kwargs):
warnings.warn(
'\033[91mmxnet.model.FeedForward has been deprecated. ' + \
'Please use mxnet.mod.Module instead.\033[0m',
DeprecationWarning, stacklevel=2)
if isinstance(symbol, sym.Symbol):
self.symbol = symbol
self.sym_gen = None
else:
assert(callable(symbol))
self.symbol = None
self.sym_gen = symbol
# model parameters
self.arg_params = arg_params
self.aux_params = aux_params
self.allow_extra_params = allow_extra_params
self.argument_checked = False
if self.sym_gen is None:
self._check_arguments()
# basic configuration
if ctx is None:
ctx = [cpu()]
elif isinstance(ctx, Context):
ctx = [ctx]
self.ctx = ctx
# training parameters
self.num_epoch = num_epoch
self.epoch_size = epoch_size
self.kwargs = kwargs.copy()
self.optimizer = optimizer
self.initializer = initializer
self.numpy_batch_size = numpy_batch_size
# internal helper state
self._pred_exec = None
self.begin_epoch = begin_epoch
def _check_arguments(self):
"""verify the argument of the default symbol and user provided parameters"""
if self.argument_checked:
return
assert(self.symbol is not None)
self.argument_checked = True
# check if symbol contain duplicated names.
_check_arguments(self.symbol)
# rematch parameters to delete useless ones
if self.allow_extra_params:
if self.arg_params:
arg_names = set(self.symbol.list_arguments())
self.arg_params = {k : v for k, v in self.arg_params.items()
if k in arg_names}
if self.aux_params:
aux_names = set(self.symbol.list_auxiliary_states())
self.aux_params = {k : v for k, v in self.aux_params.items()
if k in aux_names}
@staticmethod
def _is_data_arg(name):
"""Check if name is a data argument."""
return name.endswith('data') or name.endswith('label')
def _init_params(self, inputs, overwrite=False):
"""Initialize weight parameters and auxiliary states."""
inputs = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in inputs]
input_shapes = {item.name: item.shape for item in inputs}
arg_shapes, _, aux_shapes = self.symbol.infer_shape(**input_shapes)
assert arg_shapes is not None
input_dtypes = {item.name: item.dtype for item in inputs}
arg_dtypes, _, aux_dtypes = self.symbol.infer_type(**input_dtypes)
assert arg_dtypes is not None
arg_names = self.symbol.list_arguments()
input_names = input_shapes.keys()
param_names = [key for key in arg_names if key not in input_names]
aux_names = self.symbol.list_auxiliary_states()
param_name_attrs = [x for x in zip(arg_names, arg_shapes, arg_dtypes)
if x[0] in param_names]
arg_params = {k : nd.zeros(shape=s, dtype=t)
for k, s, t in param_name_attrs}
aux_name_attrs = [x for x in zip(aux_names, aux_shapes, aux_dtypes)
if x[0] in aux_names]
aux_params = {k : nd.zeros(shape=s, dtype=t)
for k, s, t in aux_name_attrs}
for k, v in arg_params.items():
if self.arg_params and k in self.arg_params and (not overwrite):
arg_params[k][:] = self.arg_params[k][:]
else:
self.initializer(k, v)
for k, v in aux_params.items():
if self.aux_params and k in self.aux_params and (not overwrite):
aux_params[k][:] = self.aux_params[k][:]
else:
self.initializer(k, v)
self.arg_params = arg_params
self.aux_params = aux_params
return (arg_names, list(param_names), aux_names)
def __getstate__(self):
this = self.__dict__.copy()
this['_pred_exec'] = None
return this
def __setstate__(self, state):
self.__dict__.update(state)
def _init_predictor(self, input_shapes, type_dict=None):
"""Initialize the predictor module for running prediction."""
if self._pred_exec is not None:
arg_shapes, _, _ = self.symbol.infer_shape(**dict(input_shapes))
assert arg_shapes is not None, "Incomplete input shapes"
pred_shapes = [x.shape for x in self._pred_exec.arg_arrays]
if arg_shapes == pred_shapes:
return
# for now only use the first device
pred_exec = self.symbol.simple_bind(
self.ctx[0], grad_req='null', type_dict=type_dict, **dict(input_shapes))
pred_exec.copy_params_from(self.arg_params, self.aux_params)
_check_arguments(self.symbol)
self._pred_exec = pred_exec
def _init_iter(self, X, y, is_train):
"""Initialize the iterator given input."""
if isinstance(X, (np.ndarray, nd.NDArray)):
if y is None:
if is_train:
raise ValueError('y must be specified when X is numpy.ndarray')
else:
y = np.zeros(X.shape[0])
if not isinstance(y, (np.ndarray, nd.NDArray)):
raise TypeError('y must be ndarray when X is numpy.ndarray')
if X.shape[0] != y.shape[0]:
raise ValueError("The numbers of data points and labels not equal")
if y.ndim == 2 and y.shape[1] == 1:
y = y.flatten()
if y.ndim != 1:
raise ValueError("Label must be 1D or 2D (with 2nd dimension being 1)")
if is_train:
return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size),
shuffle=is_train, last_batch_handle='roll_over')
else:
return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size), shuffle=False)
if not isinstance(X, io.DataIter):
raise TypeError('X must be DataIter, NDArray or numpy.ndarray')
return X
def _init_eval_iter(self, eval_data):
"""Initialize the iterator given eval_data."""
if eval_data is None:
return eval_data
if isinstance(eval_data, (tuple, list)) and len(eval_data) == 2:
if eval_data[0] is not None:
if eval_data[1] is None and isinstance(eval_data[0], io.DataIter):
return eval_data[0]
input_data = (np.array(eval_data[0]) if isinstance(eval_data[0], list)
else eval_data[0])
input_label = (np.array(eval_data[1]) if isinstance(eval_data[1], list)
else eval_data[1])
return self._init_iter(input_data, input_label, is_train=True)
else:
raise ValueError("Eval data is NONE")
if not isinstance(eval_data, io.DataIter):
raise TypeError('Eval data must be DataIter, or ' \
'NDArray/numpy.ndarray/list pair (i.e. tuple/list of length 2)')
return eval_data
def predict(self, X, num_batch=None, return_data=False, reset=True):
"""Run the prediction, always only use one device.
Parameters
----------
X : mxnet.DataIter
num_batch : int or None
            The number of batches to run. Go through all batches if ``None``.
Returns
-------
y : numpy.ndarray or a list of numpy.ndarray if the network has multiple outputs.
The predicted value of the output.
"""
X = self._init_iter(X, None, is_train=False)
if reset:
X.reset()
data_shapes = X.provide_data
data_names = [x[0] for x in data_shapes]
type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
for x in X.provide_data:
if isinstance(x, DataDesc):
type_dict[x.name] = x.dtype
else:
type_dict[x[0]] = mx_real_t
self._init_predictor(data_shapes, type_dict)
batch_size = X.batch_size
data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]
output_list = [[] for _ in range(len(self._pred_exec.outputs))]
if return_data:
data_list = [[] for _ in X.provide_data]
label_list = [[] for _ in X.provide_label]
i = 0
for batch in X:
_load_data(batch, data_arrays)
self._pred_exec.forward(is_train=False)
padded = batch.pad
real_size = batch_size - padded
for o_list, o_nd in zip(output_list, self._pred_exec.outputs):
o_list.append(o_nd[0:real_size].asnumpy())
if return_data:
for j, x in enumerate(batch.data):
data_list[j].append(x[0:real_size].asnumpy())
for j, x in enumerate(batch.label):
label_list[j].append(x[0:real_size].asnumpy())
i += 1
if num_batch is not None and i == num_batch:
break
outputs = [np.concatenate(x) for x in output_list]
if len(outputs) == 1:
outputs = outputs[0]
if return_data:
data = [np.concatenate(x) for x in data_list]
label = [np.concatenate(x) for x in label_list]
if len(data) == 1:
data = data[0]
if len(label) == 1:
label = label[0]
return outputs, data, label
else:
return outputs
def score(self, X, eval_metric='acc', num_batch=None, batch_end_callback=None, reset=True):
"""Run the model given an input and calculate the score
as assessed by an evaluation metric.
Parameters
----------
X : mxnet.DataIter
        eval_metric : metric.EvalMetric or str
            The metric for calculating the score.
num_batch : int or None
            The number of batches to run. Go through all batches if ``None``.
Returns
-------
s : float
The final score.
"""
# setup metric
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
X = self._init_iter(X, None, is_train=False)
if reset:
X.reset()
data_shapes = X.provide_data
data_names = [x[0] for x in data_shapes]
type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
for x in X.provide_data:
if isinstance(x, DataDesc):
type_dict[x.name] = x.dtype
else:
type_dict[x[0]] = mx_real_t
self._init_predictor(data_shapes, type_dict)
data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]
for i, batch in enumerate(X):
if num_batch is not None and i == num_batch:
break
_load_data(batch, data_arrays)
self._pred_exec.forward(is_train=False)
eval_metric.update(batch.label, self._pred_exec.outputs)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=0,
nbatch=i,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(batch_end_callback, batch_end_params)
return eval_metric.get()[1]
def fit(self, X, y=None, eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None, kvstore='local', logger=None,
work_load_list=None, monitor=None, eval_end_callback=LogValidationMetricsCallback(),
eval_batch_end_callback=None):
"""Fit the model.
Parameters
----------
X : DataIter, or numpy.ndarray/NDArray
Training data. If `X` is a `DataIter`, the name or (if name not available)
the position of its outputs should match the corresponding variable
names defined in the symbolic graph.
y : numpy.ndarray/NDArray, optional
Training set label.
If X is ``numpy.ndarray`` or `NDArray`, `y` is required to be set.
While y can be 1D or 2D (with 2nd dimension as 1), its first dimension must be
the same as `X`, i.e. the number of data points and labels should be equal.
eval_data : DataIter or numpy.ndarray/list/NDArray pair
If eval_data is numpy.ndarray/list/NDArray pair,
it should be ``(valid_data, valid_label)``.
eval_metric : metric.EvalMetric or str or callable
The evaluation metric. This could be the name of evaluation metric
or a custom evaluation function that returns statistics
based on a minibatch.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback: callable(epoch)
A callback that is invoked at end of each batch for purposes of printing.
kvstore: KVStore or str, optional
The KVStore or a string kvstore type: 'local', 'dist_sync', 'dist_async'
            Defaults to 'local'; often no need to change for a single machine.
logger : logging logger, optional
When not specified, default logger will be used.
        work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
Note
----
KVStore behavior
        - 'local', multiple devices on a single machine, automatically chooses the best type.
- 'dist_sync', multiple machines communicating via BSP.
- 'dist_async', multiple machines with asynchronous communication.
"""
data = self._init_iter(X, y, is_train=True)
eval_data = self._init_eval_iter(eval_data)
if self.sym_gen:
self.symbol = self.sym_gen(data.default_bucket_key) # pylint: disable=no-member
self._check_arguments()
self.kwargs["sym"] = self.symbol
arg_names, param_names, aux_names = \
self._init_params(data.provide_data+data.provide_label)
# setup metric
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
# create kvstore
(kvstore, update_on_kvstore) = _create_kvstore(
kvstore, len(self.ctx), self.arg_params)
param_idx2name = {}
if update_on_kvstore:
param_idx2name.update(enumerate(param_names))
else:
for i, n in enumerate(param_names):
for k in range(len(self.ctx)):
param_idx2name[i*len(self.ctx)+k] = n
self.kwargs["param_idx2name"] = param_idx2name
        # init optimizer
if isinstance(self.optimizer, str):
batch_size = data.batch_size
            if kvstore and 'dist' in kvstore.type and '_async' not in kvstore.type:
batch_size *= kvstore.num_workers
optimizer = opt.create(self.optimizer,
rescale_grad=(1.0/batch_size),
**(self.kwargs))
        elif isinstance(self.optimizer, opt.Optimizer):
            optimizer = self.optimizer
        else:
            raise TypeError('optimizer must be a str or an opt.Optimizer instance')
# do training
_train_multi_device(self.symbol, self.ctx, arg_names, param_names, aux_names,
self.arg_params, self.aux_params,
begin_epoch=self.begin_epoch, end_epoch=self.num_epoch,
epoch_size=self.epoch_size,
optimizer=optimizer,
train_data=data, eval_data=eval_data,
eval_metric=eval_metric,
epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback,
kvstore=kvstore, update_on_kvstore=update_on_kvstore,
logger=logger, work_load_list=work_load_list, monitor=monitor,
eval_end_callback=eval_end_callback,
eval_batch_end_callback=eval_batch_end_callback,
sym_gen=self.sym_gen)
def save(self, prefix, epoch=None):
"""Checkpoint the model checkpoint into file.
You can also use `pickle` to do the job if you only work on Python.
The advantage of `load` and `save` (as compared to `pickle`) is that
the resulting file can be loaded from other MXNet language bindings.
        One can also directly `load`/`save` from/to cloud storage (S3, HDFS).
Parameters
----------
prefix : str
Prefix of model name.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
"""
if epoch is None:
epoch = self.num_epoch
assert epoch is not None
save_checkpoint(prefix, epoch, self.symbol, self.arg_params, self.aux_params)
@staticmethod
def load(prefix, epoch, ctx=None, **kwargs):
"""Load model checkpoint from file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
            Epoch number of the model we would like to load.
ctx : Context or list of Context, optional
The device context of training and prediction.
kwargs : dict
Other parameters for model, including `num_epoch`, optimizer and `numpy_batch_size`.
Returns
-------
model : FeedForward
The loaded model that can be used for prediction.
Notes
-----
        - The symbol will be loaded from ``prefix-symbol.json``.
        - Parameters will be loaded from ``prefix-epoch.params``.
"""
symbol, arg_params, aux_params = load_checkpoint(prefix, epoch)
return FeedForward(symbol, ctx=ctx,
arg_params=arg_params, aux_params=aux_params,
begin_epoch=epoch,
**kwargs)
@staticmethod
def create(symbol, X, y=None, ctx=None,
num_epoch=None, epoch_size=None, optimizer='sgd', initializer=Uniform(0.01),
eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None,
kvstore='local', logger=None, work_load_list=None,
eval_end_callback=LogValidationMetricsCallback(),
eval_batch_end_callback=None, **kwargs):
"""Functional style to create a model.
This function is more consistent with functional
languages such as R, where mutation is not allowed.
Parameters
----------
symbol : Symbol
The symbol configuration of a computation network.
X : DataIter
Training data.
y : numpy.ndarray, optional
If `X` is a ``numpy.ndarray``, `y` must be set.
ctx : Context or list of Context, optional
The device context of training and prediction.
To use multi-GPU training, pass in a list of GPU contexts.
num_epoch : int, optional
            The number of training epochs.
epoch_size : int, optional
            Number of batches in an epoch. By default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : str or Optimizer, optional
The name of the chosen optimizer, or an optimizer object, used for training.
        initializer : initializer function, optional
The initialization scheme used.
eval_data : DataIter or numpy.ndarray pair
            If `eval_data` is a ``numpy.ndarray`` pair, it should
be (`valid_data`, `valid_label`).
eval_metric : metric.EvalMetric or str or callable
The evaluation metric. Can be the name of an evaluation metric
or a custom evaluation function that returns statistics
based on a minibatch.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback: callable(epoch)
A callback that is invoked at end of each batch for print purposes.
kvstore: KVStore or str, optional
            The KVStore or a string kvstore type: 'local', 'dist_sync', 'dist_async'.
            Defaults to 'local'; often no need to change for a single machine.
logger : logging logger, optional
When not specified, default logger will be used.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
"""
model = FeedForward(symbol, ctx=ctx, num_epoch=num_epoch,
epoch_size=epoch_size,
optimizer=optimizer, initializer=initializer, **kwargs)
model.fit(X, y, eval_data=eval_data, eval_metric=eval_metric,
epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback,
kvstore=kvstore,
logger=logger,
work_load_list=work_load_list,
eval_end_callback=eval_end_callback,
eval_batch_end_callback=eval_batch_end_callback)
return model
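# A hedged end-to-end sketch (assumed, not part of the original module): the
# deprecated FeedForward API wired together via the functional create() entry
# point. train_iter and val_iter are hypothetical DataIter objects whose label
# is assumed to be named 'softmax_label'; the two-layer net is illustrative only.
def _example_feedforward_usage(train_iter, val_iter):
    data = sym.Variable('data')
    net = sym.FullyConnected(data=data, name='fc1', num_hidden=64)
    net = sym.Activation(data=net, name='relu1', act_type='relu')
    net = sym.FullyConnected(data=net, name='fc2', num_hidden=10)
    net = sym.SoftmaxOutput(data=net, name='softmax')
    model = FeedForward.create(net, X=train_iter, eval_data=val_iter,
                               ctx=cpu(), num_epoch=2, optimizer='sgd',
                               learning_rate=0.1)
    # predict returns a numpy array (or a list of arrays for multi-output nets)
    return model.predict(val_iter)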
| apache-2.0 |
nens/raster-tools | raster_tools/srtm/fix_nodata.py | 1 | 2321 | # -*- coding: utf-8 -*-
# (c) Nelen & Schuurmans. GPL licensed, see LICENSE.rst.
""" Replace no data value with 0 and pixels with 32767 with 0 too.
Recreates the tifs and leaves old ones as .org files."""
import argparse
import logging
import numpy as np
import os
import sys
import gdal
from raster_tools import datasets
gdal.UseExceptions()
logger = logging.getLogger(__name__)
def fix_nodata(source_paths):
for source_path in source_paths:
# analyze source
source = gdal.Open(source_path)
array = source.ReadAsArray()[np.newaxis, ...]
index = np.where(array == -32767)
no_data_value = source.GetRasterBand(1).GetNoDataValue()
if no_data_value == 0 and index[0].size == 0:
logger.debug('Skip {}'.format(source_path))
continue
# save modified tif
logger.debug('Convert {}'.format(source_path))
array[index] = 0
kwargs = {'no_data_value': 0,
'projection': source.GetProjection(),
'geo_transform': source.GetGeoTransform()}
target_path = '{}.target'.format(source_path)
driver = source.GetDriver()
with datasets.Dataset(array, **kwargs) as target:
target.SetMetadata(source.GetMetadata_List())
target.GetRasterBand(1).SetUnitType(
source.GetRasterBand(1).GetUnitType(),
)
driver.CreateCopy(target_path,
target,
options=['compress=deflate'])
# swap files
source = None
backup_path = '{}.org'.format(source_path)
os.rename(source_path, backup_path)
os.rename(target_path, source_path)
def get_parser():
""" Return argument parser. """
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('source_paths', metavar='SOURCE', nargs='*')
return parser
def main():
""" Call command with args from parser. """
kwargs = vars(get_parser().parse_args())
logging.basicConfig(stream=sys.stderr,
level=logging.DEBUG,
format='%(message)s')
try:
fix_nodata(**kwargs)
return 0
except Exception:
logger.exception('An exception has occurred.')
return 1
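# The reproduction above stops short of an entry-point guard; a minimal,
# assumed one is sketched here so the module can be run directly (the original
# project may instead expose the script through a console_scripts entry point).
if __name__ == '__main__':
    # e.g. python fix_nodata.py srtm_tile_a.tif srtm_tile_b.tif
    sys.exit(main())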
| gpl-3.0 |