""" Greedy Word Swap with Word Importance Ranking =================================================== When WIR method is set to ``unk``, this is a reimplementation of the search method from the paper: Is BERT Really Robust? A Strong Baseline for Natural Language Attack on Text Classification and Entailment by Jin et. al, 2019. See https://arxiv.org/abs/1907.11932 and https://github.com/jind11/TextFooler. """ import numpy as np import torch from torch.nn.functional import softmax from textattack.goal_function_results import GoalFunctionResultStatus from textattack.search_methods import SearchMethod from textattack.shared.validators import ( transformation_consists_of_word_swaps_and_deletions, ) class GreedyWordSwapWIR(SearchMethod): """An attack that greedily chooses from a list of possible perturbations in order of index, after ranking indices by importance. Args: wir_method: method for ranking most important words """ def __init__(self, wir_method="unk"): self.wir_method = wir_method def _get_index_order(self, initial_text): """Returns word indices of ``initial_text`` in descending order of importance.""" len_text = len(initial_text.words) if self.wir_method == "unk": leave_one_texts = [ initial_text.replace_word_at_index(i, "[UNK]") for i in range(len_text) ] leave_one_results, search_over = self.get_goal_results(leave_one_texts) index_scores = np.array([result.score for result in leave_one_results]) elif self.wir_method == "weighted-saliency": # first, compute word saliency leave_one_texts = [ initial_text.replace_word_at_index(i, "[UNK]") for i in range(len_text) ] leave_one_results, search_over = self.get_goal_results(leave_one_texts) saliency_scores = np.array([result.score for result in leave_one_results]) softmax_saliency_scores = softmax( torch.Tensor(saliency_scores), dim=0 ).numpy() # compute the largest change in score we can find by swapping each word delta_ps = [] for idx in range(len_text): transformed_text_candidates = self.get_transformations( initial_text, original_text=initial_text, indices_to_modify=[idx], ) if not transformed_text_candidates: # no valid synonym substitutions for this word delta_ps.append(0.0) continue swap_results, _ = self.get_goal_results(transformed_text_candidates) score_change = [result.score for result in swap_results] max_score_change = np.max(score_change) delta_ps.append(max_score_change) index_scores = softmax_saliency_scores * np.array(delta_ps) elif self.wir_method == "delete": leave_one_texts = [ initial_text.delete_word_at_index(i) for i in range(len_text) ] leave_one_results, search_over = self.get_goal_results(leave_one_texts) index_scores = np.array([result.score for result in leave_one_results]) elif self.wir_method == "random": index_order =
np.arange(len_text)
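# The record above truncates at the completion point (``np.arange(len_text)``).
# For context, a minimal sketch of how a "random" word-importance ranking is
# typically finished: the shuffle call and the descending argsort over
# ``index_scores`` below are assumptions for illustration, not part of the
# original snippet.

import numpy as np

def finish_index_order(index_scores, len_text, wir_method):
    """Hypothetical helper: turn per-word importance scores into a visit order."""
    if wir_method == "random":
        index_order = np.arange(len_text)
        np.random.shuffle(index_order)  # visit word indices in random order
    else:
        # Higher score means more important, so sort indices in descending order.
        index_order = (-np.asarray(index_scores)).argsort()
    return index_order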
""" YTArray class. """ from __future__ import print_function #----------------------------------------------------------------------------- # Copyright (c) 2013, yt Development Team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. #----------------------------------------------------------------------------- import copy import numpy as np from distutils.version import LooseVersion from functools import wraps from numpy import \ add, subtract, multiply, divide, logaddexp, logaddexp2, true_divide, \ floor_divide, negative, power, remainder, mod, absolute, rint, \ sign, conj, exp, exp2, log, log2, log10, expm1, log1p, sqrt, square, \ reciprocal, sin, cos, tan, arcsin, arccos, arctan, arctan2, \ hypot, sinh, cosh, tanh, arcsinh, arccosh, arctanh, deg2rad, rad2deg, \ bitwise_and, bitwise_or, bitwise_xor, invert, left_shift, right_shift, \ greater, greater_equal, less, less_equal, not_equal, equal, logical_and, \ logical_or, logical_xor, logical_not, maximum, minimum, fmax, fmin, \ isreal, iscomplex, isfinite, isinf, isnan, signbit, copysign, nextafter, \ modf, ldexp, frexp, fmod, floor, ceil, trunc, fabs, spacing try: # numpy 1.13 or newer from numpy import positive, divmod as divmod_, isnat, heaviside except ImportError: positive, divmod_, isnat, heaviside = (None,)*4 from yt.units.unit_object import Unit, UnitParseError from yt.units.unit_registry import UnitRegistry from yt.units.dimensions import \ angle, \ current_mks, \ dimensionless, \ em_dimensions from yt.utilities.exceptions import \ YTUnitOperationError, YTUnitConversionError, \ YTUfuncUnitError, YTIterableUnitCoercionError, \ YTInvalidUnitEquivalence, YTEquivalentDimsError from yt.utilities.lru_cache import lru_cache from numbers import Number as numeric_type from yt.utilities.on_demand_imports import _astropy from sympy import Rational from yt.units.unit_lookup_table import \ default_unit_symbol_lut from yt.units.equivalencies import equivalence_registry from yt.utilities.logger import ytLogger as mylog from .pint_conversions import convert_pint_units NULL_UNIT = Unit() POWER_SIGN_MAPPING = {multiply: 1, divide: -1} # redefine this here to avoid a circular import from yt.funcs def iterable(obj): try: len(obj) except: return False return True def return_arr(func): @wraps(func) def wrapped(*args, **kwargs): ret, units = func(*args, **kwargs) if ret.shape == (): return YTQuantity(ret, units) else: # This could be a subclass, so don't call YTArray directly. 
return type(args[0])(ret, units) return wrapped @lru_cache(maxsize=128, typed=False) def sqrt_unit(unit): return unit**0.5 @lru_cache(maxsize=128, typed=False) def multiply_units(unit1, unit2): return unit1 * unit2 def preserve_units(unit1, unit2=None): return unit1 @lru_cache(maxsize=128, typed=False) def power_unit(unit, power): return unit**power @lru_cache(maxsize=128, typed=False) def square_unit(unit): return unit*unit @lru_cache(maxsize=128, typed=False) def divide_units(unit1, unit2): return unit1/unit2 @lru_cache(maxsize=128, typed=False) def reciprocal_unit(unit): return unit**-1 def passthrough_unit(unit, unit2=None): return unit def return_without_unit(unit, unit2=None): return None def arctan2_unit(unit1, unit2): return NULL_UNIT def comparison_unit(unit1, unit2=None): return None def invert_units(unit): raise TypeError( "Bit-twiddling operators are not defined for YTArray instances") def bitop_units(unit1, unit2): raise TypeError( "Bit-twiddling operators are not defined for YTArray instances") def get_inp_u_unary(ufunc, inputs, out_arr=None): inp = inputs[0] u = getattr(inp, 'units', None) if u is None: u = NULL_UNIT if u.dimensions is angle and ufunc in trigonometric_operators: inp = inp.in_units('radian').v if out_arr is not None: out_arr = ufunc(inp).view(np.ndarray) return out_arr, inp, u def get_inp_u_binary(ufunc, inputs): inp1 = coerce_iterable_units(inputs[0]) inp2 = coerce_iterable_units(inputs[1]) unit1 = getattr(inp1, 'units', None) unit2 = getattr(inp2, 'units', None) ret_class = get_binary_op_return_class(type(inp1), type(inp2)) if unit1 is None: unit1 = Unit(registry=getattr(unit2, 'registry', None)) if unit2 is None and ufunc is not power: unit2 = Unit(registry=getattr(unit1, 'registry', None)) elif ufunc is power: unit2 = inp2 if isinstance(unit2, np.ndarray): if isinstance(unit2, YTArray): if unit2.units.is_dimensionless: pass else: raise YTUnitOperationError(ufunc, unit1, unit2) unit2 = 1.0 return (inp1, inp2), (unit1, unit2), ret_class def handle_preserve_units(inps, units, ufunc, ret_class): if units[0] != units[1]: any_nonzero = [np.any(inps[0]), np.any(inps[1])] if any_nonzero[0] == np.bool_(False): units = (units[1], units[1]) elif any_nonzero[1] == np.bool_(False): units = (units[0], units[0]) else: if not units[0].same_dimensions_as(units[1]): raise YTUnitOperationError(ufunc, *units) inps = (inps[0], ret_class(inps[1]).to( ret_class(inps[0]).units)) return inps, units def handle_comparison_units(inps, units, ufunc, ret_class, raise_error=False): if units[0] != units[1]: u1d = units[0].is_dimensionless u2d = units[1].is_dimensionless any_nonzero = [np.any(inps[0]), np.any(inps[1])] if any_nonzero[0] == np.bool_(False): units = (units[1], units[1]) elif any_nonzero[1] == np.bool_(False): units = (units[0], units[0]) elif not any([u1d, u2d]): if not units[0].same_dimensions_as(units[1]): raise YTUnitOperationError(ufunc, *units) else: if raise_error: raise YTUfuncUnitError(ufunc, *units) inps = (inps[0], ret_class(inps[1]).to( ret_class(inps[0]).units)) return inps, units def handle_multiply_divide_units(unit, units, out, out_arr): if unit.is_dimensionless and unit.base_value != 1.0: if not units[0].is_dimensionless: if units[0].dimensions == units[1].dimensions: out_arr = np.multiply(out_arr.view(np.ndarray), unit.base_value, out=out) unit = Unit(registry=unit.registry) return out, out_arr, unit def coerce_iterable_units(input_object): if isinstance(input_object, np.ndarray): return input_object if iterable(input_object): if any([isinstance(o, 
YTArray) for o in input_object]): ff = getattr(input_object[0], 'units', NULL_UNIT, ) if any([ff != getattr(_, 'units', NULL_UNIT) for _ in input_object]): raise YTIterableUnitCoercionError(input_object) # This will create a copy of the data in the iterable. return YTArray(input_object) return input_object else: return input_object def sanitize_units_mul(this_object, other_object): inp = coerce_iterable_units(this_object) ret = coerce_iterable_units(other_object) # If the other object is a YTArray and has the same dimensions as the object # under consideration, convert so we don't mix units with the same # dimensions. if isinstance(ret, YTArray): if inp.units.same_dimensions_as(ret.units): ret.in_units(inp.units) return ret def sanitize_units_add(this_object, other_object, op_string): inp = coerce_iterable_units(this_object) ret = coerce_iterable_units(other_object) # Make sure the other object is a YTArray before we use the `units` # attribute. if isinstance(ret, YTArray): if not inp.units.same_dimensions_as(ret.units): # handle special case of adding or subtracting with zero or # array filled with zero if not np.any(other_object): return ret.view(np.ndarray) elif not np.any(this_object): return ret raise YTUnitOperationError(op_string, inp.units, ret.units) ret = ret.in_units(inp.units) else: # If the other object is not a YTArray, then one of the arrays must be # dimensionless or filled with zeros if not inp.units.is_dimensionless and np.any(ret): raise YTUnitOperationError(op_string, inp.units, dimensionless) return ret def validate_comparison_units(this, other, op_string): # Check that other is a YTArray. if hasattr(other, 'units'): if this.units.expr is other.units.expr: if this.units.base_value == other.units.base_value: return other if not this.units.same_dimensions_as(other.units): raise YTUnitOperationError(op_string, this.units, other.units) return other.in_units(this.units) return other @lru_cache(maxsize=128, typed=False) def _unit_repr_check_same(my_units, other_units): """ Takes a Unit object, or string of known unit symbol, and check that it is compatible with this quantity. Returns Unit object. """ # let Unit() handle units arg if it's not already a Unit obj. 
if not isinstance(other_units, Unit): other_units = Unit(other_units, registry=my_units.registry) equiv_dims = em_dimensions.get(my_units.dimensions, None) if equiv_dims == other_units.dimensions: if current_mks in equiv_dims.free_symbols: base = "SI" else: base = "CGS" raise YTEquivalentDimsError(my_units, other_units, base) if not my_units.same_dimensions_as(other_units): raise YTUnitConversionError( my_units, my_units.dimensions, other_units, other_units.dimensions) return other_units unary_operators = ( negative, absolute, rint, sign, conj, exp, exp2, log, log2, log10, expm1, log1p, sqrt, square, reciprocal, sin, cos, tan, arcsin, arccos, arctan, sinh, cosh, tanh, arcsinh, arccosh, arctanh, deg2rad, rad2deg, invert, logical_not, isreal, iscomplex, isfinite, isinf, isnan, signbit, floor, ceil, trunc, modf, frexp, fabs, spacing, positive, isnat, ) binary_operators = ( add, subtract, multiply, divide, logaddexp, logaddexp2, true_divide, power, remainder, mod, arctan2, hypot, bitwise_and, bitwise_or, bitwise_xor, left_shift, right_shift, greater, greater_equal, less, less_equal, not_equal, equal, logical_and, logical_or, logical_xor, maximum, minimum, fmax, fmin, copysign, nextafter, ldexp, fmod, divmod_, heaviside ) trigonometric_operators = ( sin, cos, tan, ) class YTArray(np.ndarray): """ An ndarray subclass that attaches a symbolic unit object to the array data. Parameters ---------- input_array : :obj:`!iterable` A tuple, list, or array to attach units to input_units : String unit specification, unit symbol object, or astropy units The units of the array. Powers must be specified using python syntax (cm**3, not cm^3). registry : ~yt.units.unit_registry.UnitRegistry The registry to create units from. If input_units is already associated with a unit registry and this is specified, this will be used instead of the registry associated with the unit object. dtype : data-type The dtype of the array data. Defaults to the dtype of the input data, or, if none is found, uses np.float64 bypass_validation : boolean If True, all input validation is skipped. Using this option may produce corrupted, invalid units or array data, but can lead to significant speedups in the input validation logic adds significant overhead. If set, input_units *must* be a valid unit object. Defaults to False. Examples -------- >>> from yt import YTArray >>> a = YTArray([1, 2, 3], 'cm') >>> b = YTArray([4, 5, 6], 'm') >>> a + b YTArray([ 401., 502., 603.]) cm >>> b + a YTArray([ 4.01, 5.02, 6.03]) m NumPy ufuncs will pass through units where appropriate. >>> import numpy as np >>> a = YTArray(np.arange(8) - 4, 'g/cm**3') >>> np.abs(a) YTArray([4, 3, 2, 1, 0, 1, 2, 3]) g/cm**3 and strip them when it would be annoying to deal with them. >>> np.log10(a) array([ -inf, 0. 
, 0.30103 , 0.47712125, 0.60205999, 0.69897 , 0.77815125, 0.84509804]) YTArray is tightly integrated with yt datasets: >>> import yt >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030') >>> a = ds.arr(np.ones(5), 'code_length') >>> a.in_cgs() YTArray([ 3.08600000e+24, 3.08600000e+24, 3.08600000e+24, 3.08600000e+24, 3.08600000e+24]) cm This is equivalent to: >>> b = YTArray(np.ones(5), 'code_length', registry=ds.unit_registry) >>> np.all(a == b) True """ _ufunc_registry = { add: preserve_units, subtract: preserve_units, multiply: multiply_units, divide: divide_units, logaddexp: return_without_unit, logaddexp2: return_without_unit, true_divide: divide_units, floor_divide: divide_units, negative: passthrough_unit, power: power_unit, remainder: preserve_units, mod: preserve_units, fmod: preserve_units, absolute: passthrough_unit, fabs: passthrough_unit, rint: return_without_unit, sign: return_without_unit, conj: passthrough_unit, exp: return_without_unit, exp2: return_without_unit, log: return_without_unit, log2: return_without_unit, log10: return_without_unit, expm1: return_without_unit, log1p: return_without_unit, sqrt: sqrt_unit, square: square_unit, reciprocal: reciprocal_unit, sin: return_without_unit, cos: return_without_unit, tan: return_without_unit, sinh: return_without_unit, cosh: return_without_unit, tanh: return_without_unit, arcsin: return_without_unit, arccos: return_without_unit, arctan: return_without_unit, arctan2: arctan2_unit, arcsinh: return_without_unit, arccosh: return_without_unit, arctanh: return_without_unit, hypot: preserve_units, deg2rad: return_without_unit, rad2deg: return_without_unit, bitwise_and: bitop_units, bitwise_or: bitop_units, bitwise_xor: bitop_units, invert: invert_units, left_shift: bitop_units, right_shift: bitop_units, greater: comparison_unit, greater_equal: comparison_unit, less: comparison_unit, less_equal: comparison_unit, not_equal: comparison_unit, equal: comparison_unit, logical_and: comparison_unit, logical_or: comparison_unit, logical_xor: comparison_unit, logical_not: return_without_unit, maximum: preserve_units, minimum: preserve_units, fmax: preserve_units, fmin: preserve_units, isreal: return_without_unit, iscomplex: return_without_unit, isfinite: return_without_unit, isinf: return_without_unit, isnan: return_without_unit, signbit: return_without_unit, copysign: passthrough_unit, nextafter: preserve_units, modf: passthrough_unit, ldexp: bitop_units, frexp: return_without_unit, floor: passthrough_unit, ceil: passthrough_unit, trunc: passthrough_unit, spacing: passthrough_unit, positive: passthrough_unit, divmod_: passthrough_unit, isnat: return_without_unit, heaviside: preserve_units, } __array_priority__ = 2.0 def __new__(cls, input_array, input_units=None, registry=None, dtype=None, bypass_validation=False): if dtype is None: dtype = getattr(input_array, 'dtype', np.float64) if bypass_validation is True: obj = np.asarray(input_array, dtype=dtype).view(cls) obj.units = input_units if registry is not None: obj.units.registry = registry return obj if input_array is NotImplemented: return input_array.view(cls) if registry is None and isinstance(input_units, (str, bytes)): if input_units.startswith('code_'): raise UnitParseError( "Code units used without referring to a dataset. 
\n" "Perhaps you meant to do something like this instead: \n" "ds.arr(%s, \"%s\")" % (input_array, input_units) ) if isinstance(input_array, YTArray): ret = input_array.view(cls) if input_units is None: if registry is None: ret.units = input_array.units else: units = Unit(str(input_array.units), registry=registry) ret.units = units elif isinstance(input_units, Unit): ret.units = input_units else: ret.units = Unit(input_units, registry=registry) return ret elif isinstance(input_array, np.ndarray): pass elif iterable(input_array) and input_array: if isinstance(input_array[0], YTArray): return YTArray(np.array(input_array, dtype=dtype), input_array[0].units, registry=registry) # Input array is an already formed ndarray instance # We first cast to be our class type obj = np.asarray(input_array, dtype=dtype).view(cls) # Check units type if input_units is None: # Nothing provided. Make dimensionless... units = Unit() elif isinstance(input_units, Unit): if registry and registry is not input_units.registry: units = Unit(str(input_units), registry=registry) else: units = input_units else: # units kwarg set, but it's not a Unit object. # don't handle all the cases here, let the Unit class handle if # it's a str. units = Unit(input_units, registry=registry) # Attach the units obj.units = units return obj def __repr__(self): """ """ return super(YTArray, self).__repr__()+' '+self.units.__repr__() def __str__(self): """ """ return str(self.view(np.ndarray)) + ' ' + str(self.units) # # Start unit conversion methods # def convert_to_units(self, units): """ Convert the array and units to the given units. Parameters ---------- units : Unit object or str The units you want to convert to. """ new_units = _unit_repr_check_same(self.units, units) (conversion_factor, offset) = self.units.get_conversion_factor(new_units) self.units = new_units values = self.d values *= conversion_factor if offset: np.subtract(self, offset*self.uq, self) return self def convert_to_base(self, unit_system="cgs"): """ Convert the array and units to the equivalent base units in the specified unit system. Parameters ---------- unit_system : string, optional The unit system to be used in the conversion. If not specified, the default base units of cgs are used. Examples -------- >>> E = YTQuantity(2.5, "erg/s") >>> E.convert_to_base(unit_system="galactic") """ return self.convert_to_units(self.units.get_base_equivalent(unit_system)) def convert_to_cgs(self): """ Convert the array and units to the equivalent cgs units. """ return self.convert_to_units(self.units.get_cgs_equivalent()) def convert_to_mks(self): """ Convert the array and units to the equivalent mks units. """ return self.convert_to_units(self.units.get_mks_equivalent()) def in_units(self, units, equivalence=None, **kwargs): """ Creates a copy of this array with the data in the supplied units, and returns it. Optionally, an equivalence can be specified to convert to an equivalent quantity which is not in the same dimensions. .. note:: All additional keyword arguments are passed to the equivalency, which should be used if that particular equivalency requires them. Parameters ---------- units : Unit object or string The units you want to get a new quantity in. equivalence : string, optional The equivalence you wish to use. To see which equivalencies are supported for this unitful quantity, try the :meth:`list_equivalencies` method. 
Default: None Returns ------- YTArray """ if equivalence is None: new_units = _unit_repr_check_same(self.units, units) (conversion_factor, offset) = self.units.get_conversion_factor(new_units) new_array = type(self)(self.ndview * conversion_factor, new_units) if offset: np.subtract(new_array, offset*new_array.uq, new_array) return new_array else: return self.to_equivalent(units, equivalence, **kwargs) def to(self, units, equivalence=None, **kwargs): """ An alias for YTArray.in_units(). See the docstrings of that function for details. """ return self.in_units(units, equivalence=equivalence, **kwargs) def to_value(self, units=None, equivalence=None, **kwargs): """ Creates a copy of this array with the data in the supplied units, and returns it without units. Output is therefore a bare NumPy array. Optionally, an equivalence can be specified to convert to an equivalent quantity which is not in the same dimensions. .. note:: All additional keyword arguments are passed to the equivalency, which should be used if that particular equivalency requires them. Parameters ---------- units : Unit object or string, optional The units you want to get the bare quantity in. If not specified, the value will be returned in the current units. equivalence : string, optional The equivalence you wish to use. To see which equivalencies are supported for this unitful quantity, try the :meth:`list_equivalencies` method. Default: None Returns ------- NumPy array """ if units is None: v = self.value else: v = self.in_units(units, equivalence=equivalence, **kwargs).value if isinstance(self, YTQuantity): return float(v) else: return v def in_base(self, unit_system="cgs"): """ Creates a copy of this array with the data in the specified unit system, and returns it in that system's base units. Parameters ---------- unit_system : string, optional The unit system to be used in the conversion. If not specified, the default base units of cgs are used. Examples -------- >>> E = YTQuantity(2.5, "erg/s") >>> E_new = E.in_base(unit_system="galactic") """ return self.in_units(self.units.get_base_equivalent(unit_system)) def in_cgs(self): """ Creates a copy of this array with the data in the equivalent cgs units, and returns it. Returns ------- Quantity object with data converted to cgs units. """ return self.in_units(self.units.get_cgs_equivalent()) def in_mks(self): """ Creates a copy of this array with the data in the equivalent mks units, and returns it. Returns ------- Quantity object with data converted to mks units. """ return self.in_units(self.units.get_mks_equivalent()) def to_equivalent(self, unit, equiv, **kwargs): """ Convert a YTArray or YTQuantity to an equivalent, e.g., something that is related by only a constant factor but not in the same units. Parameters ---------- unit : string The unit that you wish to convert to. equiv : string The equivalence you wish to use. To see which equivalencies are supported for this unitful quantity, try the :meth:`list_equivalencies` method. 
Examples -------- >>> a = yt.YTArray(1.0e7,"K") >>> a.to_equivalent("keV", "thermal") """ conv_unit = Unit(unit, registry=self.units.registry) if self.units.same_dimensions_as(conv_unit): return self.in_units(conv_unit) this_equiv = equivalence_registry[equiv]() oneway_or_equivalent = ( conv_unit.has_equivalent(equiv) or this_equiv._one_way) if self.has_equivalent(equiv) and oneway_or_equivalent: new_arr = this_equiv.convert( self, conv_unit.dimensions, **kwargs) if isinstance(new_arr, tuple): try: return type(self)(new_arr[0], new_arr[1]).in_units(unit) except YTUnitConversionError: raise YTInvalidUnitEquivalence(equiv, self.units, unit) else: return new_arr.in_units(unit) else: raise YTInvalidUnitEquivalence(equiv, self.units, unit) def list_equivalencies(self): """ Lists the possible equivalencies associated with this YTArray or YTQuantity. """ self.units.list_equivalencies() def has_equivalent(self, equiv): """ Check to see if this YTArray or YTQuantity has an equivalent unit in *equiv*. """ return self.units.has_equivalent(equiv) def ndarray_view(self): """ Returns a view into the array, but as an ndarray rather than ytarray. Returns ------- View of this array's data. """ return self.view(np.ndarray) def to_ndarray(self): """ Creates a copy of this array with the unit information stripped """ return np.array(self) @classmethod def from_astropy(cls, arr, unit_registry=None): """ Convert an AstroPy "Quantity" to a YTArray or YTQuantity. Parameters ---------- arr : AstroPy Quantity The Quantity to convert from. unit_registry : yt UnitRegistry, optional A yt unit registry to use in the conversion. If one is not supplied, the default one will be used. """ # Converting from AstroPy Quantity u = arr.unit ap_units = [] for base, exponent in zip(u.bases, u.powers): unit_str = base.to_string() # we have to do this because AstroPy is silly and defines # hour as "h" if unit_str == "h": unit_str = "hr" ap_units.append("%s**(%s)" % (unit_str, Rational(exponent))) ap_units = "*".join(ap_units) if isinstance(arr.value, np.ndarray): return YTArray(arr.value, ap_units, registry=unit_registry) else: return YTQuantity(arr.value, ap_units, registry=unit_registry) def to_astropy(self, **kwargs): """ Creates a new AstroPy quantity with the same unit information. """ if _astropy.units is None: raise ImportError("You don't have AstroPy installed, so you can't convert to " + "an AstroPy quantity.") return self.value*_astropy.units.Unit(str(self.units), **kwargs) @classmethod def from_pint(cls, arr, unit_registry=None): """ Convert a Pint "Quantity" to a YTArray or YTQuantity. Parameters ---------- arr : Pint Quantity The Quantity to convert from. unit_registry : yt UnitRegistry, optional A yt unit registry to use in the conversion. If one is not supplied, the default one will be used. Examples -------- >>> from pint import UnitRegistry >>> import numpy as np >>> ureg = UnitRegistry() >>> a = np.random.random(10) >>> b = ureg.Quantity(a, "erg/cm**3") >>> c = yt.YTArray.from_pint(b) """ p_units = [] for base, exponent in arr._units.items(): bs = convert_pint_units(base) p_units.append("%s**(%s)" % (bs, Rational(exponent))) p_units = "*".join(p_units) if isinstance(arr.magnitude, np.ndarray): return YTArray(arr.magnitude, p_units, registry=unit_registry) else: return YTQuantity(arr.magnitude, p_units, registry=unit_registry) def to_pint(self, unit_registry=None): """ Convert a YTArray or YTQuantity to a Pint Quantity. Parameters ---------- arr : YTArray or YTQuantity The unitful quantity to convert from. 
unit_registry : Pint UnitRegistry, optional The Pint UnitRegistry to use in the conversion. If one is not supplied, the default one will be used. NOTE: This is not the same as a yt UnitRegistry object. Examples -------- >>> a = YTQuantity(4.0, "cm**2/s") >>> b = a.to_pint() """ from pint import UnitRegistry if unit_registry is None: unit_registry = UnitRegistry() powers_dict = self.units.expr.as_powers_dict() units = [] for unit, pow in powers_dict.items(): # we have to do this because Pint doesn't recognize # "yr" as "year" if str(unit).endswith("yr") and len(str(unit)) in [2,3]: unit = str(unit).replace("yr","year") units.append("%s**(%s)" % (unit, Rational(pow))) units = "*".join(units) return unit_registry.Quantity(self.value, units) # # End unit conversion methods # def write_hdf5(self, filename, dataset_name=None, info=None, group_name=None): r"""Writes a YTArray to hdf5 file. Parameters ---------- filename: string The filename to create and write a dataset to dataset_name: string The name of the dataset to create in the file. info: dictionary A dictionary of supplementary info to write to append as attributes to the dataset. group_name: string An optional group to write the arrays to. If not specified, the arrays are datasets at the top level by default. Examples -------- >>> a = YTArray([1,2,3], 'cm') >>> myinfo = {'field':'dinosaurs', 'type':'field_data'} >>> a.write_hdf5('test_array_data.h5', dataset_name='dinosaurs', ... info=myinfo) """ from yt.utilities.on_demand_imports import _h5py as h5py from yt.extern.six.moves import cPickle as pickle if info is None: info = {} info['units'] = str(self.units) info['unit_registry'] = np.void(pickle.dumps(self.units.registry.lut)) if dataset_name is None: dataset_name = 'array_data' f = h5py.File(filename) if group_name is not None: if group_name in f: g = f[group_name] else: g = f.create_group(group_name) else: g = f if dataset_name in g.keys(): d = g[dataset_name] # Overwrite without deleting if we can get away with it. if d.shape == self.shape and d.dtype == self.dtype: d[...] = self for k in d.attrs.keys(): del d.attrs[k] else: del f[dataset_name] d = g.create_dataset(dataset_name, data=self) else: d = g.create_dataset(dataset_name, data=self) for k, v in info.items(): d.attrs[k] = v f.close() @classmethod def from_hdf5(cls, filename, dataset_name=None, group_name=None): r"""Attempts read in and convert a dataset in an hdf5 file into a YTArray. Parameters ---------- filename: string The filename to of the hdf5 file. dataset_name: string The name of the dataset to read from. If the dataset has a units attribute, attempt to infer units as well. group_name: string An optional group to read the arrays from. If not specified, the arrays are datasets at the top level by default. 
""" import h5py from yt.extern.six.moves import cPickle as pickle if dataset_name is None: dataset_name = 'array_data' f = h5py.File(filename) if group_name is not None: g = f[group_name] else: g = f dataset = g[dataset_name] data = dataset[:] units = dataset.attrs.get('units', '') if 'unit_registry' in dataset.attrs.keys(): unit_lut = pickle.loads(dataset.attrs['unit_registry'].tostring()) else: unit_lut = None f.close() registry = UnitRegistry(lut=unit_lut, add_default_symbols=False) return cls(data, units, registry=registry) # # Start convenience methods # @property def value(self): """Get a copy of the array data as a numpy ndarray""" return np.array(self) v = value @property def ndview(self): """Get a view of the array data.""" return self.ndarray_view() d = ndview @property def unit_quantity(self): """Get a YTQuantity with the same unit as this array and a value of 1.0""" return YTQuantity(1.0, self.units) uq = unit_quantity @property def unit_array(self): """Get a YTArray filled with ones with the same unit and shape as this array""" return np.ones_like(self) ua = unit_array def __getitem__(self, item): ret = super(YTArray, self).__getitem__(item) if ret.shape == (): return YTQuantity(ret, self.units, bypass_validation=True) else: if hasattr(self, 'units'): ret.units = self.units return ret # # Start operation methods # if LooseVersion(np.__version__) < LooseVersion('1.13.0'): def __add__(self, right_object): """ Add this ytarray to the object on the right of the `+` operator. Must check for the correct (same dimension) units. """ ro = sanitize_units_add(self, right_object, "addition") return super(YTArray, self).__add__(ro) def __radd__(self, left_object): """ See __add__. """ lo = sanitize_units_add(self, left_object, "addition") return super(YTArray, self).__radd__(lo) def __iadd__(self, other): """ See __add__. """ oth = sanitize_units_add(self, other, "addition") np.add(self, oth, out=self) return self def __sub__(self, right_object): """ Subtract the object on the right of the `-` from this ytarray. Must check for the correct (same dimension) units. """ ro = sanitize_units_add(self, right_object, "subtraction") return super(YTArray, self).__sub__(ro) def __rsub__(self, left_object): """ See __sub__. """ lo = sanitize_units_add(self, left_object, "subtraction") return super(YTArray, self).__rsub__(lo) def __isub__(self, other): """ See __sub__. """ oth = sanitize_units_add(self, other, "subtraction") np.subtract(self, oth, out=self) return self def __neg__(self): """ Negate the data. """ return super(YTArray, self).__neg__() def __mul__(self, right_object): """ Multiply this YTArray by the object on the right of the `*` operator. The unit objects handle being multiplied. """ ro = sanitize_units_mul(self, right_object) return super(YTArray, self).__mul__(ro) def __rmul__(self, left_object): """ See __mul__. """ lo = sanitize_units_mul(self, left_object) return super(YTArray, self).__rmul__(lo) def __imul__(self, other): """ See __mul__. """ oth = sanitize_units_mul(self, other) np.multiply(self, oth, out=self) return self def __div__(self, right_object): """ Divide this YTArray by the object on the right of the `/` operator. """ ro = sanitize_units_mul(self, right_object) return super(YTArray, self).__div__(ro) def __rdiv__(self, left_object): """ See __div__. """ lo = sanitize_units_mul(self, left_object) return super(YTArray, self).__rdiv__(lo) def __idiv__(self, other): """ See __div__. 
""" oth = sanitize_units_mul(self, other) np.divide(self, oth, out=self) return self def __truediv__(self, right_object): ro = sanitize_units_mul(self, right_object) return super(YTArray, self).__truediv__(ro) def __rtruediv__(self, left_object): """ See __div__. """ lo = sanitize_units_mul(self, left_object) return super(YTArray, self).__rtruediv__(lo) def __itruediv__(self, other): """ See __div__. """ oth = sanitize_units_mul(self, other) np.true_divide(self, oth, out=self) return self def __floordiv__(self, right_object): ro = sanitize_units_mul(self, right_object) return super(YTArray, self).__floordiv__(ro) def __rfloordiv__(self, left_object): """ See __div__. """ lo = sanitize_units_mul(self, left_object) return super(YTArray, self).__rfloordiv__(lo) def __ifloordiv__(self, other): """ See __div__. """ oth = sanitize_units_mul(self, other) np.floor_divide(self, oth, out=self) return self def __or__(self, right_object): return super(YTArray, self).__or__(right_object) def __ror__(self, left_object): return super(YTArray, self).__ror__(left_object) def __ior__(self, other): np.bitwise_or(self, other, out=self) return self def __xor__(self, right_object): return super(YTArray, self).__xor__(right_object) def __rxor__(self, left_object): return super(YTArray, self).__rxor__(left_object) def __ixor__(self, other): np.bitwise_xor(self, other, out=self) return self def __and__(self, right_object): return super(YTArray, self).__and__(right_object) def __rand__(self, left_object): return super(YTArray, self).__rand__(left_object) def __iand__(self, other): np.bitwise_and(self, other, out=self) return self def __pow__(self, power): """ Raise this YTArray to some power. Parameters ---------- power : float or dimensionless YTArray. The pow value. """ if isinstance(power, YTArray): if not power.units.is_dimensionless: raise YTUnitOperationError('power', power.unit) # Work around a sympy issue (I think?) # # If I don't do this, super(YTArray, self).__pow__ returns a YTArray # with a unit attribute set to the sympy expression 1/1 rather than # a dimensionless Unit object. if self.units.is_dimensionless and power == -1: ret = super(YTArray, self).__pow__(power) return type(self)(ret, input_units='') return super(YTArray, self).__pow__(power) def __abs__(self): """ Return a YTArray with the abs of the data. """ return super(YTArray, self).__abs__() # # Start comparison operators. # def __lt__(self, other): """ Test if this is less than the object on the right. """ # converts if possible oth = validate_comparison_units(self, other, 'less_than') return super(YTArray, self).__lt__(oth) def __le__(self, other): """Test if this is less than or equal to the object on the right. """ oth = validate_comparison_units(self, other, 'less_than or equal') return super(YTArray, self).__le__(oth) def __eq__(self, other): """ Test if this is equal to the object on the right. """ # Check that other is a YTArray. if other is None: # self is a YTArray, so it can't be None. return False oth = validate_comparison_units(self, other, 'equal') return super(YTArray, self).__eq__(oth) def __ne__(self, other): """ Test if this is not equal to the object on the right. """ # Check that the other is a YTArray. if other is None: return True oth = validate_comparison_units(self, other, 'not equal') return super(YTArray, self).__ne__(oth) def __ge__(self, other): """ Test if this is greater than or equal to other. """ # Check that the other is a YTArray. 
oth = validate_comparison_units( self, other, 'greater than or equal') return super(YTArray, self).__ge__(oth) def __gt__(self, other): """ Test if this is greater than the object on the right. """ # Check that the other is a YTArray. oth = validate_comparison_units(self, other, 'greater than') return super(YTArray, self).__gt__(oth) # # End comparison operators # # # Begin reduction operators # @return_arr def prod(self, axis=None, dtype=None, out=None): if axis is not None: units = self.units**self.shape[axis] else: units = self.units**self.size return super(YTArray, self).prod(axis, dtype, out), units @return_arr def mean(self, axis=None, dtype=None, out=None): return super(YTArray, self).mean(axis, dtype, out), self.units @return_arr def sum(self, axis=None, dtype=None, out=None): return super(YTArray, self).sum(axis, dtype, out), self.units @return_arr def std(self, axis=None, dtype=None, out=None, ddof=0): return super(YTArray, self).std(axis, dtype, out, ddof), self.units def __array_wrap__(self, out_arr, context=None): ret = super(YTArray, self).__array_wrap__(out_arr, context) if isinstance(ret, YTQuantity) and ret.shape != (): ret = ret.view(YTArray) if context is None: if ret.shape == (): return ret[()] else: return ret ufunc = context[0] inputs = context[1] if ufunc in unary_operators: out_arr, inp, u = get_inp_u_unary(ufunc, inputs, out_arr) unit = self._ufunc_registry[context[0]](u) ret_class = type(self) elif ufunc in binary_operators: unit_operator = self._ufunc_registry[context[0]] inps, units, ret_class = get_inp_u_binary(ufunc, inputs) if unit_operator in (preserve_units, comparison_unit, arctan2_unit): inps, units = handle_comparison_units( inps, units, ufunc, ret_class, raise_error=True) unit = unit_operator(*units) if unit_operator in (multiply_units, divide_units): out_arr, out_arr, unit = handle_multiply_divide_units( unit, units, out_arr, out_arr) else: raise RuntimeError( "Support for the %s ufunc has not been added " "to YTArray." % str(context[0])) if unit is None: out_arr = np.array(out_arr, copy=False) return out_arr out_arr.units = unit if out_arr.size == 1: return YTQuantity(np.array(out_arr), unit) else: if ret_class is YTQuantity: # This happens if you do ndarray * YTQuantity. 
Explicitly # casting to YTArray avoids creating a YTQuantity with # size > 1 return YTArray(np.array(out_arr), unit) return ret_class(np.array(out_arr, copy=False), unit) else: # numpy version equal to or newer than 1.13 def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): func = getattr(ufunc, method) if 'out' in kwargs: out_orig = kwargs.pop('out') out = np.asarray(out_orig[0]) else: out = None if len(inputs) == 1: _, inp, u = get_inp_u_unary(ufunc, inputs) out_arr = func(np.asarray(inp), out=out, **kwargs) if ufunc in (multiply, divide) and method == 'reduce': power_sign = POWER_SIGN_MAPPING[ufunc] if 'axis' in kwargs and kwargs['axis'] is not None: unit = u**(power_sign*inp.shape[kwargs['axis']]) else: unit = u**(power_sign*inp.size) else: unit = self._ufunc_registry[ufunc](u) ret_class = type(self) elif len(inputs) == 2: unit_operator = self._ufunc_registry[ufunc] inps, units, ret_class = get_inp_u_binary(ufunc, inputs) if unit_operator in (comparison_unit, arctan2_unit): inps, units = handle_comparison_units( inps, units, ufunc, ret_class) elif unit_operator is preserve_units: inps, units = handle_preserve_units( inps, units, ufunc, ret_class) unit = unit_operator(*units) out_arr = func(np.asarray(inps[0]), np.asarray(inps[1]), out=out, **kwargs) if unit_operator in (multiply_units, divide_units): out, out_arr, unit = handle_multiply_divide_units( unit, units, out, out_arr) else: raise RuntimeError( "Support for the %s ufunc with %i inputs has not been" "added to YTArray." % (str(ufunc), len(inputs))) if unit is None: out_arr = np.array(out_arr, copy=False) elif ufunc in (modf, divmod_): out_arr = tuple((ret_class(o, unit) for o in out_arr)) elif out_arr.size == 1: out_arr = YTQuantity(np.asarray(out_arr), unit) else: if ret_class is YTQuantity: # This happens if you do ndarray * YTQuantity. Explicitly # casting to YTArray avoids creating a YTQuantity with # size > 1 out_arr = YTArray(np.asarray(out_arr), unit) else: out_arr = ret_class(np.asarray(out_arr), unit) if out is not None: out_orig[0].flat[:] = out.flat[:] if isinstance(out_orig[0], YTArray): out_orig[0].units = unit return out_arr def copy(self, order='C'): return type(self)(np.copy(np.asarray(self)), self.units) def __array_finalize__(self, obj): if obj is None and hasattr(self, 'units'): return self.units = getattr(obj, 'units', NULL_UNIT) def __pos__(self): """ Posify the data. """ # this needs to be defined for all numpy versions, see # numpy issue #9081 return type(self)(super(YTArray, self).__pos__(), self.units) @return_arr def dot(self, b, out=None): return super(YTArray, self).dot(b), self.units*b.units def __reduce__(self): """Pickle reduction method See the documentation for the standard library pickle module: http://docs.python.org/2/library/pickle.html Unit metadata is encoded in the zeroth element of third element of the returned tuple, itself a tuple used to restore the state of the ndarray. This is always defined for numpy arrays. """ np_ret = super(YTArray, self).__reduce__() obj_state = np_ret[2] unit_state = (((str(self.units), self.units.registry.lut),) + obj_state[:],) new_ret = np_ret[:2] + unit_state + np_ret[3:] return new_ret def __setstate__(self, state): """Pickle setstate method This is called inside pickle.read() and restores the unit data from the metadata extracted in __reduce__ and then serialized by pickle. 
""" super(YTArray, self).__setstate__(state[1:]) try: unit, lut = state[0] except TypeError: # this case happens when we try to load an old pickle file # created before we serialized the unit symbol lookup table # into the pickle file unit, lut = str(state[0]), default_unit_symbol_lut.copy() # need to fix up the lut if the pickle was saved prior to PR #1728 # when the pickle format changed if len(lut['m']) == 2: lut.update(default_unit_symbol_lut) for k, v in [(k, v) for k, v in lut.items() if len(v) == 2]: lut[k] = v + (0.0, r'\rm{' + k.replace('_', '\ ') + '}') registry = UnitRegistry(lut=lut, add_default_symbols=False) self.units = Unit(unit, registry=registry) def __deepcopy__(self, memodict=None): """copy.deepcopy implementation This is necessary for stdlib deepcopy of arrays and quantities. """ if memodict is None: memodict = {} ret = super(YTArray, self).__deepcopy__(memodict) return type(self)(ret, copy.deepcopy(self.units)) class YTQuantity(YTArray): """ A scalar associated with a unit. Parameters ---------- input_scalar : an integer or floating point scalar The scalar to attach units to input_units : String unit specification, unit symbol object, or astropy units The units of the quantity. Powers must be specified using python syntax (cm**3, not cm^3). registry : A UnitRegistry object The registry to create units from. If input_units is already associated with a unit registry and this is specified, this will be used instead of the registry associated with the unit object. dtype : data-type The dtype of the array data. Examples -------- >>> from yt import YTQuantity >>> a = YTQuantity(1, 'cm') >>> b = YTQuantity(2, 'm') >>> a + b 201.0 cm >>> b + a 2.01 m NumPy ufuncs will pass through units where appropriate. >>> import numpy as np >>> a = YTQuantity(12, 'g/cm**3') >>> np.abs(a) 12 g/cm**3 and strip them when it would be annoying to deal with them. >>> print(np.log10(a)) 1.07918124605 YTQuantity is tightly integrated with yt datasets: >>> import yt >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030') >>> a = ds.quan(5, 'code_length') >>> a.in_cgs() 1.543e+25 cm This is equivalent to: >>> b = YTQuantity(5, 'code_length', registry=ds.unit_registry) >>> np.all(a == b) True """ def __new__(cls, input_scalar, input_units=None, registry=None, dtype=np.float64, bypass_validation=False): if not isinstance(input_scalar, (numeric_type, np.number, np.ndarray)): raise RuntimeError("YTQuantity values must be numeric") ret = YTArray.__new__(cls, input_scalar, input_units, registry, dtype=dtype, bypass_validation=bypass_validation) if ret.size > 1: raise RuntimeError("YTQuantity instances must be scalars") return ret def __repr__(self): return str(self) def validate_numpy_wrapper_units(v, arrs): if not any(isinstance(a, YTArray) for a in arrs): return v if not all(isinstance(a, YTArray) for a in arrs): raise RuntimeError("Not all of your arrays are YTArrays.") a1 = arrs[0] if not all(a.units == a1.units for a in arrs[1:]): raise RuntimeError("Your arrays must have identical units.") v.units = a1.units return v def uconcatenate(arrs, axis=0): """Concatenate a sequence of arrays. This wrapper around numpy.concatenate preserves units. All input arrays must have the same units. See the documentation of numpy.concatenate for full details. 
Examples -------- >>> A = yt.YTArray([1, 2, 3], 'cm') >>> B = yt.YTArray([2, 3, 4], 'cm') >>> uconcatenate((A, B)) YTArray([ 1., 2., 3., 2., 3., 4.]) cm """ v = np.concatenate(arrs, axis=axis) v = validate_numpy_wrapper_units(v, arrs) return v def ucross(arr1, arr2, registry=None, axisa=-1, axisb=-1, axisc=-1, axis=None): """Applies the cross product to two YT arrays. This wrapper around numpy.cross preserves units. See the documentation of numpy.cross for full details. """ v = np.cross(arr1, arr2, axisa=axisa, axisb=axisb, axisc=axisc, axis=axis) units = arr1.units * arr2.units arr = YTArray(v, units, registry=registry) return arr def uintersect1d(arr1, arr2, assume_unique=False): """Find the sorted unique elements of the two input arrays. A wrapper around numpy.intersect1d that preserves units. All input arrays must have the same units. See the documentation of numpy.intersect1d for full details. Examples -------- >>> A = yt.YTArray([1, 2, 3], 'cm') >>> B = yt.YTArray([2, 3, 4], 'cm') >>> uintersect1d(A, B) YTArray([ 2., 3.]) cm """ v = np.intersect1d(arr1, arr2, assume_unique=assume_unique) v = validate_numpy_wrapper_units(v, [arr1, arr2]) return v def uunion1d(arr1, arr2): """Find the union of two arrays. A wrapper around numpy.intersect1d that preserves units. All input arrays must have the same units. See the documentation of numpy.intersect1d for full details. Examples -------- >>> A = yt.YTArray([1, 2, 3], 'cm') >>> B = yt.YTArray([2, 3, 4], 'cm') >>> uunion1d(A, B) YTArray([ 1., 2., 3., 4.]) cm """ v = np.union1d(arr1, arr2) v = validate_numpy_wrapper_units(v, [arr1, arr2]) return v def unorm(data, ord=None, axis=None, keepdims=False): """Matrix or vector norm that preserves units This is a wrapper around np.linalg.norm that preserves units. See the documentation for that function for descriptions of the keyword arguments. The keepdims argument is ignored if the version of numpy installed is older than numpy 1.10.0. """ if LooseVersion(np.__version__) < LooseVersion('1.10.0'): norm = np.linalg.norm(data, ord=ord, axis=axis) else: norm =
np.linalg.norm(data, ord=ord, axis=axis, keepdims=keepdims)
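# To make the unit-preservation behavior above concrete, a short usage sketch.
# It assumes yt is installed and that, as in the released yt source, ``unorm``
# goes on to re-attach ``data.units`` to the result (the record above is
# truncated at the ``np.linalg.norm`` completion). Printed values are
# illustrative.

import numpy as np
from yt.units.yt_array import YTArray, unorm

v = YTArray([3.0, 4.0], 'cm')   # a 2-vector carrying length units
print(unorm(v))                 # 5.0 cm, a scalar returned with units intact

a = YTArray([1.0, 2.0], 'cm')
b = YTArray([0.01, 0.02], 'm')
print(a + b)                    # right operand converted to cm first: [2. 4.] cm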
# ________ # / # \ / # \ / # \/ import random import textwrap import emd_mean import AdvEMDpy import emd_basis import emd_utils import numpy as np import pandas as pd import cvxpy as cvx import seaborn as sns import matplotlib.pyplot as plt from scipy.integrate import odeint from scipy.ndimage import gaussian_filter from emd_utils import time_extension, Utility from scipy.interpolate import CubicSpline from emd_hilbert import Hilbert, hilbert_spectrum from emd_preprocess import Preprocess from emd_mean import Fluctuation from AdvEMDpy import EMD # alternate packages from PyEMD import EMD as pyemd0215 import emd as emd040 sns.set(style='darkgrid') pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001) pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time) pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series) # plot 0 - addition fig = plt.figure(figsize=(9, 4)) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('First Iteration of Sifting Algorithm') plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1) plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()], c='r', label=r'$M(t_i)$', zorder=2) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4) plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()], c='c', label=r'$m(t_j)$', zorder=3) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5) plt.yticks(ticks=[-2, -1, 0, 1, 2]) plt.xticks(ticks=[0, np.pi, 2 * np.pi], labels=[r'0', r'$\pi$', r'$2\pi$']) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/pseudo_algorithm.png') plt.show() knots = np.arange(12) time = np.linspace(0, 11, 1101) basis = emd_basis.Basis(time=time, time_series=time) b_spline_basis = basis.cubic_b_spline(knots) chsi_basis = basis.chsi_basis(knots) # plot 1 plt.title('Non-Natural Cubic B-Spline Bases at Boundary') plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $') plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $') plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $') plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $') plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $') plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $']) plt.xlim(4.4, 6.6) plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.legend(loc='upper left') plt.savefig('jss_figures/boundary_bases.png') plt.show() # plot 1a - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) knots_uniform = np.linspace(0, 2 * np.pi, 51) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0] fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) 
plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Uniform Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, Linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Uniform Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Uniform Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots_uniform)): axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_uniform.png') plt.show() # plot 1b - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=1, verbose=False) fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Statically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, Linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Statically Optimised Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Statically Optimised Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots)): axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_1.png') plt.show() # plot 1c - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=2, verbose=False) fig, 
axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Dynamically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, Linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Dynamically Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Dynamically Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots[i])): axs[i].plot(knots[i][j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_2.png') plt.show() # plot 1d - addition window = 81 fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Filtering Demonstration') axs[1].set_title('Zoomed Region') preprocess_time = pseudo_alg_time.copy() np.random.seed(1) random.seed(1) preprocess_time_series = pseudo_alg_time_series + np.random.normal(0, 0.1, len(preprocess_time)) for i in random.sample(range(1000), 500): preprocess_time_series[i] += np.random.normal(0, 1) preprocess = Preprocess(time=preprocess_time, time_series=preprocess_time_series) axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[0].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[0].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12)) axs[0].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize interpolation filter', 14)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) 
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[1].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[1].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize filter', 12)) axs[1].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize interpolation filter', 14)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_filter.png') plt.show() # plot 1e - addition fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Smoothing Demonstration') axs[1].set_title('Zoomed Region') axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[0].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) downsampled_and_decimated = preprocess.downsample() axs[0].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 11)) downsampled = preprocess.downsample(decimate=False) axs[0].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[1].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13))
axs[1].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 13)) axs[1].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.06, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.06, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_smooth.png') plt.show() # plot 2 fig, axs = plt.subplots(1, 2, sharey=True) axs[0].set_title('Cubic B-Spline Bases') axs[0].plot(time, b_spline_basis[2, :].T, '--', label='Basis 1') axs[0].plot(time, b_spline_basis[3, :].T, '--', label='Basis 2') axs[0].plot(time, b_spline_basis[4, :].T, '--', label='Basis 3') axs[0].plot(time, b_spline_basis[5, :].T, '--', label='Basis 4') axs[0].legend(loc='upper left') axs[0].plot(5 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].plot(6 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].set_xticks([5, 6]) axs[0].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[0].set_xlim(4.5, 6.5) axs[1].set_title('Cubic Hermite Spline Bases') axs[1].plot(time, chsi_basis[10, :].T, '--') axs[1].plot(time, chsi_basis[11, :].T, '--') axs[1].plot(time, chsi_basis[12, :].T, '--') axs[1].plot(time, chsi_basis[13, :].T, '--') axs[1].plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].set_xticks([5, 6]) axs[1].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[1].set_xlim(4.5, 6.5) plt.savefig('jss_figures/comparing_bases.png') plt.show() # plot 3 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_dash = maxima_y[-1] * np.ones_like(max_dash_time) min_dash_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_dash = minima_y[-1] * np.ones_like(min_dash_time) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) max_discard = maxima_y[-1] max_discard_time = minima_x[-1] - maxima_x[-1] + minima_x[-1] max_discard_dash_time = np.linspace(max_discard_time - width, max_discard_time + width, 101) max_discard_dash = max_discard * np.ones_like(max_discard_dash_time) dash_2_time = np.linspace(minima_x[-1], max_discard_time, 101) dash_2 = np.linspace(minima_y[-1], max_discard, 101) end_point_time = time[-1] end_point = time_series[-1] time_reflect = np.linspace((5 - a) * np.pi, (5 + a) * np.pi, 101) time_series_reflect = np.flip(np.cos(np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101)) + np.cos(5 * np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101))) time_series_anti_reflect = time_series_reflect[0] - time_series_reflect utils = emd_utils.Utility(time=time, time_series=time_series_anti_reflect) anti_max_bool = utils.max_bool_func_1st_order_fd() anti_max_point_time = time_reflect[anti_max_bool] 
anti_max_point = time_series_anti_reflect[anti_max_bool] utils = emd_utils.Utility(time=time, time_series=time_series_reflect) no_anchor_max_time = time_reflect[utils.max_bool_func_1st_order_fd()] no_anchor_max = time_series_reflect[utils.max_bool_func_1st_order_fd()] point_1 = 5.4 length_distance = np.linspace(maxima_y[-1], minima_y[-1], 101) length_distance_time = point_1 * np.pi * np.ones_like(length_distance) length_time =
np.linspace(point_1 * np.pi - width, point_1 * np.pi + width, 101)
numpy.linspace
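# The knot figures above (plots 1a-1c) repeat the same overlay idiom: one
# dashed grey vertical line per knot, with a legend entry only for the first.
# A hypothetical helper that factors this out (an illustration only, not part
# of the original jss_figures script) could look like:
import numpy as np

def overlay_knots(ax, knot_vector, y_min=-2.0, y_max=2.0):
    """Draw each knot in ``knot_vector`` as a dashed grey vertical line."""
    for j, knot in enumerate(knot_vector):
        ax.plot(knot * np.ones(101), np.linspace(y_min, y_max, 101), '--',
                c='grey', label='Knots' if j == 0 else None)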
# ________ # / # \ / # \ / # \/ import random import textwrap import emd_mean import AdvEMDpy import emd_basis import emd_utils import numpy as np import pandas as pd import cvxpy as cvx import seaborn as sns import matplotlib.pyplot as plt from scipy.integrate import odeint from scipy.ndimage import gaussian_filter from emd_utils import time_extension, Utility from scipy.interpolate import CubicSpline from emd_hilbert import Hilbert, hilbert_spectrum from emd_preprocess import Preprocess from emd_mean import Fluctuation from AdvEMDpy import EMD # alternate packages from PyEMD import EMD as pyemd0215 import emd as emd040 sns.set(style='darkgrid') pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001) pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time) pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series) # plot 0 - addition fig = plt.figure(figsize=(9, 4)) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('First Iteration of Sifting Algorithm') plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1) plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()], c='r', label=r'$M(t_i)$', zorder=2) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4) plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()], c='c', label=r'$m(t_j)$', zorder=3) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5) plt.yticks(ticks=[-2, -1, 0, 1, 2]) plt.xticks(ticks=[0, np.pi, 2 * np.pi], labels=[r'0', r'$\pi$', r'$2\pi$']) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/pseudo_algorithm.png') plt.show() knots = np.arange(12) time = np.linspace(0, 11, 1101) basis = emd_basis.Basis(time=time, time_series=time) b_spline_basis = basis.cubic_b_spline(knots) chsi_basis = basis.chsi_basis(knots) # plot 1 plt.title('Non-Natural Cubic B-Spline Bases at Boundary') plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $') plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $') plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $') plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $') plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $') plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $']) plt.xlim(4.4, 6.6) plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.legend(loc='upper left') plt.savefig('jss_figures/boundary_bases.png') plt.show() # plot 1a - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) knots_uniform = np.linspace(0, 2 * np.pi, 51) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0] fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) 
np.linspace(-2, 2, 101)
numpy.linspace
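# A minimal numerical sketch of the first sifting iteration illustrated in
# plot 0 above: for x(t) = sin(t) + sin(5t), the dashed maxima/minima
# envelopes are approximately sin(t) + 1 and sin(t) - 1, so their mean is
# ~sin(t) and subtracting it leaves ~sin(5t) as the first IMF candidate.
# This assumes exact envelopes, which the spline estimates only approximate.
import numpy as np

t = np.linspace(0, 2 * np.pi, 1001)
h_1_0 = np.sin(t) + np.sin(5 * t)                          # raw signal
mean_envelope = 0.5 * ((np.sin(t) + 1) + (np.sin(t) - 1))  # = sin(t)
h_1_1 = h_1_0 - mean_envelope                              # first sift
assert np.allclose(h_1_1, np.sin(5 * t))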
import numpy as np from typing import Tuple, Union, Optional from autoarray.structures.arrays.two_d import array_2d_util from autoarray.geometry import geometry_util from autoarray import numba_util from autoarray.mask import mask_2d_util @numba_util.jit() def grid_2d_centre_from(grid_2d_slim: np.ndarray) -> Tuple[float, float]: """ Returns the centre of a grid from a 1D grid. Parameters ---------- grid_2d_slim The 1D grid of values which are mapped to a 2D array. Returns ------- (float, float) The (y,x) central coordinates of the grid. """ centre_y = (np.max(grid_2d_slim[:, 0]) + np.min(grid_2d_slim[:, 0])) / 2.0 centre_x = (np.max(grid_2d_slim[:, 1]) + np.min(grid_2d_slim[:, 1])) / 2.0 return centre_y, centre_x @numba_util.jit() def grid_2d_slim_via_mask_from( mask_2d: np.ndarray, pixel_scales: Union[float, Tuple[float, float]], sub_size: int, origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided into a finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes the (y,x) scaled coordinates at the centre of every sub-pixel defined by this 2D mask array. The sub-grid is returned in an array of shape (total_unmasked_pixels*sub_size**2, 2). y coordinates are stored in the 0 index of the second dimension, x coordinates in the 1 index. Masked coordinates are therefore removed and not included in the slimmed grid. Grids are defined from the top-left corner, where the first unmasked sub-pixel corresponds to index 0. Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth. Parameters ---------- mask_2d A 2D array of bools, where `False` values are unmasked and therefore included as part of the calculated sub-grid. pixel_scales The (y,x) scaled units to pixel units conversion factor of the 2D mask array. sub_size The size of the sub-grid that each pixel of the 2D mask array is divided into. origin : (float, float) The (y,x) origin of the 2D array, which the sub-grid is shifted around. Returns ------- ndarray A slimmed sub grid of (y,x) scaled coordinates at the centre of every unmasked pixel on the 2D mask array. The sub grid array has dimensions (total_unmasked_pixels*sub_size**2, 2).
Examples -------- mask = np.array([[True, False, True], [False, False, False], [True, False, True]]) grid_slim = grid_2d_slim_via_mask_from(mask=mask, pixel_scales=(0.5, 0.5), sub_size=1, origin=(0.0, 0.0)) """ total_sub_pixels = mask_2d_util.total_sub_pixels_2d_from(mask_2d, sub_size) grid_slim = np.zeros(shape=(total_sub_pixels, 2)) centres_scaled = geometry_util.central_scaled_coordinate_2d_from( shape_native=mask_2d.shape, pixel_scales=pixel_scales, origin=origin ) sub_index = 0 y_sub_half = pixel_scales[0] / 2 y_sub_step = pixel_scales[0] / (sub_size) x_sub_half = pixel_scales[1] / 2 x_sub_step = pixel_scales[1] / (sub_size) for y in range(mask_2d.shape[0]): for x in range(mask_2d.shape[1]): if not mask_2d[y, x]: y_scaled = (y - centres_scaled[0]) * pixel_scales[0] x_scaled = (x - centres_scaled[1]) * pixel_scales[1] for y1 in range(sub_size): for x1 in range(sub_size): grid_slim[sub_index, 0] = -( y_scaled - y_sub_half + y1 * y_sub_step + (y_sub_step / 2.0) ) grid_slim[sub_index, 1] = ( x_scaled - x_sub_half + x1 * x_sub_step + (x_sub_step / 2.0) ) sub_index += 1 return grid_slim def grid_2d_via_mask_from( mask_2d: np.ndarray, pixel_scales: Union[float, Tuple[float, float]], sub_size: int, origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided into a finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes the (y,x) scaled coordinates at the centre of every sub-pixel defined by this 2D mask array. The sub-grid is returned in its native dimensions with shape (total_y_pixels*sub_size, total_x_pixels*sub_size). y coordinates are stored in the 0 index of the second dimension, x coordinates in the 1 index. Masked pixels are given values (0.0, 0.0). Grids are defined from the top-left corner, where the first unmasked sub-pixel corresponds to index 0. Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth. Parameters ---------- mask_2d A 2D array of bools, where `False` values are unmasked and therefore included as part of the calculated sub-grid. pixel_scales The (y,x) scaled units to pixel units conversion factor of the 2D mask array. sub_size The size of the sub-grid that each pixel of the 2D mask array is divided into. origin : (float, float) The (y,x) origin of the 2D array, which the sub-grid is shifted around. Returns ------- ndarray A sub grid of (y,x) scaled coordinates at the centre of every unmasked pixel on the 2D mask array. The sub grid array has dimensions (total_y_pixels*sub_size, total_x_pixels*sub_size).
Examples -------- mask = np.array([[True, False, True], [False, False, False], [True, False, True]]) grid_2d = grid_2d_via_mask_from(mask=mask, pixel_scales=(0.5, 0.5), sub_size=1, origin=(0.0, 0.0)) """ grid_2d_slim = grid_2d_slim_via_mask_from( mask_2d=mask_2d, pixel_scales=pixel_scales, sub_size=sub_size, origin=origin ) return grid_2d_native_from( grid_2d_slim=grid_2d_slim, mask_2d=mask_2d, sub_size=sub_size ) def grid_2d_slim_via_shape_native_from( shape_native: Tuple[int, int], pixel_scales: Union[float, Tuple[float, float]], sub_size: int, origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided into a finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes the (y,x) scaled coordinates at the centre of every sub-pixel defined by this 2D mask array. The sub-grid is returned in its slimmed dimensions with shape (total_pixels**2*sub_size**2, 2). y coordinates are stored in the 0 index of the second dimension, x coordinates in the 1 index. Grids are defined from the top-left corner, where the first sub-pixel corresponds to index [0,0]. Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth. Parameters ---------- shape_native The (y,x) shape of the 2D array the sub-grid of coordinates is computed for. pixel_scales The (y,x) scaled units to pixel units conversion factor of the 2D mask array. sub_size The size of the sub-grid that each pixel of the 2D mask array is divided into. origin The (y,x) origin of the 2D array, which the sub-grid is shifted around. Returns ------- ndarray A sub grid of (y,x) scaled coordinates at the centre of every unmasked pixel on the 2D mask array. The sub grid is slimmed and has dimensions (total_unmasked_pixels*sub_size**2, 2). Examples -------- mask = np.array([[True, False, True], [False, False, False], [True, False, True]]) grid_2d_slim = grid_2d_slim_via_shape_native_from(shape_native=(3,3), pixel_scales=(0.5, 0.5), sub_size=2, origin=(0.0, 0.0)) """ return grid_2d_slim_via_mask_from( mask_2d=np.full(fill_value=False, shape=shape_native), pixel_scales=pixel_scales, sub_size=sub_size, origin=origin, ) def grid_2d_via_shape_native_from( shape_native: Tuple[int, int], pixel_scales: Union[float, Tuple[float, float]], sub_size: int, origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided into a finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes the (y,x) scaled coordinates at the centre of every sub-pixel defined by this 2D mask array. The sub-grid is returned in its native dimensions with shape (total_y_pixels*sub_size, total_x_pixels*sub_size). y coordinates are stored in the 0 index of the second dimension, x coordinates in the 1 index. Grids are defined from the top-left corner, where the first sub-pixel corresponds to index [0,0]. Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth. Parameters ---------- shape_native The (y,x) shape of the 2D array the sub-grid of coordinates is computed for.
pixel_scales The (y,x) scaled units to pixel units conversion factor of the 2D mask array. sub_size The size of the sub-grid that each pixel of the 2D mask array is divided into. origin : (float, float) The (y,x) origin of the 2D array, which the sub-grid is shifted around. Returns ------- ndarray A sub grid of (y,x) scaled coordinates at the centre of every unmasked pixel on the 2D mask array. The sub grid array has dimensions (total_y_pixels*sub_size, total_x_pixels*sub_size). Examples -------- grid_2d = grid_2d_via_shape_native_from(shape_native=(3, 3), pixel_scales=(1.0, 1.0), sub_size=2, origin=(0.0, 0.0)) """ return grid_2d_via_mask_from( mask_2d=np.full(fill_value=False, shape=shape_native), pixel_scales=pixel_scales, sub_size=sub_size, origin=origin, ) @numba_util.jit() def grid_scaled_2d_slim_radial_projected_from( extent: np.ndarray, centre: Tuple[float, float], pixel_scales: Union[float, Tuple[float, float]], sub_size: int, shape_slim: Optional[int] = 0, ) -> np.ndarray: """ Determine a projected radial grid of points from a 2D region of coordinates defined by an extent [xmin, xmax, ymin, ymax] and with a (y,x) centre. This function operates as follows: 1) Given the region defined by the extent [xmin, xmax, ymin, ymax], the algorithm finds the longest 1D distance of the 4 paths from the (y,x) centre to the edge of the region (e.g. following the positive / negative y and x axes). 2) Use the pixel-scale corresponding to the direction chosen (e.g. if the positive x-axis was the longest, the pixel_scale in the x dimension is used). 3) Determine the number of pixels between the centre and the edge of the region using the longest path between the two chosen above. 4) Create a (y,x) grid of radial points where all points are at the centre's y value = 0.0 and the x values iterate from the centre in increasing steps of the pixel-scale. 5) Rotate these radial coordinates by the input `angle` clockwise. A schematic is shown below:

    -------------------
    |                 |
    |<- - -  - ->x    |  x = centre
    |                 |  <-> = longest radial path from centre to extent edge
    |                 |
    -------------------

Using the centre x above, this function finds the longest radial path to the edge of the extent window. The returned `grid_radii` represents a radial set of points that in 1D sample the 2D grid outwards from its centre. This grid stores the radial coordinates as (y,x) values (where all y values are the same) as opposed to a 1D data structure so that it can be used in functions which require that a 2D grid structure is input. Parameters ---------- extent The extent of the grid the radii grid is computed using, with format [xmin, xmax, ymin, ymax] centre : (float, float) The (y,x) central coordinate which the radial grid is traced outwards from. pixel_scales The (y,x) scaled units to pixel units conversion factor of the 2D mask array. sub_size The size of the sub-grid that each pixel of the 2D mask array is divided into. shape_slim Manually choose the shape of the 1D projected grid that is returned. If 0, the border based on the 2D grid is used (due to numba None cannot be used as a default value). Returns ------- ndarray A radial set of points sampling the longest distance from the centre to the edge of the extent out along the positive x-axis.
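Examples -------- An illustrative call with hypothetical argument values (chosen only for demonstration; with pixel_scales=(0.1, 0.1) and the centre at the origin, the longest radial path spans 10 pixels, so 11 radial points are returned):

    extent = np.array([-1.0, 1.0, -1.0, 1.0])
    grid_radii = grid_scaled_2d_slim_radial_projected_from(
        extent=extent, centre=(0.0, 0.0), pixel_scales=(0.1, 0.1), sub_size=1
    )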
""" distance_to_positive_x = extent[1] - centre[1] distance_to_positive_y = extent[3] - centre[0] distance_to_negative_x = centre[1] - extent[0] distance_to_negative_y = centre[0] - extent[2] scaled_distance = max( [ distance_to_positive_x, distance_to_positive_y, distance_to_negative_x, distance_to_negative_y, ] ) if (scaled_distance == distance_to_positive_y) or ( scaled_distance == distance_to_negative_y ): pixel_scale = pixel_scales[0] else: pixel_scale = pixel_scales[1] if shape_slim == 0: shape_slim = sub_size * int((scaled_distance / pixel_scale)) + 1 grid_scaled_2d_slim_radii = np.zeros((shape_slim, 2)) grid_scaled_2d_slim_radii[:, 0] += centre[0] radii = centre[1] for slim_index in range(shape_slim): grid_scaled_2d_slim_radii[slim_index, 1] = radii radii += pixel_scale / sub_size return grid_scaled_2d_slim_radii @numba_util.jit() def grid_pixels_2d_slim_from( grid_scaled_2d_slim: np.ndarray, shape_native: Tuple[int, int], pixel_scales: Union[float, Tuple[float, float]], origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ Convert a slimmed grid of 2d (y,x) scaled coordinates to a slimmed grid of 2d (y,x) pixel coordinate values. Pixel coordinates are returned as floats such that they include the decimal offset from each pixel's top-left corner relative to the input scaled coordinate. The input and output grids are both slimmed and therefore shape (total_pixels, 2). The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to the highest (most positive) y scaled coordinate and lowest (most negative) x scaled coordinate on the gird. The scaled grid is defined by an origin and coordinates are shifted to this origin before computing their 1D grid pixel coordinate values. Parameters ---------- grid_scaled_2d_slim: np.ndarray The slimmed grid of 2D (y,x) coordinates in scaled units which are converted to pixel value coordinates. shape_native The (y,x) shape of the original 2D array the scaled coordinates were computed on. pixel_scales The (y,x) scaled units to pixel units conversion factor of the original 2D array. origin : (float, flloat) The (y,x) origin of the grid, which the scaled grid is shifted to. Returns ------- ndarray A slimmed grid of 2D (y,x) pixel-value coordinates with dimensions (total_pixels, 2). Examples -------- grid_scaled_2d_slim = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]]) grid_pixels_2d_slim = grid_scaled_2d_slim_from(grid_scaled_2d_slim=grid_scaled_2d_slim, shape=(2,2), pixel_scales=(0.5, 0.5), origin=(0.0, 0.0)) """ grid_pixels_2d_slim = np.zeros((grid_scaled_2d_slim.shape[0], 2)) centres_scaled = geometry_util.central_scaled_coordinate_2d_from( shape_native=shape_native, pixel_scales=pixel_scales, origin=origin ) for slim_index in range(grid_scaled_2d_slim.shape[0]): grid_pixels_2d_slim[slim_index, 0] = ( (-grid_scaled_2d_slim[slim_index, 0] / pixel_scales[0]) + centres_scaled[0] + 0.5 ) grid_pixels_2d_slim[slim_index, 1] = ( (grid_scaled_2d_slim[slim_index, 1] / pixel_scales[1]) + centres_scaled[1] + 0.5 ) return grid_pixels_2d_slim @numba_util.jit() def grid_pixel_centres_2d_slim_from( grid_scaled_2d_slim: np.ndarray, shape_native: Tuple[int, int], pixel_scales: Union[float, Tuple[float, float]], origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ Convert a slimmed grid of 2D (y,x) scaled coordinates to a slimmed grid of 2D (y,x) pixel values. Pixel coordinates are returned as integers such that they map directly to the pixel they are contained within. 
The input and output grids are both slimmed and therefore shape (total_pixels, 2). The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to the highest (most positive) y scaled coordinate and lowest (most negative) x scaled coordinate on the grid. The scaled coordinate grid is defined by the class attribute origin, and coordinates are shifted to this origin before computing their 1D grid pixel indexes. Parameters ---------- grid_scaled_2d_slim: np.ndarray The slimmed grid of 2D (y,x) coordinates in scaled units which is converted to pixel indexes. shape_native The (y,x) shape of the original 2D array the scaled coordinates were computed on. pixel_scales The (y,x) scaled units to pixel units conversion factor of the original 2D array. origin : (float, float) The (y,x) origin of the grid, which the scaled grid is shifted to. Returns ------- ndarray A slimmed grid of 2D (y,x) pixel indexes with dimensions (total_pixels, 2). Examples -------- grid_scaled_2d_slim = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]]) grid_pixel_centres_2d_slim = grid_pixel_centres_2d_slim_from(grid_scaled_2d_slim=grid_scaled_2d_slim, shape_native=(2,2), pixel_scales=(0.5, 0.5), origin=(0.0, 0.0)) """ grid_pixels_2d_slim = np.zeros((grid_scaled_2d_slim.shape[0], 2)) centres_scaled = geometry_util.central_scaled_coordinate_2d_from( shape_native=shape_native, pixel_scales=pixel_scales, origin=origin ) for slim_index in range(grid_scaled_2d_slim.shape[0]): grid_pixels_2d_slim[slim_index, 0] = int( (-grid_scaled_2d_slim[slim_index, 0] / pixel_scales[0]) + centres_scaled[0] + 0.5 ) grid_pixels_2d_slim[slim_index, 1] = int( (grid_scaled_2d_slim[slim_index, 1] / pixel_scales[1]) + centres_scaled[1] + 0.5 ) return grid_pixels_2d_slim @numba_util.jit() def grid_pixel_indexes_2d_slim_from( grid_scaled_2d_slim: np.ndarray, shape_native: Tuple[int, int], pixel_scales: Union[float, Tuple[float, float]], origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ Convert a slimmed grid of 2D (y,x) scaled coordinates to a slimmed grid of pixel indexes. Pixel coordinates are returned as integers such that they are the pixel from the top-left of the 2D grid going right and then downwards. The input and output grids are both slimmed and have shapes (total_pixels, 2) and (total_pixels,). For example: The pixel at the top-left, whose native index is [0,0], corresponds to slimmed pixel index 0. The fifth pixel on the top row, whose native index is [0,4], corresponds to slimmed pixel index 4. The first pixel on the second row, whose native index is [1,0], has slimmed pixel index 10 if a row has 10 pixels. Concretely, slim_index = y_pixel * total_x_pixels + x_pixel (row-major order). The scaled coordinate grid is defined by the class attribute origin, and coordinates are shifted to this origin before computing their 1D grid pixel indexes. Parameters ---------- grid_scaled_2d_slim: np.ndarray The slimmed grid of 2D (y,x) coordinates in scaled units which is converted to slimmed pixel indexes. shape_native The (y,x) shape of the original 2D array the scaled coordinates were computed on. pixel_scales The (y,x) scaled units to pixel units conversion factor of the original 2D array. origin : (float, float) The (y,x) origin of the grid, which the scaled grid is shifted to. Returns ------- ndarray A grid of slimmed pixel indexes with dimensions (total_pixels,).
Examples -------- grid_scaled_2d_slim = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]]) grid_pixel_indexes_2d_slim = grid_pixel_indexes_2d_slim_from(grid_scaled_2d_slim=grid_scaled_2d_slim, shape_native=(2,2), pixel_scales=(0.5, 0.5), origin=(0.0, 0.0)) """ grid_pixels_2d_slim = grid_pixel_centres_2d_slim_from( grid_scaled_2d_slim=grid_scaled_2d_slim, shape_native=shape_native, pixel_scales=pixel_scales, origin=origin, ) grid_pixel_indexes_2d_slim = np.zeros(grid_pixels_2d_slim.shape[0]) for slim_index in range(grid_pixels_2d_slim.shape[0]): grid_pixel_indexes_2d_slim[slim_index] = int( grid_pixels_2d_slim[slim_index, 0] * shape_native[1] + grid_pixels_2d_slim[slim_index, 1] ) return grid_pixel_indexes_2d_slim @numba_util.jit() def grid_scaled_2d_slim_from( grid_pixels_2d_slim: np.ndarray, shape_native: Tuple[int, int], pixel_scales: Union[float, Tuple[float, float]], origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ Convert a slimmed grid of 2D (y,x) pixel coordinates to a slimmed grid of 2D (y,x) scaled values. The input and output grids are both slimmed and therefore shape (total_pixels, 2). The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to the highest (most positive) y scaled coordinate and lowest (most negative) x scaled coordinate on the grid. The scaled coordinate origin is defined by the class attribute origin, and coordinates are shifted to this origin after computing their values from the 1D grid pixel indexes. Parameters ---------- grid_pixels_2d_slim: np.ndarray The slimmed grid of (y,x) coordinates in pixel values which is converted to scaled coordinates. shape_native The (y,x) shape of the original 2D array the scaled coordinates were computed on. pixel_scales The (y,x) scaled units to pixel units conversion factor of the original 2D array. origin : (float, float) The (y,x) origin of the grid, which the scaled grid is shifted to. Returns ------- ndarray A slimmed grid of 2D scaled coordinates with dimensions (total_pixels, 2). Examples -------- grid_pixels_2d_slim = np.array([[0,0], [0,1], [1,0], [1,1]]) grid_scaled_2d_slim = grid_scaled_2d_slim_from(grid_pixels_2d_slim=grid_pixels_2d_slim, shape_native=(2,2), pixel_scales=(0.5, 0.5), origin=(0.0, 0.0)) """ grid_scaled_2d_slim =
np.zeros((grid_pixels_2d_slim.shape[0], 2))
numpy.zeros
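# A minimal usage sketch of the coordinate conversions above (hypothetical
# values; relies on the convention documented in the docstrings that the
# y-axis flips sign between scaled and pixel coordinates, and on
# grid_pixel_centres_2d_slim_from as defined above):
import numpy as np

grid_scaled = np.array([[1.0, -1.0], [0.0, 0.0]])  # (y, x) in scaled units
grid_pixels = grid_pixel_centres_2d_slim_from(
    grid_scaled_2d_slim=grid_scaled, shape_native=(5, 5), pixel_scales=(0.5, 0.5)
)
# grid_pixels == [[0, 0], [2, 2]]: the most positive y / most negative x
# scaled coordinate lands in the top-left pixel, and (0.0, 0.0) in the centre.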
""" Binary serialization NPY format ========== A simple format for saving numpy arrays to disk with the full information about them. The ``.npy`` format is the standard binary file format in NumPy for persisting a *single* arbitrary NumPy array on disk. The format stores all of the shape and dtype information necessary to reconstruct the array correctly even on another machine with a different architecture. The format is designed to be as simple as possible while achieving its limited goals. The ``.npz`` format is the standard format for persisting *multiple* NumPy arrays on disk. A ``.npz`` file is a zip file containing multiple ``.npy`` files, one for each array. Capabilities ------------ - Can represent all NumPy arrays including nested record arrays and object arrays. - Represents the data in its native binary form. - Supports Fortran-contiguous arrays directly. - Stores all of the necessary information to reconstruct the array including shape and dtype on a machine of a different architecture. Both little-endian and big-endian arrays are supported, and a file with little-endian numbers will yield a little-endian array on any machine reading the file. The types are described in terms of their actual sizes. For example, if a machine with a 64-bit C "long int" writes out an array with "long ints", a reading machine with 32-bit C "long ints" will yield an array with 64-bit integers. - Is straightforward to reverse engineer. Datasets often live longer than the programs that created them. A competent developer should be able to create a solution in their preferred programming language to read most ``.npy`` files that they have been given without much documentation. - Allows memory-mapping of the data. See `open_memmap`. - Can be read from a filelike stream object instead of an actual file. - Stores object arrays, i.e. arrays containing elements that are arbitrary Python objects. Files with object arrays are not to be mmapable, but can be read and written to disk. Limitations ----------- - Arbitrary subclasses of numpy.ndarray are not completely preserved. Subclasses will be accepted for writing, but only the array data will be written out. A regular numpy.ndarray object will be created upon reading the file. .. warning:: Due to limitations in the interpretation of structured dtypes, dtypes with fields with empty names will have the names replaced by 'f0', 'f1', etc. Such arrays will not round-trip through the format entirely accurately. The data is intact; only the field names will differ. We are working on a fix for this. This fix will not require a change in the file format. The arrays with such structures can still be saved and restored, and the correct dtype may be restored by using the ``loadedarray.view(correct_dtype)`` method. File extensions --------------- We recommend using the ``.npy`` and ``.npz`` extensions for files saved in this format. This is by no means a requirement; applications may wish to use these file formats but use an extension specific to the application. In the absence of an obvious alternative, however, we suggest using ``.npy`` and ``.npz``. Version numbering ----------------- The version numbering of these formats is independent of NumPy version numbering. If the format is upgraded, the code in `numpy.io` will still be able to read and write Version 1.0 files. Format Version 1.0 ------------------ The first 6 bytes are a magic string: exactly ``\\x93NUMPY``. The next 1 byte is an unsigned byte: the major version number of the file format, e.g. ``\\x01``. 
The next 1 byte is an unsigned byte: the minor version number of the file format, e.g. ``\\x00``. Note: the version of the file format is not tied to the version of the numpy package. The next 2 bytes form a little-endian unsigned short int: the length of the header data HEADER_LEN. The next HEADER_LEN bytes form the header data describing the array's format. It is an ASCII string which contains a Python literal expression of a dictionary. It is terminated by a newline (``\\n``) and padded with spaces (``\\x20``) to make the total of ``len(magic string) + 2 + len(length) + HEADER_LEN`` be evenly divisible by 64 for alignment purposes. The dictionary contains three keys: "descr" : dtype.descr An object that can be passed as an argument to the `numpy.dtype` constructor to create the array's dtype. "fortran_order" : bool Whether the array data is Fortran-contiguous or not. Since Fortran-contiguous arrays are a common form of non-C-contiguity, we allow them to be written directly to disk for efficiency. "shape" : tuple of int The shape of the array. For repeatability and readability, the dictionary keys are sorted in alphabetic order. This is for convenience only. A writer SHOULD implement this if possible. A reader MUST NOT depend on this. Following the header comes the array data. If the dtype contains Python objects (i.e. ``dtype.hasobject is True``), then the data is a Python pickle of the array. Otherwise the data is the contiguous (either C- or Fortran-, depending on ``fortran_order``) bytes of the array. Consumers can figure out the number of bytes by multiplying the number of elements given by the shape (noting that ``shape=()`` means there is 1 element) by ``dtype.itemsize``. Format Version 2.0 ------------------ The version 1.0 format only allowed the array header to have a total size of 65535 bytes. This can be exceeded by structured arrays with a large number of columns. The version 2.0 format extends the header size to 4 GiB. `numpy.save` will automatically save in 2.0 format if the data requires it, else it will always use the more compatible 1.0 format. The description of the fourth element of the header therefore has become: "The next 4 bytes form a little-endian unsigned int: the length of the header data HEADER_LEN." Format Version 3.0 ------------------ This version replaces the ASCII string (which in practice was latin1) with a utf8-encoded string, so supports structured types with any unicode field names. Notes ----- The ``.npy`` format, including motivation for creating it and a comparison of alternatives, is described in the :doc:`"npy-format" NEP <neps:nep-0001-npy-format>`, however details have evolved with time and this document is more current. 
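Examples
--------

A minimal sketch of reading the version and header of a Version 1.0 file by hand, using only the layout described above (``example.npy`` is a hypothetical file name; for real use, prefer ``numpy.load`` or the reader functions in this module):

    import ast
    import struct

    with open('example.npy', 'rb') as fp:
        assert fp.read(6) == b'\\x93NUMPY'          # magic string
        major, minor = fp.read(2)                   # version bytes, e.g. (1, 0)
        hlen = struct.unpack('<H', fp.read(2))[0]   # v1.0: little-endian ushort
        header = ast.literal_eval(fp.read(hlen).decode('latin1'))
    # header now holds the 'descr', 'fortran_order' and 'shape' keys described
    # above; the array data follows at the current file offset.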
""" import numpy import io import warnings from numpy.lib.utils import safe_eval from numpy.compat import ( isfileobj, os_fspath, pickle ) __all__ = [] EXPECTED_KEYS = {'descr', 'fortran_order', 'shape'} MAGIC_PREFIX = b'\x93NUMPY' MAGIC_LEN = len(MAGIC_PREFIX) + 2 ARRAY_ALIGN = 64 # plausible values are powers of 2 between 16 and 4096 BUFFER_SIZE = 2**18 # size of buffer for reading npz files in bytes # difference between version 1.0 and 2.0 is a 4 byte (I) header length # instead of 2 bytes (H) allowing storage of large structured arrays _header_size_info = { (1, 0): ('<H', 'latin1'), (2, 0): ('<I', 'latin1'), (3, 0): ('<I', 'utf8'), } def _check_version(version): if version not in [(1, 0), (2, 0), (3, 0), None]: msg = "we only support format version (1,0), (2,0), and (3,0), not %s" raise ValueError(msg % (version,)) def magic(major, minor): """ Return the magic string for the given file format version. Parameters ---------- major : int in [0, 255] minor : int in [0, 255] Returns ------- magic : str Raises ------ ValueError if the version cannot be formatted. """ if major < 0 or major > 255: raise ValueError("major version must be 0 <= major < 256") if minor < 0 or minor > 255: raise ValueError("minor version must be 0 <= minor < 256") return MAGIC_PREFIX + bytes([major, minor]) def read_magic(fp): """ Read the magic string to get the version of the file format. Parameters ---------- fp : filelike object Returns ------- major : int minor : int """ magic_str = _read_bytes(fp, MAGIC_LEN, "magic string") if magic_str[:-2] != MAGIC_PREFIX: msg = "the magic string is not correct; expected %r, got %r" raise ValueError(msg % (MAGIC_PREFIX, magic_str[:-2])) major, minor = magic_str[-2:] return major, minor def _has_metadata(dt): if dt.metadata is not None: return True elif dt.names is not None: return any(_has_metadata(dt[k]) for k in dt.names) elif dt.subdtype is not None: return _has_metadata(dt.base) else: return False def dtype_to_descr(dtype): """ Get a serializable descriptor from the dtype. The .descr attribute of a dtype object cannot be round-tripped through the dtype() constructor. Simple types, like dtype('float32'), have a descr which looks like a record array with one field with '' as a name. The dtype() constructor interprets this as a request to give a default name. Instead, we construct descriptor that can be passed to dtype(). Parameters ---------- dtype : dtype The dtype of the array that will be written to disk. Returns ------- descr : object An object that can be passed to `numpy.dtype()` in order to replicate the input dtype. """ if _has_metadata(dtype): warnings.warn("metadata on a dtype may be saved or ignored, but will " "raise if saved when read. Use another form of storage.", UserWarning, stacklevel=2) if dtype.names is not None: # This is a record array. The .descr is fine. XXX: parts of the # record array with an empty name, like padding bytes, still get # fiddled with. This needs to be fixed in the C implementation of # dtype(). return dtype.descr else: return dtype.str def descr_to_dtype(descr): """ Returns a dtype based off the given description. This is essentially the reverse of `dtype_to_descr()`. It will remove the valueless padding fields created by, i.e. simple fields like dtype('float32'), and then convert the description to its corresponding dtype. Parameters ---------- descr : object The object retreived by dtype.descr. Can be passed to `numpy.dtype()` in order to replicate the input dtype. 
Returns ------- dtype : dtype The dtype constructed by the description. """ if isinstance(descr, str): # No padding removal needed return numpy.dtype(descr) elif isinstance(descr, tuple): # subtype, will always have a shape descr[1] dt = descr_to_dtype(descr[0]) return numpy.dtype((dt, descr[1])) titles = [] names = [] formats = [] offsets = [] offset = 0 for field in descr: if len(field) == 2: name, descr_str = field dt = descr_to_dtype(descr_str) else: name, descr_str, shape = field dt = numpy.dtype((descr_to_dtype(descr_str), shape)) # Ignore padding bytes, which will be void bytes with '' as name # Once support for blank names is removed, only "if name == ''" needed) is_pad = (name == '' and dt.type is numpy.void and dt.names is None) if not is_pad: title, name = name if isinstance(name, tuple) else (None, name) titles.append(title) names.append(name) formats.append(dt) offsets.append(offset) offset += dt.itemsize return numpy.dtype({'names': names, 'formats': formats, 'titles': titles, 'offsets': offsets, 'itemsize': offset}) def header_data_from_array_1_0(array): """ Get the dictionary of header metadata from a numpy.ndarray. Parameters ---------- array : numpy.ndarray Returns ------- d : dict This has the appropriate entries for writing its string representation to the header of the file. """ d = {'shape': array.shape} if array.flags.c_contiguous: d['fortran_order'] = False elif array.flags.f_contiguous: d['fortran_order'] = True else: # Totally non-contiguous data. We will have to make it C-contiguous # before writing. Note that we need to test for C_CONTIGUOUS first # because a 1-D array is both C_CONTIGUOUS and F_CONTIGUOUS. d['fortran_order'] = False d['descr'] = dtype_to_descr(array.dtype) return d def _wrap_header(header, version): """ Takes a stringified header, and attaches the prefix and padding to it """ import struct assert version is not None fmt, encoding = _header_size_info[version] if not isinstance(header, bytes): # always true on python 3 header = header.encode(encoding) hlen = len(header) + 1 padlen = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize(fmt) + hlen) % ARRAY_ALIGN) try: header_prefix = magic(*version) + struct.pack(fmt, hlen + padlen) except struct.error: msg = "Header length {} too big for version={}".format(hlen, version) raise ValueError(msg) from None # Pad the header with spaces and a final newline such that the magic # string, the header-length short and the header are aligned on a # ARRAY_ALIGN byte boundary. This supports memory mapping of dtypes # aligned up to ARRAY_ALIGN on systems like Linux where mmap() # offset must be page-aligned (i.e. the beginning of the file). return header_prefix + header + b' '*padlen + b'\n' def _wrap_header_guess_version(header): """ Like `_wrap_header`, but chooses an appropriate version given the contents """ try: return _wrap_header(header, (1, 0)) except ValueError: pass try: ret = _wrap_header(header, (2, 0)) except UnicodeEncodeError: pass else: warnings.warn("Stored array in format 2.0. It can only be " "read by NumPy >= 1.9", UserWarning, stacklevel=2) return ret header = _wrap_header(header, (3, 0)) warnings.warn("Stored array in format 3.0. It can only be " "read by NumPy >= 1.17", UserWarning, stacklevel=2) return header def _write_array_header(fp, d, version=None): """ Write the header for an array and return the version used Parameters ---------- fp : filelike object d : dict This has the appropriate entries for writing its string representation to the header of the file.
version: tuple or None None means use oldest that works explicit version will raise a ValueError if the format does not allow saving this data. Default: None """ header = ["{"] for key, value in sorted(d.items()): # Need to use repr here, since we eval these when reading header.append("'%s': %s, " % (key, repr(value))) header.append("}") header = "".join(header) if version is None: header = _wrap_header_guess_version(header) else: header = _wrap_header(header, version) fp.write(header) def write_array_header_1_0(fp, d): """ Write the header for an array using the 1.0 format. Parameters ---------- fp : filelike object d : dict This has the appropriate entries for writing its string representation to the header of the file. """ _write_array_header(fp, d, (1, 0)) def write_array_header_2_0(fp, d): """ Write the header for an array using the 2.0 format. The 2.0 format allows storing very large structured arrays. .. versionadded:: 1.9.0 Parameters ---------- fp : filelike object d : dict This has the appropriate entries for writing its string representation to the header of the file. """ _write_array_header(fp, d, (2, 0)) def read_array_header_1_0(fp): """ Read an array header from a filelike object using the 1.0 file format version. This will leave the file object located just after the header. Parameters ---------- fp : filelike object A file object or something with a `.read()` method like a file. Returns ------- shape : tuple of int The shape of the array. fortran_order : bool The array data will be written out directly if it is either C-contiguous or Fortran-contiguous. Otherwise, it will be made contiguous before writing it out. dtype : dtype The dtype of the file's data. Raises ------ ValueError If the data is invalid. """ return _read_array_header(fp, version=(1, 0)) def read_array_header_2_0(fp): """ Read an array header from a filelike object using the 2.0 file format version. This will leave the file object located just after the header. .. versionadded:: 1.9.0 Parameters ---------- fp : filelike object A file object or something with a `.read()` method like a file. Returns ------- shape : tuple of int The shape of the array. fortran_order : bool The array data will be written out directly if it is either C-contiguous or Fortran-contiguous. Otherwise, it will be made contiguous before writing it out. dtype : dtype The dtype of the file's data. Raises ------ ValueError If the data is invalid. """ return _read_array_header(fp, version=(2, 0)) def _filter_header(s): """Clean up 'L' in npz header ints. Cleans up the 'L' in strings representing integers. Needed to allow npz headers produced in Python2 to be read in Python3. Parameters ---------- s : string Npy file header. Returns ------- header : str Cleaned up header. """ import tokenize from io import StringIO tokens = [] last_token_was_number = False for token in tokenize.generate_tokens(StringIO(s).readline): token_type = token[0] token_string = token[1] if (last_token_was_number and token_type == tokenize.NAME and token_string == "L"): continue else: tokens.append(token) last_token_was_number = (token_type == tokenize.NUMBER) return tokenize.untokenize(tokens) def _read_array_header(fp, version): """ see read_array_header_1_0 """ # Read an unsigned, little-endian short int which has the length of the # header. 
import struct hinfo = _header_size_info.get(version) if hinfo is None: raise ValueError("Invalid version {!r}".format(version)) hlength_type, encoding = hinfo hlength_str = _read_bytes(fp, struct.calcsize(hlength_type), "array header length") header_length = struct.unpack(hlength_type, hlength_str)[0] header = _read_bytes(fp, header_length, "array header") header = header.decode(encoding) # The header is a pretty-printed string representation of a literal # Python dictionary with trailing newlines padded to a ARRAY_ALIGN byte # boundary. The keys are strings. # "shape" : tuple of int # "fortran_order" : bool # "descr" : dtype.descr # Versions (2, 0) and (1, 0) could have been created by a Python 2 # implementation before header filtering was implemented. if version <= (2, 0): header = _filter_header(header) try: d = safe_eval(header) except SyntaxError as e: msg = "Cannot parse header: {!r}" raise ValueError(msg.format(header)) from e if not isinstance(d, dict): msg = "Header is not a dictionary: {!r}" raise ValueError(msg.format(d)) if EXPECTED_KEYS != d.keys(): keys = sorted(d.keys()) msg = "Header does not contain the correct keys: {!r}" raise ValueError(msg.format(keys)) # Sanity-check the values. if (not isinstance(d['shape'], tuple) or not all(isinstance(x, int) for x in d['shape'])): msg = "shape is not valid: {!r}" raise ValueError(msg.format(d['shape'])) if not isinstance(d['fortran_order'], bool): msg = "fortran_order is not a valid bool: {!r}" raise ValueError(msg.format(d['fortran_order'])) try: dtype = descr_to_dtype(d['descr']) except TypeError as e: msg = "descr is not a valid dtype descriptor: {!r}" raise ValueError(msg.format(d['descr'])) from e return d['shape'], d['fortran_order'], dtype def write_array(fp, array, version=None, allow_pickle=True, pickle_kwargs=None): """ Write an array to an NPY file, including a header. If the array is neither C-contiguous nor Fortran-contiguous AND the file_like object is not a real file object, this function will have to copy data in memory. Parameters ---------- fp : file_like object An open, writable file object, or similar object with a ``.write()`` method. array : ndarray The array to write to disk. version : (int, int) or None, optional The version number of the format. None means use the oldest supported version that is able to store the data. Default: None allow_pickle : bool, optional Whether to allow writing pickled data. Default: True pickle_kwargs : dict, optional Additional keyword arguments to pass to pickle.dump, excluding 'protocol'. These are only useful when pickling objects in object arrays on Python 3 to Python 2 compatible format. Raises ------ ValueError If the array cannot be persisted. This includes the case of allow_pickle=False and array being an object array. Various other errors If the array contains Python objects as part of its dtype, the process of pickling them may raise various errors if the objects are not picklable. """ _check_version(version) _write_array_header(fp, header_data_from_array_1_0(array), version) if array.itemsize == 0: buffersize = 0 else: # Set buffer size to 16 MiB to hide the Python loop overhead. buffersize = max(16 * 1024 ** 2 // array.itemsize, 1) if array.dtype.hasobject: # We contain Python objects so we cannot write out the data # directly. 
Instead, we will pickle it out if not allow_pickle: raise ValueError("Object arrays cannot be saved when " "allow_pickle=False") if pickle_kwargs is None: pickle_kwargs = {} pickle.dump(array, fp, protocol=3, **pickle_kwargs) elif array.flags.f_contiguous and not array.flags.c_contiguous: if isfileobj(fp): array.T.tofile(fp) else: for chunk in numpy.nditer( array, flags=['external_loop', 'buffered', 'zerosize_ok'], buffersize=buffersize, order='F'): fp.write(chunk.tobytes('C')) else: if isfileobj(fp): array.tofile(fp) else: for chunk in numpy.nditer( array, flags=['external_loop', 'buffered', 'zerosize_ok'], buffersize=buffersize, order='C'): fp.write(chunk.tobytes('C')) def read_array(fp, allow_pickle=False, pickle_kwargs=None): """ Read an array from an NPY file. Parameters ---------- fp : file_like object If this is not a real file object, then this may take extra memory and time. allow_pickle : bool, optional Whether to allow writing pickled data. Default: False .. versionchanged:: 1.16.3 Made default False in response to CVE-2019-6446. pickle_kwargs : dict Additional keyword arguments to pass to pickle.load. These are only useful when loading object arrays saved on Python 2 when using Python 3. Returns ------- array : ndarray The array from the data on disk. Raises ------ ValueError If the data is invalid, or allow_pickle=False and the file contains an object array. """ version = read_magic(fp) _check_version(version) shape, fortran_order, dtype = _read_array_header(fp, version) if len(shape) == 0: count = 1 else: count = numpy.multiply.reduce(shape, dtype=numpy.int64) # Now read the actual data. if dtype.hasobject: # The array contained Python objects. We need to unpickle the data. if not allow_pickle: raise ValueError("Object arrays cannot be loaded when " "allow_pickle=False") if pickle_kwargs is None: pickle_kwargs = {} try: array = pickle.load(fp, **pickle_kwargs) except UnicodeError as err: # Friendlier error message raise UnicodeError("Unpickling a python object failed: %r\n" "You may need to pass the encoding= option " "to numpy.load" % (err,)) from err else: if isfileobj(fp): # We can use the fast fromfile() function. array = numpy.fromfile(fp, dtype=dtype, count=count) else: # This is not a real file. We have to read it the # memory-intensive way. # crc32 module fails on reads greater than 2 ** 32 bytes, # breaking large reads from gzip streams. Chunk reads to # BUFFER_SIZE bytes to avoid issue and reduce memory overhead # of the read. In non-chunked case count < max_read_count, so # only one read is performed. # Use np.ndarray instead of np.empty since the latter does # not correctly instantiate zero-width string dtypes; see # https://github.com/numpy/numpy/pull/6430 array = numpy.ndarray(count, dtype=dtype) if dtype.itemsize > 0: # If dtype.itemsize == 0 then there's nothing more to read max_read_count = BUFFER_SIZE // min(BUFFER_SIZE, dtype.itemsize) for i in range(0, count, max_read_count): read_count = min(max_read_count, count - i) read_size = int(read_count * dtype.itemsize) data = _read_bytes(fp, read_size, "array data") array[i:i+read_count] = numpy.frombuffer(data, dtype=dtype, count=read_count) if fortran_order: array.shape = shape[::-1] array = array.transpose() else: array.shape = shape return array def open_memmap(filename, mode='r+', dtype=None, shape=None, fortran_order=False, version=None): """ Open a .npy file as a memory-mapped array. This may be used to read an existing file or create a new one. 
Parameters ---------- filename : str or path-like The name of the file on disk. This may *not* be a file-like object. mode : str, optional The mode in which to open the file; the default is 'r+'. In addition to the standard file modes, 'c' is also accepted to mean "copy on write." See `memmap` for the available mode strings. dtype : data-type, optional The data type of the array if we are creating a new file in "write" mode, if not, `dtype` is ignored. The default value is None, which results in a data-type of `float64`. shape : tuple of int The shape of the array if we are creating a new file in "write" mode, in which case this parameter is required. Otherwise, this parameter is ignored and is thus optional. fortran_order : bool, optional Whether the array should be Fortran-contiguous (True) or C-contiguous (False, the default) if we are creating a new file in "write" mode. version : tuple of int (major, minor) or None If the mode is a "write" mode, then this is the version of the file format used to create the file. None means use the oldest supported version that is able to store the data. Default: None Returns ------- marray : memmap The memory-mapped array. Raises ------ ValueError If the data or the mode is invalid. IOError If the file is not found or cannot be opened correctly. See Also -------- numpy.memmap """ if isfileobj(filename): raise ValueError("Filename must be a string or a path-like object." " Memmap cannot use existing file handles.") if 'w' in mode: # We are creating the file, not reading it. # Check if we ought to create the file. _check_version(version) # Ensure that the given dtype is an authentic dtype object rather # than just something that can be interpreted as a dtype object. dtype = numpy.dtype(dtype) if dtype.hasobject: msg = "Array can't be memory-mapped: Python objects in dtype." raise ValueError(msg) d = dict( descr=dtype_to_descr(dtype), fortran_order=fortran_order, shape=shape, ) # If we got here, then it should be safe to create the file. with open(os_fspath(filename), mode+'b') as fp: _write_array_header(fp, d, version) offset = fp.tell() else: # Read the header of the file first. with open(
os_fspath(filename)
numpy.compat.os_fspath
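# A minimal round-trip sketch of the .npy helpers defined above, assuming a
# numpy version that still exposes read_array_header_1_0, write_array, and
# read_array from numpy.lib.format; an in-memory buffer stands in for a file.
import io

import numpy as np
from numpy.lib import format as npy_format

arr = np.arange(12, dtype=np.float64).reshape(3, 4)

buf = io.BytesIO()
npy_format.write_array(buf, arr)  # writes magic string, header, then raw data

buf.seek(0)
assert npy_format.read_magic(buf) == (1, 0)  # small header -> 1.0 format
shape, fortran_order, dtype = npy_format.read_array_header_1_0(buf)
assert shape == (3, 4) and not fortran_order and dtype == np.float64

buf.seek(0)  # read_array consumes the magic string itself
assert np.array_equal(npy_format.read_array(buf, allow_pickle=False), arr)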
import numpy as np import pytest import theano import theano.tensor as tt # Don't import test classes otherwise they get tested as part of the file from tests import unittest_tools as utt from tests.gpuarray.config import mode_with_gpu, mode_without_gpu, test_ctx_name from tests.tensor.test_basic import ( TestAlloc, TestComparison, TestJoinAndSplit, TestReshape, ) from tests.tensor.utils import rand, safe_make_node from theano.gpuarray.basic_ops import ( GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu, ) from theano.gpuarray.elemwise import GpuDimShuffle, GpuElemwise from theano.gpuarray.subtensor import GpuSubtensor from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor from theano.tensor import TensorType from theano.tensor.basic import alloc pygpu = pytest.importorskip("pygpu") gpuarray = pygpu.gpuarray utt.seed_rng() rng = np.random.RandomState(seed=utt.fetch_seed()) def inplace_func( inputs, outputs, mode=None, allow_input_downcast=False, on_unused_input="raise", name=None, ): if mode is None: mode = mode_with_gpu return theano.function( inputs, outputs, mode=mode, allow_input_downcast=allow_input_downcast, accept_inplace=True, on_unused_input=on_unused_input, name=name, ) def fake_shared(value, name=None, strict=False, allow_downcast=None, **kwargs): from theano.tensor.sharedvar import scalar_constructor, tensor_constructor for c in (gpuarray_shared_constructor, tensor_constructor, scalar_constructor): try: return c( value, name=name, strict=strict, allow_downcast=allow_downcast, **kwargs ) except TypeError: continue def rand_gpuarray(*shape, **kwargs): r = rng.rand(*shape) * 2 - 1 dtype = kwargs.pop("dtype", theano.config.floatX) cls = kwargs.pop("cls", None) if len(kwargs) != 0: raise TypeError("Unexpected argument %s", list(kwargs.keys())[0]) return gpuarray.array(r, dtype=dtype, cls=cls, context=get_context(test_ctx_name)) def makeTester( name, op, gpu_op, cases, checks=None, mode_gpu=mode_with_gpu, mode_nogpu=mode_without_gpu, skip=False, eps=1e-10, ): if checks is None: checks = {} _op = op _gpu_op = gpu_op _cases = cases _skip = skip _checks = checks class Checker(utt.OptimizationTestMixin): op = staticmethod(_op) gpu_op = staticmethod(_gpu_op) cases = _cases skip = _skip checks = _checks def setup_method(self): eval(self.__class__.__module__ + "." 
+ self.__class__.__name__) def test_all(self): if skip: pytest.skip(skip) for testname, inputs in cases.items(): for _ in range(len(inputs)): if type(inputs[_]) is float: inputs[_] = np.asarray(inputs[_], dtype=theano.config.floatX) self.run_case(testname, inputs) def run_case(self, testname, inputs): inputs_ref = [theano.shared(inp) for inp in inputs] inputs_tst = [theano.shared(inp) for inp in inputs] try: node_ref = safe_make_node(self.op, *inputs_ref) node_tst = safe_make_node(self.op, *inputs_tst) except Exception as exc: err_msg = ( "Test %s::%s: Error occurred while making " "a node with inputs %s" ) % (self.gpu_op, testname, inputs) exc.args += (err_msg,) raise try: f_ref = inplace_func([], node_ref.outputs, mode=mode_nogpu) f_tst = inplace_func([], node_tst.outputs, mode=mode_gpu) except Exception as exc: err_msg = ( "Test %s::%s: Error occurred while trying to " "make a Function" ) % (self.gpu_op, testname) exc.args += (err_msg,) raise self.assertFunctionContains1(f_tst, self.gpu_op) ref_e = None try: expecteds = f_ref() except Exception as exc: ref_e = exc try: variables = f_tst() except Exception as exc: if ref_e is None: err_msg = ( "Test %s::%s: exception when calling the " "Function" ) % (self.gpu_op, testname) exc.args += (err_msg,) raise else: # if we raised an exception of the same type we're good. if isinstance(exc, type(ref_e)): return else: err_msg = ( "Test %s::%s: exception raised during test " "call was not the same as the reference " "call (got: %s, expected %s)" % (self.gpu_op, testname, type(exc), type(ref_e)) ) exc.args += (err_msg,) raise for i, (variable, expected) in enumerate(zip(variables, expecteds)): condition = ( variable.dtype != expected.dtype or variable.shape != expected.shape or not TensorType.values_eq_approx(variable, expected) ) assert not condition, ( "Test %s::%s: Output %s gave the wrong " "value. With inputs %s, expected %s " "(dtype %s), got %s (dtype %s)." 
                    % (
                        self.op,
                        testname,
                        i,
                        inputs,
                        expected,
                        expected.dtype,
                        variable,
                        variable.dtype,
                    )
                )
            for description, check in self.checks.items():
                assert check(inputs, variables), (
                    "Test %s::%s: Failed check: %s "
                    "(inputs were %s, outputs were %s)"
                ) % (self.op, testname, description, inputs, variables)

    Checker.__name__ = name
    if hasattr(Checker, "__qualname__"):
        Checker.__qualname__ = name
    return Checker


def test_transfer_cpu_gpu():
    a = tt.fmatrix("a")
    g = GpuArrayType(dtype="float32", broadcastable=(False, False))("g")
    av = np.asarray(rng.rand(5, 4), dtype="float32")
    gv = gpuarray.array(av, context=get_context(test_ctx_name))
    f = theano.function([a], GpuFromHost(test_ctx_name)(a))
    fv = f(av)
    assert GpuArrayType.values_eq(fv, gv)
    f = theano.function([g], host_from_gpu(g))
    fv = f(gv)
    assert np.all(fv == av)


def test_transfer_gpu_gpu():
    g = GpuArrayType(
        dtype="float32", broadcastable=(False, False), context_name=test_ctx_name
    )()
    av = np.asarray(rng.rand(5, 4), dtype="float32")
    gv = gpuarray.array(av, context=get_context(test_ctx_name))
    mode = mode_with_gpu.excluding(
        "cut_gpua_host_transfers", "local_cut_gpua_host_gpua"
    )
    f = theano.function([g], GpuToGpu(test_ctx_name)(g), mode=mode)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 1
    assert isinstance(topo[0].op, GpuToGpu)
    fv = f(gv)
    assert GpuArrayType.values_eq(fv, gv)


def test_transfer_strided():
    # This is just to ensure that it works in theano;
    # libgpuarray has a much more comprehensive suite of tests to
    # ensure correctness.
    a = tt.fmatrix("a")
    g = GpuArrayType(dtype="float32", broadcastable=(False, False))("g")
    av = np.asarray(rng.rand(5, 8), dtype="float32")
    gv = gpuarray.array(av, context=get_context(test_ctx_name))
    av = av[:, ::2]
    gv = gv[:, ::2]
    f = theano.function([a], GpuFromHost(test_ctx_name)(a))
    fv = f(av)
    assert GpuArrayType.values_eq(fv, gv)
    f = theano.function([g], host_from_gpu(g))
    fv = f(gv)
    assert np.all(fv == av)


def gpu_alloc_expected(x, *shp):
    g = gpuarray.empty(shp, dtype=x.dtype, context=get_context(test_ctx_name))
    g[:] = x
    return g


TestGpuAlloc = makeTester(
    name="GpuAllocTester",
    # The +1 is there to allow the lift to the GPU.
op=lambda *args: alloc(*args) + 1, gpu_op=GpuAlloc(test_ctx_name), cases=dict( correct01=(rand(), np.int32(7)), # just gives a DeepCopyOp with possibly wrong results on the CPU # correct01_bcast=(rand(1), np.int32(7)), correct02=(rand(), np.int32(4), np.int32(7)), correct12=(rand(7), np.int32(4), np.int32(7)), correct13=(rand(7), np.int32(2), np.int32(4), np.int32(7)), correct23=(rand(4, 7), np.int32(2), np.int32(4), np.int32(7)), bad_shape12=(rand(7), np.int32(7), np.int32(5)), ), ) class TestGPUAlloc(TestAlloc): dtype = "float32" mode = mode_with_gpu shared = staticmethod(gpuarray_shared_constructor) allocs = [GpuAlloc(test_ctx_name), GpuAlloc(test_ctx_name), tt.Alloc()] def test_alloc_empty(): for dt in ["float32", "int8"]: f = theano.function([], GpuAllocEmpty(dt, context_name=test_ctx_name)(2, 3)) assert len(f.maker.fgraph.apply_nodes) == 1 out = f() assert out.shape == (2, 3) assert out.dtype == dt f = theano.function( [], [ GpuAllocEmpty("uint64", test_ctx_name)(3, 2), GpuAllocEmpty("uint64", test_ctx_name)(3, 2), ], ) out = f() assert out[0].shape == (3, 2) assert out[0].dtype == "uint64" assert out[1].shape == (3, 2) assert out[1].dtype == "uint64" assert ( len( [ node for node in f.maker.fgraph.apply_nodes if isinstance(node.op, GpuAllocEmpty) ] ) == 1 ) def test_shape(): x = GpuArrayType(dtype="float32", broadcastable=[False, False, False])() v = gpuarray.zeros((3, 4, 5), dtype="float32", context=get_context(test_ctx_name)) f = theano.function([x], x.shape) topo = f.maker.fgraph.toposort() assert np.all(f(v) == (3, 4, 5)) if theano.config.mode != "FAST_COMPILE": assert len(topo) == 4 assert isinstance(topo[0].op, tt.opt.Shape_i) assert isinstance(topo[1].op, tt.opt.Shape_i) assert isinstance(topo[2].op, tt.opt.Shape_i) assert isinstance(topo[3].op, tt.opt.MakeVector) mode = mode_with_gpu.excluding("local_shape_to_shape_i") f = theano.function([x], x.shape, mode=mode) topo = f.maker.fgraph.toposort() assert np.all(f(v) == (3, 4, 5)) assert len(topo) == 1 assert isinstance(topo[0].op, tt.Shape) def test_gpu_contiguous(): a = tt.fmatrix("a") i = tt.iscalar("i") a_val = np.asarray(np.random.rand(4, 5), dtype="float32") # The reshape is needed otherwise we make the subtensor on the CPU # to transfer less data. 
f = theano.function( [a, i], gpu_contiguous(a.reshape((5, 4))[::i]), mode=mode_with_gpu ) topo = f.maker.fgraph.toposort() assert any([isinstance(node.op, GpuSubtensor) for node in topo]) assert any([isinstance(node.op, GpuContiguous) for node in topo]) assert f(a_val, 1).flags.c_contiguous assert f(a_val, 2).flags.c_contiguous assert f(a_val, 2).flags.c_contiguous class TestGPUReshape(TestReshape): def setup_method(self): self.shared = gpuarray_shared_constructor self.op = GpuReshape self.mode = mode_with_gpu self.ignore_topo = ( HostFromGpu, GpuFromHost, theano.compile.DeepCopyOp, GpuDimShuffle, GpuElemwise, tt.opt.Shape_i, tt.opt.MakeVector, ) assert self.op == GpuReshape class TestGPUComparison(TestComparison): def setup_method(self): utt.seed_rng() self.mode = mode_with_gpu self.shared = gpuarray_shared_constructor self.dtypes = ["float64", "float32"] class TestGPUJoinAndSplit(TestJoinAndSplit): def setup_method(self): self.mode = mode_with_gpu.excluding("constant_folding") self.join_op = GpuJoin() self.split_op_class = GpuSplit # Use join instead of MakeVector since there is no MakeVector on GPU self.make_vector_op = GpuJoin() # this is to avoid errors with limited devices self.floatX = "float32" self.hide_error = theano.config.mode not in ["DebugMode", "DEBUG_MODE"] def shared(x, **kwargs): return gpuarray_shared_constructor(x, target=test_ctx_name, **kwargs) self.shared = shared def test_gpusplit_opt(self): # Test that we move the node to the GPU # Also test float16 computation at the same time. rng = np.random.RandomState(seed=utt.fetch_seed()) m = self.shared(rng.rand(4, 6).astype("float16")) o = tt.Split(2)(m, 0, [2, 2]) assert o[0].dtype == "float16" f = theano.function([], o, mode=self.mode) assert any( [ isinstance(node.op, self.split_op_class) for node in f.maker.fgraph.toposort() ] ) o1, o2 = f() assert np.allclose(o1, m.get_value(borrow=True)[:2]) assert np.allclose(o2, m.get_value(borrow=True)[2:]) def test_gpujoin_gpualloc(): a = tt.fmatrix("a") a_val = np.asarray(np.random.rand(4, 5), dtype="float32") b = tt.fmatrix("b") b_val = np.asarray(np.random.rand(3, 5), dtype="float32") f = theano.function( [a, b], tt.join(0, tt.zeros_like(a), tt.ones_like(b)) + 4, mode=mode_without_gpu ) f_gpu = theano.function( [a, b], tt.join(0, tt.zeros_like(a), tt.ones_like(b)), mode=mode_with_gpu ) f_gpu2 = theano.function( [a, b], tt.join(0, tt.zeros_like(a), tt.ones_like(b)) + 4, mode=mode_with_gpu ) assert sum([node.op == tt.alloc for node in f.maker.fgraph.toposort()]) == 2 assert sum([node.op == tt.join_ for node in f.maker.fgraph.toposort()]) == 1 assert ( sum([isinstance(node.op, GpuAlloc) for node in f_gpu.maker.fgraph.toposort()]) == 2 ) assert sum([node.op == gpu_join for node in f_gpu.maker.fgraph.toposort()]) == 1 assert ( sum([isinstance(node.op, GpuAlloc) for node in f_gpu2.maker.fgraph.toposort()]) == 2 ) assert sum([node.op == gpu_join for node in f_gpu2.maker.fgraph.toposort()]) == 1 assert np.allclose(f(a_val, b_val), f_gpu2(a_val, b_val)) def test_gpueye(): def check(dtype, N, M_=None, k=0): # Theano does not accept None as a tensor. # So we must use a real value. M = M_ # Currently DebugMode does not support None as inputs even if this is # allowed. 
if M is None: M = N N_symb = tt.iscalar() M_symb = tt.iscalar() k_symb = tt.iscalar() out = tt.eye(N_symb, M_symb, k_symb, dtype=dtype) + np.array(1).astype(dtype) f = theano.function([N_symb, M_symb, k_symb], out, mode=mode_with_gpu) result = np.asarray(f(N, M, k)) - np.array(1).astype(dtype) assert np.allclose(result, np.eye(N, M_, k, dtype=dtype)) assert result.dtype == np.dtype(dtype) assert any([isinstance(node.op, GpuEye) for node in f.maker.fgraph.toposort()]) for dtype in ["float32", "int32", "float16"]: check(dtype, 3) # M != N, k = 0 check(dtype, 3, 5) check(dtype, 5, 3) # N == M, k != 0 check(dtype, 3, 3, 1) check(dtype, 3, 3, -1) # N < M, k != 0 check(dtype, 3, 5, 1) check(dtype, 3, 5, -1) # N > M, k != 0 check(dtype, 5, 3, 1) check(dtype, 5, 3, -1) # k > M, -k > N, k > M, k > N check(dtype, 5, 3, 3) check(dtype, 3, 5, 3) check(dtype, 5, 3, -3) check(dtype, 3, 5, -3) check(dtype, 5, 3, 6) check(dtype, 3, 5, -6) def test_hostfromgpu_shape_i(): # Test that the shape is lifted over hostfromgpu m = mode_with_gpu.including( "local_dot_to_dot22", "local_dot22_to_dot22scalar", "specialize" ) a = tt.fmatrix("a") ca = theano.gpuarray.type.GpuArrayType("float32", (False, False))() av = np.asarray(np.random.rand(5, 4), dtype="float32") cv = gpuarray.asarray( np.random.rand(5, 4), dtype="float32", context=get_context(test_ctx_name) ) f = theano.function([a], GpuFromHost(test_ctx_name)(a), mode=m) assert any(isinstance(x.op, GpuFromHost) for x in f.maker.fgraph.toposort()) f = theano.function([a], GpuFromHost(test_ctx_name)(a).shape, mode=m) topo = f.maker.fgraph.toposort() assert isinstance(topo[0].op, tt.opt.Shape_i) assert isinstance(topo[1].op, tt.opt.Shape_i) assert isinstance(topo[2].op, tt.opt.MakeVector) assert tuple(f(av)) == (5, 4) f = theano.function([ca], host_from_gpu(ca), mode=m) assert host_from_gpu in [x.op for x in f.maker.fgraph.toposort()] f = theano.function([ca], host_from_gpu(ca).shape, mode=m) topo = f.maker.fgraph.toposort() assert isinstance(topo[0].op, theano.compile.Shape_i) assert isinstance(topo[1].op, theano.compile.Shape_i) assert isinstance(topo[2].op, tt.opt.MakeVector) assert tuple(f(cv)) == (5, 4) def test_Gpujoin_inplace(): # Test Gpujoin to work inplace. # # This function tests the case when several elements are passed to the # Gpujoin function but all except one of them are empty. In this case # Gpujoin should work inplace and the output should be the view of the # non-empty element. 
s = tt.lscalar() data = np.array([3, 4, 5], dtype=theano.config.floatX) x = gpuarray_shared_constructor(data, borrow=True) z = tt.zeros((s,)) join = GpuJoin(view=0) c = join(0, x, z) f = theano.function([s], theano.Out(c, borrow=True)) if not isinstance(mode_with_gpu, theano.compile.DebugMode): assert x.get_value(borrow=True, return_internal_type=True) is f(0) assert np.allclose(f(0), [3, 4, 5]) def test_gpu_tril_triu(): def check_l(m, k=0): m_symb = tt.matrix(dtype=m.dtype) k_symb = tt.iscalar() f = theano.function( [m_symb, k_symb], tt.tril(m_symb, k_symb), mode=mode_with_gpu ) result = f(m, k) assert np.allclose(result, np.tril(m, k)) assert result.dtype == np.dtype(dtype) assert any([isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort()]) def check_u(m, k=0): m_symb = tt.matrix(dtype=m.dtype) k_symb = tt.iscalar() f = theano.function( [m_symb, k_symb], tt.triu(m_symb, k_symb), mode=mode_with_gpu ) result = f(m, k) assert np.allclose(result, np.triu(m, k)) assert result.dtype == np.dtype(dtype) assert any([isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort()]) utt.seed_rng() test_rng = np.random.RandomState(seed=utt.fetch_seed()) for dtype in ["float64", "float32", "float16"]: # try a big one m = np.asarray(test_rng.rand(5000, 5000) * 2 - 1, dtype=dtype) check_l(m, 0) check_l(m, 1) check_l(m, -1) check_u(m, 0) check_u(m, 1) check_u(m, -1) m = np.asarray(test_rng.rand(10, 10) * 2 - 1, dtype=dtype) check_l(m, 0) check_l(m, 1) check_l(m, -1) check_u(m, 0) check_u(m, 1) check_u(m, -1) m = np.asarray(test_rng.rand(10, 5) * 2 - 1, dtype=dtype) check_l(m, 0) check_l(m, 1) check_l(m, -1) check_u(m, 0) check_u(m, 1) check_u(m, -1) def test_gputri(): def check(dtype, N, M_=None, k=0): # Theano does not accept None as a tensor. # So we must use a real value. M = M_ # Currently DebugMode does not support None as inputs even if this is # allowed. if M is None: M = N N_symb = tt.iscalar() M_symb = tt.iscalar() k_symb = tt.iscalar() out = tt.tri(N_symb, M_symb, k_symb, dtype=dtype) + np.array(1).astype(dtype) f = theano.function([N_symb, M_symb, k_symb], out, mode=mode_with_gpu) result = np.asarray(f(N, M, k)) - np.array(1).astype(dtype) assert np.allclose(result, np.tri(N, M_, k, dtype=dtype)) assert result.dtype ==
np.dtype(dtype)
numpy.dtype
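# A minimal sketch of the graph-inspection pattern these tests rely on,
# assuming a configured GPU context for test_ctx_name (as set up by the
# imports above): compile a function, then walk the toposorted graph to
# confirm which ops were lifted to the GPU.
def count_ops(fn, op_class):
    # number of nodes of the given op class in a compiled function's graph
    return sum(isinstance(node.op, op_class) for node in fn.maker.fgraph.toposort())


x = tt.fmatrix("x")
f = theano.function([x], GpuFromHost(test_ctx_name)(x), mode=mode_with_gpu)
assert count_ops(f, GpuFromHost) >= 1  # the host->GPU transfer is in the graph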
# pylint: disable=protected-access """ Test the wrappers for the C API. """ import os from contextlib import contextmanager import numpy as np import numpy.testing as npt import pandas as pd import pytest import xarray as xr from packaging.version import Version from pygmt import Figure, clib from pygmt.clib.conversion import dataarray_to_matrix from pygmt.clib.session import FAMILIES, VIAS from pygmt.exceptions import ( GMTCLibError, GMTCLibNoSessionError, GMTInvalidInput, GMTVersionError, ) from pygmt.helpers import GMTTempFile TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data") with clib.Session() as _lib: gmt_version = Version(_lib.info["version"]) @contextmanager def mock(session, func, returns=None, mock_func=None): """ Mock a GMT C API function to make it always return a given value. Used to test that exceptions are raised when API functions fail by producing a NULL pointer as output or non-zero status codes. Needed because it's not easy to get some API functions to fail without inducing a Segmentation Fault (which is a good thing because libgmt usually only fails with errors). """ if mock_func is None: def mock_api_function(*args): # pylint: disable=unused-argument """ A mock GMT API function that always returns a given value. """ return returns mock_func = mock_api_function get_libgmt_func = session.get_libgmt_func def mock_get_libgmt_func(name, argtypes=None, restype=None): """ Return our mock function. """ if name == func: return mock_func return get_libgmt_func(name, argtypes, restype) setattr(session, "get_libgmt_func", mock_get_libgmt_func) yield setattr(session, "get_libgmt_func", get_libgmt_func) def test_getitem(): """ Test that I can get correct constants from the C lib. """ ses = clib.Session() assert ses["GMT_SESSION_EXTERNAL"] != -99999 assert ses["GMT_MODULE_CMD"] != -99999 assert ses["GMT_PAD_DEFAULT"] != -99999 assert ses["GMT_DOUBLE"] != -99999 with pytest.raises(GMTCLibError): ses["A_WHOLE_LOT_OF_JUNK"] # pylint: disable=pointless-statement def test_create_destroy_session(): """ Test that create and destroy session are called without errors. """ # Create two session and make sure they are not pointing to the same memory session1 = clib.Session() session1.create(name="test_session1") assert session1.session_pointer is not None session2 = clib.Session() session2.create(name="test_session2") assert session2.session_pointer is not None assert session2.session_pointer != session1.session_pointer session1.destroy() session2.destroy() # Create and destroy a session twice ses = clib.Session() for __ in range(2): with pytest.raises(GMTCLibNoSessionError): ses.session_pointer # pylint: disable=pointless-statement ses.create("session1") assert ses.session_pointer is not None ses.destroy() with pytest.raises(GMTCLibNoSessionError): ses.session_pointer # pylint: disable=pointless-statement def test_create_session_fails(): """ Check that an exception is raised when failing to create a session. """ ses = clib.Session() with mock(ses, "GMT_Create_Session", returns=None): with pytest.raises(GMTCLibError): ses.create("test-session-name") # Should fail if trying to create a session before destroying the old one. ses.create("test1") with pytest.raises(GMTCLibError): ses.create("test2") def test_destroy_session_fails(): """ Fail to destroy session when given bad input. 
""" ses = clib.Session() with pytest.raises(GMTCLibNoSessionError): ses.destroy() ses.create("test-session") with mock(ses, "GMT_Destroy_Session", returns=1): with pytest.raises(GMTCLibError): ses.destroy() ses.destroy() def test_call_module(): """ Run a command to see if call_module works. """ data_fname = os.path.join(TEST_DATA_DIR, "points.txt") out_fname = "test_call_module.txt" with clib.Session() as lib: with GMTTempFile() as out_fname: lib.call_module("info", "{} -C ->{}".format(data_fname, out_fname.name)) assert os.path.exists(out_fname.name) output = out_fname.read().strip() assert output == "11.5309 61.7074 -2.9289 7.8648 0.1412 0.9338" def test_call_module_invalid_arguments(): """ Fails for invalid module arguments. """ with clib.Session() as lib: with pytest.raises(GMTCLibError): lib.call_module("info", "bogus-data.bla") def test_call_module_invalid_name(): """ Fails when given bad input. """ with clib.Session() as lib: with pytest.raises(GMTCLibError): lib.call_module("meh", "") def test_call_module_error_message(): """ Check is the GMT error message was captured. """ with clib.Session() as lib: try: lib.call_module("info", "bogus-data.bla") except GMTCLibError as error: assert "Module 'info' failed with status code" in str(error) assert "gmtinfo [ERROR]: Cannot find file bogus-data.bla" in str(error) def test_method_no_session(): """ Fails when not in a session. """ # Create an instance of Session without "with" so no session is created. lib = clib.Session() with pytest.raises(GMTCLibNoSessionError): lib.call_module("gmtdefaults", "") with pytest.raises(GMTCLibNoSessionError): lib.session_pointer # pylint: disable=pointless-statement def test_parse_constant_single(): """ Parsing a single family argument correctly. """ lib = clib.Session() for family in FAMILIES: parsed = lib._parse_constant(family, valid=FAMILIES) assert parsed == lib[family] def test_parse_constant_composite(): """ Parsing a composite constant argument (separated by |) correctly. """ lib = clib.Session() test_cases = ((family, via) for family in FAMILIES for via in VIAS) for family, via in test_cases: composite = "|".join([family, via]) expected = lib[family] + lib[via] parsed = lib._parse_constant(composite, valid=FAMILIES, valid_modifiers=VIAS) assert parsed == expected def test_parse_constant_fails(): """ Check if the function fails when given bad input. """ lib = clib.Session() test_cases = [ "SOME_random_STRING", "GMT_IS_DATASET|GMT_VIA_MATRIX|GMT_VIA_VECTOR", "GMT_IS_DATASET|NOT_A_PROPER_VIA", "NOT_A_PROPER_FAMILY|GMT_VIA_MATRIX", "NOT_A_PROPER_FAMILY|ALSO_INVALID", ] for test_case in test_cases: with pytest.raises(GMTInvalidInput): lib._parse_constant(test_case, valid=FAMILIES, valid_modifiers=VIAS) # Should also fail if not given valid modifiers but is using them anyway. # This should work... lib._parse_constant( "GMT_IS_DATASET|GMT_VIA_MATRIX", valid=FAMILIES, valid_modifiers=VIAS ) # But this shouldn't. with pytest.raises(GMTInvalidInput): lib._parse_constant( "GMT_IS_DATASET|GMT_VIA_MATRIX", valid=FAMILIES, valid_modifiers=None ) def test_create_data_dataset(): """ Run the function to make sure it doesn't fail badly. 
""" with clib.Session() as lib: # Dataset from vectors data_vector = lib.create_data( family="GMT_IS_DATASET|GMT_VIA_VECTOR", geometry="GMT_IS_POINT", mode="GMT_CONTAINER_ONLY", dim=[10, 20, 1, 0], # columns, rows, layers, dtype ) # Dataset from matrices data_matrix = lib.create_data( family="GMT_IS_DATASET|GMT_VIA_MATRIX", geometry="GMT_IS_POINT", mode="GMT_CONTAINER_ONLY", dim=[10, 20, 1, 0], ) assert data_vector != data_matrix def test_create_data_grid_dim(): """ Create a grid ignoring range and inc. """ with clib.Session() as lib: # Grids from matrices using dim lib.create_data( family="GMT_IS_GRID|GMT_VIA_MATRIX", geometry="GMT_IS_SURFACE", mode="GMT_CONTAINER_ONLY", dim=[10, 20, 1, 0], ) def test_create_data_grid_range(): """ Create a grid specifying range and inc instead of dim. """ with clib.Session() as lib: # Grids from matrices using range and int lib.create_data( family="GMT_IS_GRID|GMT_VIA_MATRIX", geometry="GMT_IS_SURFACE", mode="GMT_CONTAINER_ONLY", ranges=[150.0, 250.0, -20.0, 20.0], inc=[0.1, 0.2], ) def test_create_data_fails(): """ Check that create_data raises exceptions for invalid input and output. """ # Passing in invalid mode with pytest.raises(GMTInvalidInput): with clib.Session() as lib: lib.create_data( family="GMT_IS_DATASET", geometry="GMT_IS_SURFACE", mode="Not_a_valid_mode", dim=[0, 0, 1, 0], ranges=[150.0, 250.0, -20.0, 20.0], inc=[0.1, 0.2], ) # Passing in invalid geometry with pytest.raises(GMTInvalidInput): with clib.Session() as lib: lib.create_data( family="GMT_IS_GRID", geometry="Not_a_valid_geometry", mode="GMT_CONTAINER_ONLY", dim=[0, 0, 1, 0], ranges=[150.0, 250.0, -20.0, 20.0], inc=[0.1, 0.2], ) # If the data pointer returned is None (NULL pointer) with pytest.raises(GMTCLibError): with clib.Session() as lib: with mock(lib, "GMT_Create_Data", returns=None): lib.create_data( family="GMT_IS_DATASET", geometry="GMT_IS_SURFACE", mode="GMT_CONTAINER_ONLY", dim=[11, 10, 2, 0], ) def test_virtual_file(): """ Test passing in data via a virtual file with a Dataset. """ dtypes = "float32 float64 int32 int64 uint32 uint64".split() shape = (5, 3) for dtype in dtypes: with clib.Session() as lib: family = "GMT_IS_DATASET|GMT_VIA_MATRIX" geometry = "GMT_IS_POINT" dataset = lib.create_data( family=family, geometry=geometry, mode="GMT_CONTAINER_ONLY", dim=[shape[1], shape[0], 1, 0], # columns, rows, layers, dtype ) data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape) lib.put_matrix(dataset, matrix=data) # Add the dataset to a virtual file and pass it along to gmt info vfargs = (family, geometry, "GMT_IN|GMT_IS_REFERENCE", dataset) with lib.open_virtual_file(*vfargs) as vfile: with GMTTempFile() as outfile: lib.call_module("info", "{} ->{}".format(vfile, outfile.name)) output = outfile.read(keep_tabs=True) bounds = "\t".join( ["<{:.0f}/{:.0f}>".format(col.min(), col.max()) for col in data.T] ) expected = "<matrix memory>: N = {}\t{}\n".format(shape[0], bounds) assert output == expected def test_virtual_file_fails(): """ Check that opening and closing virtual files raises an exception for non- zero return codes. """ vfargs = ( "GMT_IS_DATASET|GMT_VIA_MATRIX", "GMT_IS_POINT", "GMT_IN|GMT_IS_REFERENCE", None, ) # Mock Open_VirtualFile to test the status check when entering the context. # If the exception is raised, the code won't get to the closing of the # virtual file. 
with clib.Session() as lib, mock(lib, "GMT_Open_VirtualFile", returns=1): with pytest.raises(GMTCLibError): with lib.open_virtual_file(*vfargs): print("Should not get to this code") # Test the status check when closing the virtual file # Mock the opening to return 0 (success) so that we don't open a file that # we won't close later. with clib.Session() as lib, mock(lib, "GMT_Open_VirtualFile", returns=0), mock( lib, "GMT_Close_VirtualFile", returns=1 ): with pytest.raises(GMTCLibError): with lib.open_virtual_file(*vfargs): pass print("Shouldn't get to this code either") def test_virtual_file_bad_direction(): """ Test passing an invalid direction argument. """ with clib.Session() as lib: vfargs = ( "GMT_IS_DATASET|GMT_VIA_MATRIX", "GMT_IS_POINT", "GMT_IS_GRID", # The invalid direction argument 0, ) with pytest.raises(GMTInvalidInput): with lib.open_virtual_file(*vfargs): print("This should have failed") def test_virtualfile_from_vectors(): """ Test the automation for transforming vectors to virtual file dataset. """ dtypes = "float32 float64 int32 int64 uint32 uint64".split() size = 10 for dtype in dtypes: x = np.arange(size, dtype=dtype) y = np.arange(size, size * 2, 1, dtype=dtype) z = np.arange(size * 2, size * 3, 1, dtype=dtype) with clib.Session() as lib: with lib.virtualfile_from_vectors(x, y, z) as vfile: with GMTTempFile() as outfile: lib.call_module("info", "{} ->{}".format(vfile, outfile.name)) output = outfile.read(keep_tabs=True) bounds = "\t".join( ["<{:.0f}/{:.0f}>".format(i.min(), i.max()) for i in (x, y, z)] ) expected = "<vector memory>: N = {}\t{}\n".format(size, bounds) assert output == expected @pytest.mark.parametrize("dtype", [str, object]) def test_virtualfile_from_vectors_one_string_or_object_column(dtype): """ Test passing in one column with string or object dtype into virtual file dataset. """ size = 5 x = np.arange(size, dtype=np.int32) y = np.arange(size, size * 2, 1, dtype=np.int32) strings = np.array(["a", "bc", "defg", "hijklmn", "opqrst"], dtype=dtype) with clib.Session() as lib: with lib.virtualfile_from_vectors(x, y, strings) as vfile: with GMTTempFile() as outfile: lib.call_module("convert", f"{vfile} ->{outfile.name}") output = outfile.read(keep_tabs=True) expected = "".join(f"{i}\t{j}\t{k}\n" for i, j, k in zip(x, y, strings)) assert output == expected @pytest.mark.parametrize("dtype", [str, object]) def test_virtualfile_from_vectors_two_string_or_object_columns(dtype): """ Test passing in two columns of string or object dtype into virtual file dataset. """ size = 5 x = np.arange(size, dtype=np.int32) y = np.arange(size, size * 2, 1, dtype=np.int32) strings1 =
np.array(["a", "bc", "def", "ghij", "klmno"], dtype=dtype)
numpy.array
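# A minimal usage sketch of the virtual-file machinery exercised above,
# assuming a working GMT installation: numpy vectors are handed to a GMT
# module as an in-memory "file", and the module's report lands in a tempfile.
x = np.array([1.0, 2.0, 3.0])
y = np.array([4.0, 5.0, 6.0])
with clib.Session() as lib:
    with lib.virtualfile_from_vectors(x, y) as vfile:
        with GMTTempFile() as tmp:
            lib.call_module("info", "{} ->{}".format(vfile, tmp.name))
            print(tmp.read())  # bounding-box summary computed by gmt info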
import numpy as np

from stumpff import C, S
from CelestialBody import BODIES
from numerical import newton, laguerre
from lagrange import calc_f, calc_fd, calc_g, calc_gd


def kepler_chi(chi, alpha, r0, vr0, mu, dt):
    ''' Kepler's Equation of the universal anomaly, modified for use in
    numerical solvers. '''
    z = alpha*chi**2
    return (r0*vr0/np.sqrt(mu))*chi**2*C(z) + \
           (1 - alpha*r0)*chi**3*S(z) + \
           r0*chi - np.sqrt(mu)*dt


def dkepler_dchi(chi, alpha, r0, vr0, mu, dt):
    ''' Derivative of Kepler's Equation of the universal anomaly, modified
    for use in numerical solvers. '''
    z = alpha*chi**2
    return (r0*vr0/np.sqrt(mu))*chi*(1 - alpha*chi**2*S(z)) + \
           (1 - alpha*r0)*chi**2*C(z) + r0


def d2kepler_dchi2(chi, alpha, r0, vr0, mu, dt):
    ''' Second derivative of Kepler's Equation of the universal anomaly,
    modified for use in numerical solvers. '''
    z = alpha*chi**2
    S_ = S(z)
    return (r0*vr0/np.sqrt(mu))*(1 - 3*z*S_ + z*(C(z) - 3*S_)) + \
           chi*(1 - z*S_)*(1 - alpha*r0)


def solve_kepler_chi(r_0, v_0, dt, body=BODIES['Earth'], method='laguerre', tol=1e-7, max_iters=100):
    ''' Solve Kepler's Equation of the universal anomaly chi using the
    specified numerical method. Applies Algorithm 3.4 from Orbital Mechanics
    for Engineering Students, 4 ed, Curtis.

    :param r_0: `iterable` (km) initial position 3-vector
    :param v_0: `iterable` (km/s) initial velocity 3-vector
    :param dt: `float` (s) time after initial state to solve for r, v as 3-vectors
    :param body: `CelestialBody` (--) the celestial body to use for orbital parameters
    :param method: `str` (--) which numerical method to use to solve Kepler's Equation
    :param tol: `float` (--) decimal tolerance for numerical method (default 1e-7 is IEEE 754 single precision)
    :param max_iters: `int` (--) maximum number of iterations in numerical method before breaking
    :return: (km) final position 3-vector, (km/s) final velocity 3-vector
    '''
    VALID_METHODS = ('laguerre', 'newton')

    mu = body.mu  # (km**3/s**2) gravitational parameter of the specified primary body
    r0 = np.linalg.norm(r_0)  # (km) initial position magnitude
    v0 = np.linalg.norm(v_0)  # (km/s) initial velocity magnitude
    vr0 = np.dot(v_0, r_0)/r0  # (km/s) initial radial velocity magnitude
    alpha = 2/r0 - v0**2/mu  # (1/km) inverse of semi-major axis
    chi0 = np.sqrt(mu)*np.abs(alpha)*dt

    if method not in VALID_METHODS:
        print(f'Method \'{method}\' is not valid, must be one of {VALID_METHODS}.\nDefaulting to laguerre method.')
        chi, _, _ = laguerre(chi0, kepler_chi, dkepler_dchi, d2kepler_dchi2, alpha, r0, vr0, mu, dt)
    elif method == 'newton':
        chi, _, _ = newton(chi0, kepler_chi, dkepler_dchi, alpha, r0, vr0, mu, dt)
    else:  # method == 'laguerre'
        chi, _, _ = laguerre(chi0, kepler_chi, dkepler_dchi, d2kepler_dchi2, alpha, r0, vr0, mu, dt)

    f = calc_f(chi, r0, alpha)
    g = calc_g(dt, mu, chi, alpha)
    r_1 = f*r_0 + g*v_0
    r1 = np.linalg.norm(r_1)
    fd = calc_fd(mu, r1, r0, alpha, chi)
    gd = calc_gd(chi, r1, alpha)
    v_1 = fd*r_0 + gd*v_0

    return r_1, v_1


def solve_kepler_E(e, Me, tol=1e-7, max_iters=100):
    ''' Solve Kepler's Equation in the form containing Eccentric Anomaly (E),
    eccentricity (e), and Mean Anomaly of Ellipse (Me). Uses Algorithm 3.1
    from Orbital Mechanics for Engineering Students, 4 ed, Curtis.
''' # TODO: have this function make use of one of the numerical methods in numerical.py def f(E, e, Me): return E - e*np.sin(E) - Me def fp(E, e): return 1 - e*np.cos(E) E = Me + e/2 if Me < np.pi else Me - e/2 ratio = f(E, e, Me)/fp(E, e) iters = 0 while abs(ratio) > tol and iters < max_iters: E -= ratio ratio = f(E, e, Me)/fp(E, e) iters += 1 E -= ratio converged =
np.abs(ratio)
numpy.abs
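# A self-contained sketch of the Newton iteration used by solve_kepler_E
# above (written standalone so it avoids this module's local imports); the
# final assert verifies that the returned E actually satisfies Kepler's
# equation E - e*sin(E) = Me.
import numpy as np


def kepler_E_newton(e, Me, tol=1e-7, max_iters=100):
    # same starting guess as solve_kepler_E: Me + e/2 below pi, Me - e/2 above
    E = Me + e/2 if Me < np.pi else Me - e/2
    for _ in range(max_iters):
        ratio = (E - e*np.sin(E) - Me) / (1 - e*np.cos(E))
        E -= ratio
        if abs(ratio) < tol:
            break
    return E


E = kepler_E_newton(0.4, 3.6)
assert abs(E - 0.4*np.sin(E) - 3.6) < 1e-6  # E solves Kepler's equation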
# coding: utf-8 # Licensed under a 3-clause BSD style license - see LICENSE.rst """ Test the Logarithmic Units and Quantities """ from __future__ import (absolute_import, unicode_literals, division, print_function) from ...extern import six from ...extern.six.moves import zip import pickle import itertools import pytest import numpy as np from numpy.testing.utils import assert_allclose from ...tests.helper import assert_quantity_allclose from ... import units as u, constants as c lu_units = [u.dex, u.mag, u.decibel] lu_subclasses = [u.DexUnit, u.MagUnit, u.DecibelUnit] lq_subclasses = [u.Dex, u.Magnitude, u.Decibel] pu_sample = (u.dimensionless_unscaled, u.m, u.g/u.s**2, u.Jy) class TestLogUnitCreation(object): def test_logarithmic_units(self): """Check logarithmic units are set up correctly.""" assert u.dB.to(u.dex) == 0.1 assert u.dex.to(u.mag) == -2.5 assert u.mag.to(u.dB) == -4 @pytest.mark.parametrize('lu_unit, lu_cls', zip(lu_units, lu_subclasses)) def test_callable_units(self, lu_unit, lu_cls): assert isinstance(lu_unit, u.UnitBase) assert callable(lu_unit) assert lu_unit._function_unit_class is lu_cls @pytest.mark.parametrize('lu_unit', lu_units) def test_equality_to_normal_unit_for_dimensionless(self, lu_unit): lu = lu_unit() assert lu == lu._default_function_unit # eg, MagUnit() == u.mag assert lu._default_function_unit == lu # and u.mag == MagUnit() @pytest.mark.parametrize('lu_unit, physical_unit', itertools.product(lu_units, pu_sample)) def test_call_units(self, lu_unit, physical_unit): """Create a LogUnit subclass using the callable unit and physical unit, and do basic check that output is right.""" lu1 = lu_unit(physical_unit) assert lu1.physical_unit == physical_unit assert lu1.function_unit == lu1._default_function_unit def test_call_invalid_unit(self): with pytest.raises(TypeError): u.mag([]) with pytest.raises(ValueError): u.mag(u.mag()) @pytest.mark.parametrize('lu_cls, physical_unit', itertools.product( lu_subclasses + [u.LogUnit], pu_sample)) def test_subclass_creation(self, lu_cls, physical_unit): """Create a LogUnit subclass object for given physical unit, and do basic check that output is right.""" lu1 = lu_cls(physical_unit) assert lu1.physical_unit == physical_unit assert lu1.function_unit == lu1._default_function_unit lu2 = lu_cls(physical_unit, function_unit=2*lu1._default_function_unit) assert lu2.physical_unit == physical_unit assert lu2.function_unit == u.Unit(2*lu2._default_function_unit) with pytest.raises(ValueError): lu_cls(physical_unit, u.m) def test_predefined_magnitudes(): assert_quantity_allclose((-21.1*u.STmag).physical, 1.*u.erg/u.cm**2/u.s/u.AA) assert_quantity_allclose((-48.6*u.ABmag).physical, 1.*u.erg/u.cm**2/u.s/u.Hz) assert_quantity_allclose((0*u.M_bol).physical, c.L_bol0) assert_quantity_allclose((0*u.m_bol).physical, c.L_bol0/(4.*np.pi*(10.*c.pc)**2)) def test_predefined_reinitialisation(): assert u.mag('ST') == u.STmag assert u.mag('AB') == u.ABmag assert u.mag('Bol') == u.M_bol assert u.mag('bol') == u.m_bol def test_predefined_string_roundtrip(): """Ensure roundtripping; see #5015""" with u.magnitude_zero_points.enable(): assert u.Unit(u.STmag.to_string()) == u.STmag assert u.Unit(u.ABmag.to_string()) == u.ABmag assert u.Unit(u.M_bol.to_string()) == u.M_bol assert u.Unit(u.m_bol.to_string()) == u.m_bol def test_inequality(): """Check __ne__ works (regresssion for #5342).""" lu1 = u.mag(u.Jy) lu2 = u.dex(u.Jy) lu3 = u.mag(u.Jy**2) lu4 = lu3 - lu1 assert lu1 != lu2 assert lu1 != lu3 assert lu1 == lu4 class TestLogUnitStrings(object): def 
test_str(self): """Do some spot checks that str, repr, etc. work as expected.""" lu1 = u.mag(u.Jy) assert str(lu1) == 'mag(Jy)' assert repr(lu1) == 'Unit("mag(Jy)")' assert lu1.to_string('generic') == 'mag(Jy)' with pytest.raises(ValueError): lu1.to_string('fits') lu2 = u.dex() assert str(lu2) == 'dex' assert repr(lu2) == 'Unit("dex(1)")' assert lu2.to_string() == 'dex(1)' lu3 = u.MagUnit(u.Jy, function_unit=2*u.mag) assert str(lu3) == '2 mag(Jy)' assert repr(lu3) == 'MagUnit("Jy", unit="2 mag")' assert lu3.to_string() == '2 mag(Jy)' lu4 = u.mag(u.ct) assert lu4.to_string('generic') == 'mag(ct)' assert lu4.to_string('latex') == ('$\\mathrm{mag}$$\\mathrm{\\left( ' '\\mathrm{ct} \\right)}$') assert lu4._repr_latex_() == lu4.to_string('latex') class TestLogUnitConversion(object): @pytest.mark.parametrize('lu_unit, physical_unit', itertools.product(lu_units, pu_sample)) def test_physical_unit_conversion(self, lu_unit, physical_unit): """Check various LogUnit subclasses are equivalent and convertible to their non-log counterparts.""" lu1 = lu_unit(physical_unit) assert lu1.is_equivalent(physical_unit) assert lu1.to(physical_unit, 0.) == 1. assert physical_unit.is_equivalent(lu1) assert physical_unit.to(lu1, 1.) == 0. pu = u.Unit(8.*physical_unit) assert lu1.is_equivalent(physical_unit) assert lu1.to(pu, 0.) == 0.125 assert pu.is_equivalent(lu1) assert_allclose(pu.to(lu1, 0.125), 0., atol=1.e-15) # Check we round-trip. value = np.linspace(0., 10., 6) assert_allclose(pu.to(lu1, lu1.to(pu, value)), value, atol=1.e-15) # And that we're not just returning True all the time. pu2 = u.g assert not lu1.is_equivalent(pu2) with pytest.raises(u.UnitsError): lu1.to(pu2) assert not pu2.is_equivalent(lu1) with pytest.raises(u.UnitsError): pu2.to(lu1) @pytest.mark.parametrize('lu_unit', lu_units) def test_container_unit_conversion(self, lu_unit): """Check that conversion to logarithmic units (u.mag, u.dB, u.dex) is only possible when the physical unit is dimensionless.""" values = np.linspace(0., 10., 6) lu1 = lu_unit(u.dimensionless_unscaled) assert lu1.is_equivalent(lu1.function_unit) assert_allclose(lu1.to(lu1.function_unit, values), values) lu2 = lu_unit(u.Jy) assert not lu2.is_equivalent(lu2.function_unit) with pytest.raises(u.UnitsError): lu2.to(lu2.function_unit, values) @pytest.mark.parametrize( 'flu_unit, tlu_unit, physical_unit', itertools.product(lu_units, lu_units, pu_sample)) def test_subclass_conversion(self, flu_unit, tlu_unit, physical_unit): """Check various LogUnit subclasses are equivalent and convertible to each other if they correspond to equivalent physical units.""" values = np.linspace(0., 10., 6) flu = flu_unit(physical_unit) tlu = tlu_unit(physical_unit) assert flu.is_equivalent(tlu) assert_allclose(flu.to(tlu), flu.function_unit.to(tlu.function_unit)) assert_allclose(flu.to(tlu, values), values * flu.function_unit.to(tlu.function_unit)) tlu2 = tlu_unit(u.Unit(100.*physical_unit)) assert flu.is_equivalent(tlu2) # Check that we round-trip. 
assert_allclose(flu.to(tlu2, tlu2.to(flu, values)), values, atol=1.e-15) tlu3 = tlu_unit(physical_unit.to_system(u.si)[0]) assert flu.is_equivalent(tlu3) assert_allclose(flu.to(tlu3, tlu3.to(flu, values)), values, atol=1.e-15) tlu4 = tlu_unit(u.g) assert not flu.is_equivalent(tlu4) with pytest.raises(u.UnitsError): flu.to(tlu4, values) def test_unit_decomposition(self): lu = u.mag(u.Jy) assert lu.decompose() == u.mag(u.Jy.decompose()) assert lu.decompose().physical_unit.bases == [u.kg, u.s] assert lu.si == u.mag(u.Jy.si) assert lu.si.physical_unit.bases == [u.kg, u.s] assert lu.cgs == u.mag(u.Jy.cgs) assert lu.cgs.physical_unit.bases == [u.g, u.s] def test_unit_multiple_possible_equivalencies(self): lu = u.mag(u.Jy) assert lu.is_equivalent(pu_sample) class TestLogUnitArithmetic(object): def test_multiplication_division(self): """Check that multiplication/division with other units is only possible when the physical unit is dimensionless, and that this turns the unit into a normal one.""" lu1 = u.mag(u.Jy) with pytest.raises(u.UnitsError): lu1 * u.m with pytest.raises(u.UnitsError): u.m * lu1 with pytest.raises(u.UnitsError): lu1 / lu1 for unit in (u.dimensionless_unscaled, u.m, u.mag, u.dex): with pytest.raises(u.UnitsError): lu1 / unit lu2 = u.mag(u.dimensionless_unscaled) with pytest.raises(u.UnitsError): lu2 * lu1 with pytest.raises(u.UnitsError): lu2 / lu1 # But dimensionless_unscaled can be cancelled. assert lu2 / lu2 == u.dimensionless_unscaled # With dimensionless, normal units are OK, but we return a plain unit. tf = lu2 * u.m tr = u.m * lu2 for t in (tf, tr): assert not isinstance(t, type(lu2)) assert t == lu2.function_unit * u.m with u.set_enabled_equivalencies(u.logarithmic()): with pytest.raises(u.UnitsError): t.to(lu2.physical_unit) # Now we essentially have a LogUnit with a prefactor of 100, # so should be equivalent again. t = tf / u.cm with u.set_enabled_equivalencies(u.logarithmic()): assert t.is_equivalent(lu2.function_unit) assert_allclose(t.to(u.dimensionless_unscaled, np.arange(3.)/100.), lu2.to(lu2.physical_unit, np.arange(3.))) # If we effectively remove lu1, a normal unit should be returned. t2 = tf / lu2 assert not isinstance(t2, type(lu2)) assert t2 == u.m t3 = tf / lu2.function_unit assert not isinstance(t3, type(lu2)) assert t3 == u.m # For completeness, also ensure non-sensical operations fail with pytest.raises(TypeError): lu1 * object() with pytest.raises(TypeError): slice(None) * lu1 with pytest.raises(TypeError): lu1 / [] with pytest.raises(TypeError): 1 / lu1 @pytest.mark.parametrize('power', (2, 0.5, 1, 0)) def test_raise_to_power(self, power): """Check that raising LogUnits to some power is only possible when the physical unit is dimensionless, and that conversion is turned off when the resulting logarithmic unit (such as mag**2) is incompatible.""" lu1 = u.mag(u.Jy) if power == 0: assert lu1 ** power == u.dimensionless_unscaled elif power == 1: assert lu1 ** power == lu1 else: with pytest.raises(u.UnitsError): lu1 ** power # With dimensionless, though, it works, but returns a normal unit. 
lu2 = u.mag(u.dimensionless_unscaled) t = lu2**power if power == 0: assert t == u.dimensionless_unscaled elif power == 1: assert t == lu2 else: assert not isinstance(t, type(lu2)) assert t == lu2.function_unit**power # also check we roundtrip t2 = t**(1./power) assert t2 == lu2.function_unit with u.set_enabled_equivalencies(u.logarithmic()): assert_allclose(t2.to(u.dimensionless_unscaled, np.arange(3.)), lu2.to(lu2.physical_unit, np.arange(3.))) @pytest.mark.parametrize('other', pu_sample) def test_addition_subtraction_to_normal_units_fails(self, other): lu1 = u.mag(u.Jy) with pytest.raises(u.UnitsError): lu1 + other with pytest.raises(u.UnitsError): lu1 - other with pytest.raises(u.UnitsError): other - lu1 def test_addition_subtraction_to_non_units_fails(self): lu1 = u.mag(u.Jy) with pytest.raises(TypeError): lu1 + 1. with pytest.raises(TypeError): lu1 - [1., 2., 3.] @pytest.mark.parametrize( 'other', (u.mag, u.mag(), u.mag(u.Jy), u.mag(u.m), u.Unit(2*u.mag), u.MagUnit('', 2.*u.mag))) def test_addition_subtraction(self, other): """Check physical units are changed appropriately""" lu1 = u.mag(u.Jy) other_pu = getattr(other, 'physical_unit', u.dimensionless_unscaled) lu_sf = lu1 + other assert lu_sf.is_equivalent(lu1.physical_unit * other_pu) lu_sr = other + lu1 assert lu_sr.is_equivalent(lu1.physical_unit * other_pu) lu_df = lu1 - other assert lu_df.is_equivalent(lu1.physical_unit / other_pu) lu_dr = other - lu1 assert lu_dr.is_equivalent(other_pu / lu1.physical_unit) def test_complicated_addition_subtraction(self): """for fun, a more complicated example of addition and subtraction""" dm0 = u.Unit('DM', 1./(4.*np.pi*(10.*u.pc)**2)) lu_dm = u.mag(dm0) lu_absST = u.STmag - lu_dm assert lu_absST.is_equivalent(u.erg/u.s/u.AA) def test_neg_pos(self): lu1 = u.mag(u.Jy) neg_lu = -lu1 assert neg_lu != lu1 assert neg_lu.physical_unit == u.Jy**-1 assert -neg_lu == lu1 pos_lu = +lu1 assert pos_lu is not lu1 assert pos_lu == lu1 def test_pickle(): lu1 = u.dex(u.cm/u.s**2) s = pickle.dumps(lu1) lu2 = pickle.loads(s) assert lu1 == lu2 def test_hashable(): lu1 = u.dB(u.mW) lu2 = u.dB(u.m) lu3 = u.dB(u.mW) assert hash(lu1) != hash(lu2) assert hash(lu1) == hash(lu3) luset = {lu1, lu2, lu3} assert len(luset) == 2 class TestLogQuantityCreation(object): @pytest.mark.parametrize('lq, lu', zip(lq_subclasses + [u.LogQuantity], lu_subclasses + [u.LogUnit])) def test_logarithmic_quantities(self, lq, lu): """Check logarithmic quantities are all set up correctly""" assert lq._unit_class == lu assert type(lu()._quantity_class(1.)) is lq @pytest.mark.parametrize('lq_cls, physical_unit', itertools.product(lq_subclasses, pu_sample)) def test_subclass_creation(self, lq_cls, physical_unit): """Create LogQuantity subclass objects for some physical units, and basic check on transformations""" value = np.arange(1., 10.) 
log_q = lq_cls(value * physical_unit) assert log_q.unit.physical_unit == physical_unit assert log_q.unit.function_unit == log_q.unit._default_function_unit assert_allclose(log_q.physical.value, value) with pytest.raises(ValueError): lq_cls(value, physical_unit) @pytest.mark.parametrize( 'unit', (u.mag, u.mag(), u.mag(u.Jy), u.mag(u.m), u.Unit(2*u.mag), u.MagUnit('', 2.*u.mag), u.MagUnit(u.Jy, -1*u.mag), u.MagUnit(u.m, -2.*u.mag))) def test_different_units(self, unit): q = u.Magnitude(1.23, unit) assert q.unit.function_unit == getattr(unit, 'function_unit', unit) assert q.unit.physical_unit is getattr(unit, 'physical_unit', u.dimensionless_unscaled) @pytest.mark.parametrize('value, unit', ( (1.*u.mag(u.Jy), None), (1.*u.dex(u.Jy), None), (1.*u.mag(u.W/u.m**2/u.Hz), u.mag(u.Jy)), (1.*u.dex(u.W/u.m**2/u.Hz), u.mag(u.Jy)))) def test_function_values(self, value, unit): lq = u.Magnitude(value, unit) assert lq == value assert lq.unit.function_unit == u.mag assert lq.unit.physical_unit == getattr(unit, 'physical_unit', value.unit.physical_unit) @pytest.mark.parametrize( 'unit', (u.mag(), u.mag(u.Jy), u.mag(u.m), u.MagUnit('', 2.*u.mag), u.MagUnit(u.Jy, -1*u.mag), u.MagUnit(u.m, -2.*u.mag))) def test_indirect_creation(self, unit): q1 = 2.5 * unit assert isinstance(q1, u.Magnitude) assert q1.value == 2.5 assert q1.unit == unit pv = 100. * unit.physical_unit q2 = unit * pv assert q2.unit == unit assert q2.unit.physical_unit == pv.unit assert q2.to_value(unit.physical_unit) == 100. assert (q2._function_view / u.mag).to_value(1) == -5. q3 = unit / 0.4 assert q3 == q1 def test_from_view(self): # Cannot view a physical quantity as a function quantity, since the # values would change. q = [100., 1000.] * u.cm/u.s**2 with pytest.raises(TypeError): q.view(u.Dex) # But fine if we have the right magnitude. q = [2., 3.] * u.dex lq = q.view(u.Dex) assert isinstance(lq, u.Dex) assert lq.unit.physical_unit == u.dimensionless_unscaled assert np.all(q == lq) def test_using_quantity_class(self): """Check that we can use Quantity if we have subok=True""" # following issue #5851 lu = u.dex(u.AA) with pytest.raises(u.UnitTypeError): u.Quantity(1., lu) q = u.Quantity(1., lu, subok=True) assert type(q) is lu._quantity_class def test_conversion_to_and_from_physical_quantities(): """Ensures we can convert from regular quantities.""" mst = [10., 12., 14.] * u.STmag flux_lambda = mst.physical mst_roundtrip = flux_lambda.to(u.STmag) # check we return a logquantity; see #5178. assert isinstance(mst_roundtrip, u.Magnitude) assert mst_roundtrip.unit == mst.unit assert_allclose(mst_roundtrip.value, mst.value) wave = [4956.8, 4959.55, 4962.3] * u.AA flux_nu = mst.to(u.Jy, equivalencies=u.spectral_density(wave)) mst_roundtrip2 = flux_nu.to(u.STmag, u.spectral_density(wave)) assert isinstance(mst_roundtrip2, u.Magnitude) assert mst_roundtrip2.unit == mst.unit assert_allclose(mst_roundtrip2.value, mst.value) def test_quantity_decomposition(): lq = 10.*u.mag(u.Jy) assert lq.decompose() == lq assert lq.decompose().unit.physical_unit.bases == [u.kg, u.s] assert lq.si == lq assert lq.si.unit.physical_unit.bases == [u.kg, u.s] assert lq.cgs == lq assert lq.cgs.unit.physical_unit.bases == [u.g, u.s] class TestLogQuantityViews(object): def setup(self): self.lq = u.Magnitude(np.arange(10.) * u.Jy) self.lq2 = u.Magnitude(np.arange(5.)) def test_value_view(self): lq_value = self.lq.value assert type(lq_value) is np.ndarray lq_value[2] = -1. 
assert np.all(self.lq.value == lq_value) def test_function_view(self): lq_fv = self.lq._function_view assert type(lq_fv) is u.Quantity assert lq_fv.unit is self.lq.unit.function_unit lq_fv[3] = -2. * lq_fv.unit assert np.all(self.lq.value == lq_fv.value) def test_quantity_view(self): # Cannot view as Quantity, since the unit cannot be represented. with pytest.raises(TypeError): self.lq.view(u.Quantity) # But a dimensionless one is fine. q2 = self.lq2.view(u.Quantity) assert q2.unit is u.mag assert np.all(q2.value == self.lq2.value) lq3 = q2.view(u.Magnitude) assert type(lq3.unit) is u.MagUnit assert lq3.unit.physical_unit == u.dimensionless_unscaled assert np.all(lq3 == self.lq2) class TestLogQuantitySlicing(object): def test_item_get_and_set(self): lq1 = u.Magnitude(np.arange(1., 11.)*u.Jy) assert lq1[9] == u.Magnitude(10.*u.Jy) lq1[2] = 100.*u.Jy assert lq1[2] == u.Magnitude(100.*u.Jy) with pytest.raises(u.UnitsError): lq1[2] = 100.*u.m with pytest.raises(u.UnitsError): lq1[2] = 100.*u.mag with pytest.raises(u.UnitsError): lq1[2] = u.Magnitude(100.*u.m) assert lq1[2] == u.Magnitude(100.*u.Jy) def test_slice_get_and_set(self): lq1 = u.Magnitude(
np.arange(1., 10.)
numpy.arange
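# A minimal sketch of the behaviour the tests above assert: logarithmic units
# convert among themselves by fixed factors, and a Magnitude remembers its
# physical unit so values round-trip back to fluxes.
import numpy as np
from astropy import units as u

m = u.Magnitude([1., 10.] * u.Jy)  # stores -2.5 * log10(flux / Jy)
print(m)           # [ 0. , -2.5] mag(Jy)
print(m.physical)  # [ 1., 10.] Jy

assert u.dB.to(u.dex) == 0.1    # 10 dB per dex
assert u.dex.to(u.mag) == -2.5  # 1 dex corresponds to -2.5 mag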
# ________ # / # \ / # \ / # \/ import random import textwrap import emd_mean import AdvEMDpy import emd_basis import emd_utils import numpy as np import pandas as pd import cvxpy as cvx import seaborn as sns import matplotlib.pyplot as plt from scipy.integrate import odeint from scipy.ndimage import gaussian_filter from emd_utils import time_extension, Utility from scipy.interpolate import CubicSpline from emd_hilbert import Hilbert, hilbert_spectrum from emd_preprocess import Preprocess from emd_mean import Fluctuation from AdvEMDpy import EMD # alternate packages from PyEMD import EMD as pyemd0215 import emd as emd040 sns.set(style='darkgrid') pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001) pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time) pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series) # plot 0 - addition fig = plt.figure(figsize=(9, 4)) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('First Iteration of Sifting Algorithm') plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1) plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()], c='r', label=r'$M(t_i)$', zorder=2) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4) plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()], c='c', label=r'$m(t_j)$', zorder=3) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5) plt.yticks(ticks=[-2, -1, 0, 1, 2]) plt.xticks(ticks=[0, np.pi, 2 * np.pi], labels=[r'0', r'$\pi$', r'$2\pi$']) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/pseudo_algorithm.png') plt.show() knots = np.arange(12) time = np.linspace(0, 11, 1101) basis = emd_basis.Basis(time=time, time_series=time) b_spline_basis = basis.cubic_b_spline(knots) chsi_basis = basis.chsi_basis(knots) # plot 1 plt.title('Non-Natural Cubic B-Spline Bases at Boundary') plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $') plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $') plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $') plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $') plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $') plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $']) plt.xlim(4.4, 6.6) plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.legend(loc='upper left') plt.savefig('jss_figures/boundary_bases.png') plt.show() # plot 1a - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) knots_uniform = np.linspace(0, 2 * np.pi, 51) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0] fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) 
plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Uniform Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Uniform Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Uniform Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots_uniform)): axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_uniform.png') plt.show() # plot 1b - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=1, verbose=False) fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Statically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Statically Optimised Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Statically Optimised Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots)): axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_1.png') plt.show() # plot 1c - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=2, verbose=False) fig,
axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Dynamically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Dynamically Optimised Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Dynamically Optimised Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots[i])): axs[i].plot(knots[i][j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_2.png') plt.show() # plot 1d - addition window = 81 fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Filtering Demonstration') axs[1].set_title('Zoomed Region') preprocess_time = pseudo_alg_time.copy() np.random.seed(1) random.seed(1) preprocess_time_series = pseudo_alg_time_series + np.random.normal(0, 0.1, len(preprocess_time)) for i in random.sample(range(1000), 500): preprocess_time_series[i] += np.random.normal(0, 1) preprocess = Preprocess(time=preprocess_time, time_series=preprocess_time_series) axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[0].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[0].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize filter', 12)) axs[0].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize interpolation filter', 14)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[1].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[1].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize filter', 12)) axs[1].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize interpolation filter', 14)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_filter.png') plt.show() # plot 1e - addition fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Smoothing Demonstration') axs[1].set_title('Zoomed Region') axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[0].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) downsampled_and_decimated = preprocess.downsample() axs[0].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 11)) downsampled = preprocess.downsample(decimate=False) axs[0].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[1].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13))
axs[1].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 13)) axs[1].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.06, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.06, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_smooth.png') plt.show() # plot 2 fig, axs = plt.subplots(1, 2, sharey=True) axs[0].set_title('Cubic B-Spline Bases') axs[0].plot(time, b_spline_basis[2, :].T, '--', label='Basis 1') axs[0].plot(time, b_spline_basis[3, :].T, '--', label='Basis 2') axs[0].plot(time, b_spline_basis[4, :].T, '--', label='Basis 3') axs[0].plot(time, b_spline_basis[5, :].T, '--', label='Basis 4') axs[0].legend(loc='upper left') axs[0].plot(5 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].plot(6 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].set_xticks([5, 6]) axs[0].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[0].set_xlim(4.5, 6.5) axs[1].set_title('Cubic Hermite Spline Bases') axs[1].plot(time, chsi_basis[10, :].T, '--') axs[1].plot(time, chsi_basis[11, :].T, '--') axs[1].plot(time, chsi_basis[12, :].T, '--') axs[1].plot(time, chsi_basis[13, :].T, '--') axs[1].plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].set_xticks([5, 6]) axs[1].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[1].set_xlim(4.5, 6.5) plt.savefig('jss_figures/comparing_bases.png') plt.show() # plot 3 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_dash = maxima_y[-1] * np.ones_like(max_dash_time) min_dash_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_dash = minima_y[-1] * np.ones_like(min_dash_time) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) max_discard = maxima_y[-1] max_discard_time = minima_x[-1] - maxima_x[-1] + minima_x[-1] max_discard_dash_time = np.linspace(max_discard_time - width, max_discard_time + width, 101) max_discard_dash = max_discard * np.ones_like(max_discard_dash_time) dash_2_time = np.linspace(minima_x[-1], max_discard_time, 101) dash_2 = np.linspace(minima_y[-1], max_discard, 101) end_point_time = time[-1] end_point = time_series[-1] time_reflect = np.linspace((5 - a) * np.pi, (5 + a) * np.pi, 101) time_series_reflect = np.flip(np.cos(np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101)) + np.cos(5 * np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101))) time_series_anti_reflect = time_series_reflect[0] - time_series_reflect utils = emd_utils.Utility(time=time, time_series=time_series_anti_reflect) anti_max_bool = utils.max_bool_func_1st_order_fd() anti_max_point_time = time_reflect[anti_max_bool] 
anti_max_point = time_series_anti_reflect[anti_max_bool] utils = emd_utils.Utility(time=time, time_series=time_series_reflect) no_anchor_max_time = time_reflect[utils.max_bool_func_1st_order_fd()] no_anchor_max = time_series_reflect[utils.max_bool_func_1st_order_fd()] point_1 = 5.4 length_distance = np.linspace(maxima_y[-1], minima_y[-1], 101) length_distance_time = point_1 * np.pi * np.ones_like(length_distance) length_time = np.linspace(point_1 * np.pi - width, point_1 * np.pi + width, 101) length_top = maxima_y[-1] * np.ones_like(length_time) length_bottom = minima_y[-1] * np.ones_like(length_time) point_2 = 5.2 length_distance_2 = np.linspace(time_series[-1], minima_y[-1], 101) length_distance_time_2 = point_2 * np.pi * np.ones_like(length_distance_2) length_time_2 = np.linspace(point_2 * np.pi - width, point_2 * np.pi + width, 101) length_top_2 = time_series[-1] * np.ones_like(length_time_2) length_bottom_2 = minima_y[-1] * np.ones_like(length_time_2) symmetry_axis_1_time = minima_x[-1] * np.ones(101) symmetry_axis_2_time = time[-1] * np.ones(101) symmetry_axis = np.linspace(-2, 2, 101) end_time = np.linspace(time[-1] - width, time[-1] + width, 101) end_signal = time_series[-1] * np.ones_like(end_time) anti_symmetric_time = np.linspace(time[-1] - 0.5, time[-1] + 0.5, 101) anti_symmetric_signal = time_series[-1] * np.ones_like(anti_symmetric_time) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.plot(time, time_series, linewidth=2, label='Signal') plt.title('Symmetry Edge Effects Example') plt.plot(time_reflect, time_series_reflect, 'g--', linewidth=2, label=textwrap.fill('Symmetric signal', 10)) plt.plot(time_reflect[:51], time_series_anti_reflect[:51], '--', c='purple', linewidth=2, label=textwrap.fill('Anti-symmetric signal', 10)) plt.plot(max_dash_time, max_dash, 'k-') plt.plot(min_dash_time, min_dash, 'k-') plt.plot(dash_1_time, dash_1, 'k--') plt.plot(dash_2_time, dash_2, 'k--') plt.plot(length_distance_time, length_distance, 'k--') plt.plot(length_distance_time_2, length_distance_2, 'k--') plt.plot(length_time, length_top, 'k-') plt.plot(length_time, length_bottom, 'k-') plt.plot(length_time_2, length_top_2, 'k-') plt.plot(length_time_2, length_bottom_2, 'k-') plt.plot(end_time, end_signal, 'k-') plt.plot(symmetry_axis_1_time, symmetry_axis, 'r--', zorder=1) plt.plot(anti_symmetric_time, anti_symmetric_signal, 'r--', zorder=1) plt.plot(symmetry_axis_2_time, symmetry_axis, 'r--', label=textwrap.fill('Axes of symmetry', 10), zorder=1) plt.text(5.1 * np.pi, -0.7, r'$\beta$L') plt.text(5.34 * np.pi, -0.05, 'L') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(max_discard_time, max_discard, c='purple', zorder=4, label=textwrap.fill('Symmetric Discard maxima', 10)) plt.scatter(end_point_time, end_point, c='orange', zorder=4, label=textwrap.fill('Symmetric Anchor maxima', 10)) plt.scatter(anti_max_point_time, anti_max_point, c='green', zorder=4, label=textwrap.fill('Anti-Symmetric maxima', 10)) plt.scatter(no_anchor_max_time, no_anchor_max, c='gray', zorder=4, label=textwrap.fill('Symmetric maxima', 10)) plt.xlim(3.9 * np.pi, 5.5 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_symmetry_anti.png') plt.show()
# plot 4 a = 0.21 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_1 = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_dash_2 = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_dash_time_1 = maxima_x[-1] * np.ones_like(max_dash_1) max_dash_time_2 = maxima_x[-2] * np.ones_like(max_dash_1) min_dash_1 = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_dash_2 = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_dash_time_1 = minima_x[-1] * np.ones_like(min_dash_1) min_dash_time_2 = minima_x[-2] * np.ones_like(min_dash_1) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) dash_2_time = np.linspace(maxima_x[-1], minima_x[-2], 101) dash_2 = np.linspace(maxima_y[-1], minima_y[-2], 101) s1 = (minima_y[-2] - maxima_y[-1]) / (minima_x[-2] - maxima_x[-1]) slope_based_maximum_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) slope_based_maximum = minima_y[-1] + (slope_based_maximum_time - minima_x[-1]) * s1 max_dash_time_3 = slope_based_maximum_time * np.ones_like(max_dash_1) max_dash_3 = np.linspace(slope_based_maximum - width, slope_based_maximum + width, 101) dash_3_time = np.linspace(minima_x[-1], slope_based_maximum_time, 101) dash_3 = np.linspace(minima_y[-1], slope_based_maximum, 101) s2 = (minima_y[-1] - maxima_y[-1]) / (minima_x[-1] - maxima_x[-1]) slope_based_minimum_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) slope_based_minimum = slope_based_maximum - (slope_based_maximum_time - slope_based_minimum_time) * s2 min_dash_time_3 = slope_based_minimum_time * np.ones_like(min_dash_1) min_dash_3 = np.linspace(slope_based_minimum - width, slope_based_minimum + width, 101) dash_4_time = np.linspace(slope_based_maximum_time, slope_based_minimum_time) dash_4 = np.linspace(slope_based_maximum, slope_based_minimum) maxima_dash = np.linspace(2.5 - width, 2.5 + width, 101) maxima_dash_time_1 = maxima_x[-2] * np.ones_like(maxima_dash) maxima_dash_time_2 = maxima_x[-1] * np.ones_like(maxima_dash) maxima_dash_time_3 = slope_based_maximum_time * np.ones_like(maxima_dash) maxima_line_dash_time = np.linspace(maxima_x[-2], slope_based_maximum_time, 101) maxima_line_dash = 2.5 * np.ones_like(maxima_line_dash_time) minima_dash = np.linspace(-3.4 - width, -3.4 + width, 101) minima_dash_time_1 = minima_x[-2] * np.ones_like(minima_dash) minima_dash_time_2 = minima_x[-1] * np.ones_like(minima_dash) minima_dash_time_3 = slope_based_minimum_time * np.ones_like(minima_dash) minima_line_dash_time = np.linspace(minima_x[-2], slope_based_minimum_time, 101) minima_line_dash = -3.4 * np.ones_like(minima_line_dash_time) # slightly edit signal to make difference between slope-based method and improved slope-based method more clear time_series[time >= minima_x[-1]] = 1.5 * (time_series[time >= minima_x[-1]] - time_series[time == minima_x[-1]]) + \ time_series[time == minima_x[-1]] improved_slope_based_maximum_time = time[-1] improved_slope_based_maximum = time_series[-1] improved_slope_based_minimum_time = slope_based_minimum_time improved_slope_based_minimum = improved_slope_based_maximum + s2 * (improved_slope_based_minimum_time - improved_slope_based_maximum_time) 
min_dash_4 = np.linspace(improved_slope_based_minimum - width, improved_slope_based_minimum + width, 101) min_dash_time_4 = improved_slope_based_minimum_time * np.ones_like(min_dash_4) dash_final_time = np.linspace(improved_slope_based_maximum_time, improved_slope_based_minimum_time, 101) dash_final = np.linspace(improved_slope_based_maximum, improved_slope_based_minimum, 101) ax = plt.subplot(111) figure_size = plt.gcf().get_size_inches() factor = 0.9 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) plt.plot(time, time_series, linewidth=2, label='Signal') plt.title('Slope-Based Edge Effects Example') plt.plot(max_dash_time_1, max_dash_1, 'k-') plt.plot(max_dash_time_2, max_dash_2, 'k-') plt.plot(max_dash_time_3, max_dash_3, 'k-') plt.plot(min_dash_time_1, min_dash_1, 'k-') plt.plot(min_dash_time_2, min_dash_2, 'k-') plt.plot(min_dash_time_3, min_dash_3, 'k-') plt.plot(min_dash_time_4, min_dash_4, 'k-') plt.plot(maxima_dash_time_1, maxima_dash, 'k-') plt.plot(maxima_dash_time_2, maxima_dash, 'k-') plt.plot(maxima_dash_time_3, maxima_dash, 'k-') plt.plot(minima_dash_time_1, minima_dash, 'k-') plt.plot(minima_dash_time_2, minima_dash, 'k-') plt.plot(minima_dash_time_3, minima_dash, 'k-') plt.text(4.34 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$') plt.text(4.74 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$') plt.text(4.12 * np.pi, 2, r'$\Delta{t^{max}_{M}}$') plt.text(4.50 * np.pi, 2, r'$\Delta{t^{max}_{M}}$') plt.text(4.30 * np.pi, 0.35, r'$s_1$') plt.text(4.43 * np.pi, -0.20, r'$s_2$') plt.text(4.30 * np.pi + (minima_x[-1] - minima_x[-2]), 0.35 + (minima_y[-1] - minima_y[-2]), r'$s_1$') plt.text(4.43 * np.pi + (slope_based_minimum_time - minima_x[-1]), -0.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$') plt.text(4.50 * np.pi + (slope_based_minimum_time - minima_x[-1]), 1.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$') plt.plot(minima_line_dash_time, minima_line_dash, 'k--') plt.plot(maxima_line_dash_time, maxima_line_dash, 'k--') plt.plot(dash_1_time, dash_1, 'k--') plt.plot(dash_2_time, dash_2, 'k--') plt.plot(dash_3_time, dash_3, 'k--') plt.plot(dash_4_time, dash_4, 'k--') plt.plot(dash_final_time, dash_final, 'k--') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(slope_based_maximum_time, slope_based_maximum, c='orange', zorder=4, label=textwrap.fill('Slope-based maximum', 11)) plt.scatter(slope_based_minimum_time, slope_based_minimum, c='purple', zorder=4, label=textwrap.fill('Slope-based minimum', 11)) plt.scatter(improved_slope_based_maximum_time, improved_slope_based_maximum, c='deeppink', zorder=4, label=textwrap.fill('Improved slope-based maximum', 11)) plt.scatter(improved_slope_based_minimum_time, improved_slope_based_minimum, c='dodgerblue', zorder=4, label=textwrap.fill('Improved slope-based minimum', 11)) plt.xlim(3.9 * np.pi, 5.5 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-3, -2, -1, 0, 1, 2), ('-3', '-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_slope_based.png') plt.show() # plot 5 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x =
time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] A2 = np.abs(maxima_y[-2] - minima_y[-2]) / 2 A1 = np.abs(maxima_y[-1] - minima_y[-1]) / 2 P2 = 2 * np.abs(maxima_x[-2] - minima_x[-2]) P1 = 2 * np.abs(maxima_x[-1] - minima_x[-1]) Huang_time = (P1 / P2) * (time[time >= maxima_x[-2]] - time[time == maxima_x[-2]]) + maxima_x[-1] Huang_wave = (A1 / A2) * (time_series[time >= maxima_x[-2]] - time_series[time == maxima_x[-2]]) + maxima_y[-1] Coughlin_time = Huang_time Coughlin_wave = A1 * np.cos(2 * np.pi * (1 / P1) * (Coughlin_time - Coughlin_time[0])) Average_max_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) Average_max = (maxima_y[-2] + maxima_y[-1]) / 2 Average_min_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) Average_min = (minima_y[-2] + minima_y[-1]) / 2 utils_Huang = emd_utils.Utility(time=time, time_series=Huang_wave) Huang_max_bool = utils_Huang.max_bool_func_1st_order_fd() Huang_min_bool = utils_Huang.min_bool_func_1st_order_fd() utils_Coughlin = emd_utils.Utility(time=time, time_series=Coughlin_wave) Coughlin_max_bool = utils_Coughlin.max_bool_func_1st_order_fd() Coughlin_min_bool = utils_Coughlin.min_bool_func_1st_order_fd() Huang_max_time = Huang_time[Huang_max_bool] Huang_max = Huang_wave[Huang_max_bool] Huang_min_time = Huang_time[Huang_min_bool] Huang_min = Huang_wave[Huang_min_bool] Coughlin_max_time = Coughlin_time[Coughlin_max_bool] Coughlin_max = Coughlin_wave[Coughlin_max_bool] Coughlin_min_time = Coughlin_time[Coughlin_min_bool] Coughlin_min = Coughlin_wave[Coughlin_min_bool] max_2_x_time = np.linspace(maxima_x[-2] - width, maxima_x[-2] + width, 101) max_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) max_2_x = maxima_y[-2] * np.ones_like(max_2_x_time) min_2_x_time = np.linspace(minima_x[-2] - width, minima_x[-2] + width, 101) min_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) min_2_x = minima_y[-2] * np.ones_like(min_2_x_time) dash_max_min_2_x = np.linspace(minima_y[-2], maxima_y[-2], 101) dash_max_min_2_x_time = 5.3 * np.pi * np.ones_like(dash_max_min_2_x) max_2_y = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) max_2_y_time = maxima_x[-2] * np.ones_like(max_2_y) min_2_y = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) min_2_y_time = minima_x[-2] * np.ones_like(min_2_y) dash_max_min_2_y_time = np.linspace(minima_x[-2], maxima_x[-2], 101) dash_max_min_2_y = -1.8 * np.ones_like(dash_max_min_2_y_time) max_1_x_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) max_1_x = maxima_y[-1] * np.ones_like(max_1_x_time) min_1_x_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) min_1_x = minima_y[-1] * np.ones_like(min_1_x_time) dash_max_min_1_x = np.linspace(minima_y[-1], maxima_y[-1], 101) dash_max_min_1_x_time = 5.4 * np.pi * np.ones_like(dash_max_min_1_x) max_1_y = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) max_1_y_time = maxima_x[-1] * np.ones_like(max_1_y) min_1_y = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) min_1_y_time = 
minima_x[-1] * np.ones_like(min_1_y) dash_max_min_1_y_time =
np.linspace(minima_x[-1], maxima_x[-1], 101)
numpy.linspace
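Reading the record above back together: the prompt ends with the dangling assignment dash_max_min_1_y_time = and the completion supplies np.linspace(minima_x[-1], maxima_x[-1], 101). A self-contained sketch of that join, with placeholder extrema standing in for the minima_x/maxima_x arrays detected in the figure script:

import numpy as np

minima_x = np.array([4.0 * np.pi])  # placeholder final minimum location
maxima_x = np.array([4.6 * np.pi])  # placeholder final maximum location

# The completion: 101 evenly spaced abscissae between the last minimum and
# last maximum, later drawn as a dashed 'k--' measurement line in the figure.
dash_max_min_1_y_time = np.linspace(minima_x[-1], maxima_x[-1], 101)
assert dash_max_min_1_y_time.shape == (101,)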
# -*- encoding:utf-8 -*- # @Time : 2021/1/3 15:15 # @Author : gfjiang import os.path as osp import mmcv import numpy as np import cvtools import matplotlib.pyplot as plt import cv2.cv2 as cv from functools import partial import torch import math from cvtools.utils.path import add_prefix_filename_suffix from mmdet.ops import nms from mmdet.apis import init_detector, inference_detector def draw_features(module, input, output, work_dir='./'): x = output.cpu().numpy() out_channels = list(output.shape)[1] height = int(math.sqrt(out_channels)) width = height if list(output.shape)[2] < 128: return fig = plt.figure(figsize=(32, 32)) fig.subplots_adjust(left=0.05, right=0.95, bottom=0.05, top=0.95, wspace=0.05, hspace=0.05) for i in range(height * width): plt.subplot(height, width, i + 1) plt.axis('off') img = x[0, i, :, :] pmin = np.min(img) pmax =
np.max(img)
numpy.max
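The draw_features prompt stops at pmax = and its completion is np.max(img). In feature-map visualisation hooks like this one, the per-channel minimum and maximum typically feed a min-max rescaling before display; the continuation below is an assumption for illustration only (img_scaled and the epsilon guard are mine, not the repository's code):

import numpy as np

img = np.random.rand(32, 32).astype(np.float32)  # stand-in feature map

pmin = np.min(img)
pmax = np.max(img)  # the record's completion target (numpy.max)

# Assumed next step: rescale to [0, 255] for display; the epsilon guards
# against division by zero on a constant feature map.
img_scaled = (img - pmin) / (pmax - pmin + 1e-6) * 255
assert 0.0 <= img_scaled.min() <= img_scaled.max() <= 255.0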
minima_x[-1] * np.ones_like(min_1_y) dash_max_min_1_y_time = np.linspace(minima_x[-1], maxima_x[-1], 101) dash_max_min_1_y = -2.1 * np.ones_like(dash_max_min_1_y_time) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Characteristic Wave Effects Example') plt.plot(time, time_series, linewidth=2, label='Signal') plt.scatter(Huang_max_time, Huang_max, c='magenta', zorder=4, label=textwrap.fill('Huang maximum', 10)) plt.scatter(Huang_min_time, Huang_min, c='lime', zorder=4, label=textwrap.fill('Huang minimum', 10)) plt.scatter(Coughlin_max_time, Coughlin_max, c='darkorange', zorder=4, label=textwrap.fill('Coughlin maximum', 14)) plt.scatter(Coughlin_min_time, Coughlin_min, c='dodgerblue', zorder=4, label=textwrap.fill('Coughlin minimum', 14)) plt.scatter(Average_max_time, Average_max, c='orangered', zorder=4, label=textwrap.fill('Average maximum', 14)) plt.scatter(Average_min_time, Average_min, c='cyan', zorder=4, label=textwrap.fill('Average minimum', 14)) plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.plot(Huang_time, Huang_wave, '--', c='darkviolet', label=textwrap.fill('Huang Characteristic Wave', 14)) plt.plot(Coughlin_time, Coughlin_wave, '--', c='darkgreen', label=textwrap.fill('Coughlin Characteristic Wave', 14)) plt.plot(max_2_x_time, max_2_x, 'k-') plt.plot(max_2_x_time_side, max_2_x, 'k-') plt.plot(min_2_x_time, min_2_x, 'k-') plt.plot(min_2_x_time_side, min_2_x, 'k-') plt.plot(dash_max_min_2_x_time, dash_max_min_2_x, 'k--') plt.text(5.16 * np.pi, 0.85, r'$2a_2$') plt.plot(max_2_y_time, max_2_y, 'k-') plt.plot(max_2_y_time, max_2_y_side, 'k-') plt.plot(min_2_y_time, min_2_y, 'k-') plt.plot(min_2_y_time, min_2_y_side, 'k-') plt.plot(dash_max_min_2_y_time, dash_max_min_2_y, 'k--') plt.text(4.08 * np.pi, -2.2, r'$\frac{p_2}{2}$') plt.plot(max_1_x_time, max_1_x, 'k-') plt.plot(max_1_x_time_side, max_1_x, 'k-') plt.plot(min_1_x_time, min_1_x, 'k-') plt.plot(min_1_x_time_side, min_1_x, 'k-') plt.plot(dash_max_min_1_x_time, dash_max_min_1_x, 'k--') plt.text(5.42 * np.pi, -0.1, r'$2a_1$') plt.plot(max_1_y_time, max_1_y, 'k-') plt.plot(max_1_y_time, max_1_y_side, 'k-') plt.plot(min_1_y_time, min_1_y, 'k-') plt.plot(min_1_y_time, min_1_y_side, 'k-') plt.plot(dash_max_min_1_y_time, dash_max_min_1_y, 'k--') plt.text(4.48 * np.pi, -2.5, r'$\frac{p_1}{2}$') plt.xlim(3.9 * np.pi, 5.6 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_characteristic_wave.png') plt.show() # plot 6 t = np.linspace(5, 95, 100) signal_orig = np.cos(2 * np.pi * t / 50) + 0.6 * np.cos(2 * np.pi * t / 25) + 0.5 * np.sin(2 * np.pi * t / 200) util_nn = emd_utils.Utility(time=t, time_series=signal_orig) maxima = signal_orig[util_nn.max_bool_func_1st_order_fd()] minima = signal_orig[util_nn.min_bool_func_1st_order_fd()] cs_max = CubicSpline(t[util_nn.max_bool_func_1st_order_fd()], maxima) cs_min = CubicSpline(t[util_nn.min_bool_func_1st_order_fd()], minima) time = np.linspace(0, 5 * np.pi, 1001) lsq_signal = np.cos(time) + np.cos(5 * time) knots = np.linspace(0, 5 * np.pi, 101) time_extended = time_extension(time) time_series_extended = np.full_like(time_extended, np.nan) time_series_extended[int(len(lsq_signal) - 1):int(2 * (len(lsq_signal) - 1) + 1)] 
= lsq_signal neural_network_m = 200 neural_network_k = 100 # forward -> P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[(-(neural_network_m + neural_network_k - col)):(-(neural_network_m - col))] P[-1, col] = 1 # for additive constant t = lsq_signal[-neural_network_m:] # test - top seed_weights = np.ones(neural_network_k) / neural_network_k weights = 0 * seed_weights.copy() train_input = P[:-1, :] lr = 0.01 for iterations in range(1000): output = np.matmul(weights, train_input) error = (t - output) gradients = error * (- train_input) # guess average gradients average_gradients = np.mean(gradients, axis=1) # steepest descent max_gradient_vector = average_gradients * (np.abs(average_gradients) == max(np.abs(average_gradients))) adjustment = - lr * average_gradients # adjustment = - lr * max_gradient_vector weights += adjustment # test - bottom weights_right = np.hstack((weights, 0)) max_count_right = 0 min_count_right = 0 i_right = 0 while ((max_count_right < 1) or (min_count_right < 1)) and (i_right < len(lsq_signal) - 1): time_series_extended[int(2 * (len(lsq_signal) - 1) + 1 + i_right)] = \ sum(weights_right * np.hstack((time_series_extended[ int(2 * (len(lsq_signal) - 1) + 1 - neural_network_k + i_right): int(2 * (len(lsq_signal) - 1) + 1 + i_right)], 1))) i_right += 1 if i_right > 1: emd_utils_max = \ emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)], time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)]) if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0: max_count_right += 1 emd_utils_min = \ emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)], time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)]) if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0: min_count_right += 1 # backward <- P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[int(col + 1):int(col + neural_network_k + 1)] P[-1, col] = 1 # for additive constant t = lsq_signal[:neural_network_m] vx = cvx.Variable(int(neural_network_k + 1)) objective = cvx.Minimize(cvx.norm((2 * (vx * P) + 1 - t), 2)) # linear activation function is arbitrary prob = cvx.Problem(objective) result = prob.solve(verbose=True, solver=cvx.ECOS) weights_left = np.array(vx.value) max_count_left = 0 min_count_left = 0 i_left = 0 while ((max_count_left < 1) or (min_count_left < 1)) and (i_left < len(lsq_signal) - 1): time_series_extended[int(len(lsq_signal) - 2 - i_left)] = \ 2 * sum(weights_left * np.hstack((time_series_extended[int(len(lsq_signal) - 1 - i_left): int(len(lsq_signal) - 1 - i_left + neural_network_k)], 1))) + 1 i_left += 1 if i_left > 1: emd_utils_max = \ emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))], time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))]) if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0: max_count_left += 1 emd_utils_min = \ emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))], time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))]) if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0: min_count_left += 1 lsq_utils = emd_utils.Utility(time=time, 
time_series=lsq_signal) utils_extended = emd_utils.Utility(time=time_extended, time_series=time_series_extended) maxima = lsq_signal[lsq_utils.max_bool_func_1st_order_fd()] maxima_time = time[lsq_utils.max_bool_func_1st_order_fd()] maxima_extrapolate = time_series_extended[utils_extended.max_bool_func_1st_order_fd()][-1] maxima_extrapolate_time = time_extended[utils_extended.max_bool_func_1st_order_fd()][-1] minima = lsq_signal[lsq_utils.min_bool_func_1st_order_fd()] minima_time = time[lsq_utils.min_bool_func_1st_order_fd()] minima_extrapolate = time_series_extended[utils_extended.min_bool_func_1st_order_fd()][-2:] minima_extrapolate_time = time_extended[utils_extended.min_bool_func_1st_order_fd()][-2:] ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Single Neuron Neural Network Example') plt.plot(time, lsq_signal, zorder=2, label='Signal') plt.plot(time_extended, time_series_extended, c='g', zorder=1, label=textwrap.fill('Extrapolated signal', 12)) plt.scatter(maxima_time, maxima, c='r', zorder=3, label='Maxima') plt.scatter(minima_time, minima, c='b', zorder=3, label='Minima') plt.scatter(maxima_extrapolate_time, maxima_extrapolate, c='magenta', zorder=3, label=textwrap.fill('Extrapolated maxima', 12)) plt.scatter(minima_extrapolate_time, minima_extrapolate, c='cyan', zorder=4, label=textwrap.fill('Extrapolated minima', 12)) plt.plot(((time[-302] + time[-301]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k', label=textwrap.fill('Neural network inputs', 13)) plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(((time_extended[-1001] + time_extended[-1002]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k') plt.plot(((time[-202] + time[-201]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed', label=textwrap.fill('Neural network targets', 13)) plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(((time_extended[-1001] + time_extended[-1000]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed') plt.xlim(3.4 * np.pi, 5.6 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/neural_network.png') plt.show() # plot 6a np.random.seed(0) time = np.linspace(0, 5 * 
np.pi, 1001) knots_51 = np.linspace(0, 5 * np.pi, 51) time_series = np.cos(2 * time) + np.cos(4 * time) + np.cos(8 * time) noise = np.random.normal(0, 1, len(time_series)) time_series += noise advemdpy = EMD(time=time, time_series=time_series) imfs_51, hts_51, ifs_51 = advemdpy.empirical_mode_decomposition(knots=knots_51, max_imfs=3, edge_effect='symmetric_anchor', verbose=False)[:3] knots_31 = np.linspace(0, 5 * np.pi, 31) imfs_31, hts_31, ifs_31 = advemdpy.empirical_mode_decomposition(knots=knots_31, max_imfs=2, edge_effect='symmetric_anchor', verbose=False)[:3] knots_11 = np.linspace(0, 5 * np.pi, 11) imfs_11, hts_11, ifs_11 = advemdpy.empirical_mode_decomposition(knots=knots_11, max_imfs=1, edge_effect='symmetric_anchor', verbose=False)[:3] fig, axs = plt.subplots(3, 1) plt.suptitle(textwrap.fill('Comparison of Trends Extracted with Different Knot Sequences', 40)) plt.subplots_adjust(hspace=0.1) axs[0].plot(time, time_series, label='Time series') axs[0].plot(time, imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 1, IMF 2, & IMF 3 with 51 knots', 21)) print(f'DFA fluctuation with 51 knots: {np.round(np.var(time_series - (imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :])), 3)}') for knot in knots_51: axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[0].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[0].set_xticklabels(['', '', '', '', '', '']) axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[0].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[0].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].plot(time, time_series, label='Time series') axs[1].plot(time, imfs_31[1, :] + imfs_31[2, :], label=textwrap.fill('Sum of IMF 1 and IMF 2 with 31 knots', 19)) axs[1].plot(time, imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 2 and IMF 3 with 51 knots', 19)) print(f'DFA fluctuation with 31 knots: {np.round(np.var(time_series - (imfs_31[1, :] + imfs_31[2, :])), 3)}') for knot in knots_31: axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[1].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[1].set_xticklabels(['', '', '', '', '', '']) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) axs[1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[1].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[1].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[1].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') axs[2].plot(time, time_series, label='Time series') axs[2].plot(time, imfs_11[1, :], label='IMF 1 with 11 knots') axs[2].plot(time, imfs_31[2, :], label='IMF 2 with 31 knots') axs[2].plot(time, imfs_51[3, :], label='IMF 
3 with 51 knots') print(f'DFA fluctuation with 11 knots: {np.round(np.var(time_series - imfs_11[1, :]), 3)}') for knot in knots_11: axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[2].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[2].set_xticklabels(['$0$', r'$\pi$', r'$2\pi$', r'$3\pi$', r'$4\pi$', r'$5\pi$']) box_2 = axs[2].get_position() axs[2].set_position([box_2.x0 - 0.05, box_2.y0, box_2.width * 0.85, box_2.height]) axs[2].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[2].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[2].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[2].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[2].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') plt.savefig('jss_figures/DFA_different_trends.png') plt.show() # plot 6b fig, axs = plt.subplots(3, 1) plt.suptitle(textwrap.fill('Comparison of Trends Extracted with Different Knot Sequences Zoomed Region', 40)) plt.subplots_adjust(hspace=0.1) axs[0].plot(time, time_series, label='Time series') axs[0].plot(time, imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 1, IMF 2, & IMF 3 with 51 knots', 21)) for knot in knots_51: axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[0].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[0].set_xticklabels(['', '', '', '', '', '']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[0].set_ylim(-5.5, 5.5) axs[0].set_xlim(0.95 * np.pi, 1.55 * np.pi) axs[1].plot(time, time_series, label='Time series') axs[1].plot(time, imfs_31[1, :] + imfs_31[2, :], label=textwrap.fill('Sum of IMF 1 and IMF 2 with 31 knots', 19)) axs[1].plot(time, imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 2 and IMF 3 with 51 knots', 19)) for knot in knots_31: axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[1].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[1].set_xticklabels(['', '', '', '', '', '']) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) axs[1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].set_ylim(-5.5, 5.5) axs[1].set_xlim(0.95 * np.pi, 1.55 * np.pi) axs[2].plot(time, time_series, label='Time series') axs[2].plot(time, imfs_11[1, :], label='IMF 1 with 11 knots') axs[2].plot(time, imfs_31[2, :], label='IMF 2 with 31 knots') axs[2].plot(time, imfs_51[3, :], label='IMF 3 with 51 knots') for knot in knots_11: axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[2].set_xticks([np.pi, (3 / 2) * np.pi]) axs[2].set_xticklabels([r'$\pi$', r'$\frac{3}{2}\pi$']) box_2 = axs[2].get_position() axs[2].set_position([box_2.x0 - 0.05, box_2.y0, box_2.width * 0.85, box_2.height]) 
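# The "DFA fluctuation" values printed above are simply the variance of the
# residual once a candidate trend is removed. A small illustrative helper (an
# assumption of this sketch, not an AdvEMDpy function) makes that explicit:
def _dfa_fluctuation(series, trend):
    # Variance of the detrended series, rounded as in the print statements above.
    return np.round(np.var(series - trend), 3)

# e.g. _dfa_fluctuation(time_series, imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :])
# reproduces the value printed for the 51-knot decomposition.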
axs[2].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[2].set_ylim(-5.5, 5.5) axs[2].set_xlim(0.95 * np.pi, 1.55 * np.pi) plt.savefig('jss_figures/DFA_different_trends_zoomed.png') plt.show() hs_outputs = hilbert_spectrum(time, imfs_51, hts_51, ifs_51, max_frequency=12, plot=False) # plot 6c ax = plt.subplot(111) figure_size = plt.gcf().get_size_inches() factor = 0.9 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Simple Sinusoidal Time Series with Added Noise', 50)) x_hs, y, z = hs_outputs z_min, z_max = 0, np.abs(z).max() ax.pcolormesh(x_hs, y, np.abs(z), cmap='gist_rainbow', vmin=z_min, vmax=z_max) ax.plot(x_hs[0, :], 8 * np.ones_like(x_hs[0, :]), '--', label=r'$\omega = 8$', linewidth=3) ax.plot(x_hs[0, :], 4 * np.ones_like(x_hs[0, :]), '--', label=r'$\omega = 4$', linewidth=3) ax.plot(x_hs[0, :], 2 * np.ones_like(x_hs[0, :]), '--', label=r'$\omega = 2$', linewidth=3) ax.set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi]) ax.set_xticklabels(['$0$', r'$\pi$', r'$2\pi$', r'$3\pi$', r'$4\pi$']) plt.ylabel(r'Frequency (rad.s$^{-1}$)') plt.xlabel('Time (s)') box_0 = ax.get_position() ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.85, box_0.height * 0.9]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/DFA_hilbert_spectrum.png') plt.show() # plot 6d time = np.linspace(0, 5 * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) knots = np.linspace(0, 5 * np.pi, 51) fluc = Fluctuation(time=time, time_series=time_series) max_unsmoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='maxima', smooth=False) max_smoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='maxima', smooth=True) min_unsmoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='minima', smooth=False) min_smoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='minima', smooth=True) util = Utility(time=time, time_series=time_series) maxima = util.max_bool_func_1st_order_fd() minima = util.min_bool_func_1st_order_fd() ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title(textwrap.fill('Plot Demonstrating Unsmoothed Extrema Envelopes if Schoenberg–Whitney Conditions are Not Satisfied', 50)) plt.plot(time, time_series, label='Time series', zorder=2, linewidth=2) plt.scatter(time[maxima], time_series[maxima], c='r', label='Maxima', zorder=10) plt.scatter(time[minima], time_series[minima], c='b', label='Minima', zorder=10) plt.plot(time, max_unsmoothed[0], label=textwrap.fill('Unsmoothed maxima envelope', 10), c='darkorange') plt.plot(time, max_smoothed[0], label=textwrap.fill('Smoothed maxima envelope', 10), c='red') plt.plot(time, min_unsmoothed[0], label=textwrap.fill('Unsmoothed minima envelope', 10), c='cyan') plt.plot(time, min_smoothed[0], label=textwrap.fill('Smoothed minima envelope', 10), c='blue') for knot in knots[:-1]: plt.plot(knot * np.ones(101), np.linspace(-3.0, -2.0, 101), '--', c='grey', zorder=1) plt.plot(knots[-1] * np.ones(101), np.linspace(-3.0, -2.0, 101), '--', c='grey', label='Knots', zorder=1) plt.xticks((0, 1 * np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi), (r'$0$', r'$\pi$', r'2$\pi$', r'3$\pi$', r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) plt.xlim(-0.25 * np.pi, 5.25 * np.pi) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, 
box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/Schoenberg_Whitney_Conditions.png') plt.show() # plot 7 a = 0.25 width = 0.2 time = np.linspace((0 + a) * np.pi, (5 - a) * np.pi, 1001) knots = np.linspace((0 + a) * np.pi, (5 - a) * np.pi, 11) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] inflection_bool = utils.inflection_point() inflection_x = time[inflection_bool] inflection_y = time_series[inflection_bool] fluctuation = emd_mean.Fluctuation(time=time, time_series=time_series) maxima_envelope = fluctuation.envelope_basis_function_approximation(knots, 'maxima', smooth=False, smoothing_penalty=0.2, edge_effect='none', spline_method='b_spline')[0] maxima_envelope_smooth = fluctuation.envelope_basis_function_approximation(knots, 'maxima', smooth=True, smoothing_penalty=0.2, edge_effect='none', spline_method='b_spline')[0] minima_envelope = fluctuation.envelope_basis_function_approximation(knots, 'minima', smooth=False, smoothing_penalty=0.2, edge_effect='none', spline_method='b_spline')[0] minima_envelope_smooth = fluctuation.envelope_basis_function_approximation(knots, 'minima', smooth=True, smoothing_penalty=0.2, edge_effect='none', spline_method='b_spline')[0] inflection_points_envelope = fluctuation.direct_detrended_fluctuation_estimation(knots, smooth=True, smoothing_penalty=0.2, technique='inflection_points')[0] binomial_points_envelope = fluctuation.direct_detrended_fluctuation_estimation(knots, smooth=True, smoothing_penalty=0.2, technique='binomial_average', order=21, increment=20)[0] derivative_of_lsq = utils.derivative_forward_diff() derivative_time = time[:-1] derivative_knots = np.linspace(knots[0], knots[-1], 31) # change (1) detrended_fluctuation_technique and (2) max_internal_iter and (3) debug (confusing with external debugging) emd = AdvEMDpy.EMD(time=derivative_time, time_series=derivative_of_lsq) imf_1_of_derivative = emd.empirical_mode_decomposition(knots=derivative_knots, knot_time=derivative_time, text=False, verbose=False)[0][1, :] utils = emd_utils.Utility(time=time[:-1], time_series=imf_1_of_derivative) optimal_maxima = np.r_[False, utils.derivative_forward_diff() < 0, False] & \ np.r_[utils.zero_crossing() == 1, False] optimal_minima = np.r_[False, utils.derivative_forward_diff() > 0, False] & \ np.r_[utils.zero_crossing() == 1, False] EEMD_maxima_envelope = fluctuation.envelope_basis_function_approximation_fixed_points(knots, 'maxima', optimal_maxima, optimal_minima, smooth=False, smoothing_penalty=0.2, edge_effect='none')[0] EEMD_minima_envelope = fluctuation.envelope_basis_function_approximation_fixed_points(knots, 'minima', optimal_maxima, optimal_minima, smooth=False, smoothing_penalty=0.2, edge_effect='none')[0] ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Detrended Fluctuation Analysis Examples') plt.plot(time, time_series, linewidth=2, label='Time series') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(time[optimal_maxima], time_series[optimal_maxima], c='darkred', zorder=4, label=textwrap.fill('Optimal maxima', 10)) plt.scatter(time[optimal_minima], time_series[optimal_minima], c='darkblue', zorder=4, 
label=textwrap.fill('Optimal minima', 10)) plt.scatter(inflection_x, inflection_y, c='magenta', zorder=4, label=textwrap.fill('Inflection points', 10)) plt.plot(time, maxima_envelope, c='darkblue', label=textwrap.fill('EMD envelope', 10)) plt.plot(time, minima_envelope, c='darkblue') plt.plot(time, (maxima_envelope + minima_envelope) / 2, c='darkblue') plt.plot(time, maxima_envelope_smooth, c='darkred', label=textwrap.fill('SEMD envelope', 10)) plt.plot(time, minima_envelope_smooth, c='darkred') plt.plot(time, (maxima_envelope_smooth + minima_envelope_smooth) / 2, c='darkred') plt.plot(time, EEMD_maxima_envelope, c='darkgreen', label=textwrap.fill('EEMD envelope', 10)) plt.plot(time, EEMD_minima_envelope, c='darkgreen') plt.plot(time, (EEMD_maxima_envelope + EEMD_minima_envelope) / 2, c='darkgreen') plt.plot(time, inflection_points_envelope, c='darkorange', label=textwrap.fill('Inflection point envelope', 10)) plt.plot(time, binomial_points_envelope, c='deeppink', label=textwrap.fill('Binomial average envelope', 10)) plt.plot(time, np.cos(time), c='black', label='True mean') plt.xticks((0, 1 * np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi), (r'$0$', r'$\pi$', r'2$\pi$', r'3$\pi$', r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) plt.xlim(-0.25 * np.pi, 5.25 * np.pi) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/detrended_fluctuation_analysis.png') plt.show() # Duffing Equation Example def duffing_equation(xy, ts): gamma = 0.1 epsilon = 1 omega = ((2 * np.pi) / 25) return [xy[1], xy[0] - epsilon * xy[0] ** 3 + gamma * np.cos(omega * ts)] t = np.linspace(0, 150, 1501) XY0 = [1, 1] solution = odeint(duffing_equation, XY0, t) x = solution[:, 0] dxdt = solution[:, 1] x_points = [0, 50, 100, 150] x_names = [0, 50, 100, 150] y_points_1 = [-2, 0, 2] y_points_2 = [-1, 0, 1] fig, axs = plt.subplots(2, 1) plt.subplots_adjust(hspace=0.2) axs[0].plot(t, x) axs[0].set_title('Duffing Equation Displacement') axs[0].set_ylim([-2, 2]) axs[0].set_xlim([0, 150]) axs[1].plot(t, dxdt) axs[1].set_title('Duffing Equation Velocity') axs[1].set_ylim([-1.5, 1.5]) axs[1].set_xlim([0, 150]) axis = 0 for ax in axs.flat: ax.label_outer() if axis == 0: ax.set_ylabel('x(t)') ax.set_yticks(y_points_1) if axis == 1: ax.set_ylabel(r'$ \dfrac{dx(t)}{dt} $') ax.set(xlabel='t') ax.set_yticks(y_points_2) ax.set_xticks(x_points) ax.set_xticklabels(x_names) axis += 1 plt.savefig('jss_figures/Duffing_equation.png') plt.show() # compare other packages Duffing - top pyemd = pyemd0215() py_emd = pyemd(x) IP, IF, IA = emd040.spectra.frequency_transform(py_emd.T, 10, 'hilbert') freq_edges, freq_bins = emd040.spectra.define_hist_bins(0, 0.2, 100) hht = emd040.spectra.hilberthuang(IF, IA, freq_edges) hht = gaussian_filter(hht, sigma=1) ax = plt.subplot(111) figure_size = plt.gcf().get_size_inches() factor = 1.0 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Duffing Equation using PyEMD 0.2.10', 40)) plt.pcolormesh(t, freq_bins, hht, cmap='gist_rainbow', vmin=0, vmax=np.max(np.max(np.abs(hht)))) plt.plot(t[:-1], 0.124 * np.ones_like(t[:-1]), '--', label=textwrap.fill('Hamiltonian frequency approximation', 15)) plt.plot(t[:-1], 0.04 * np.ones_like(t[:-1]), 'g--', label=textwrap.fill('Driving function frequency', 15)) plt.xticks([0, 50, 100, 150]) plt.yticks([0, 0.1, 0.2]) 
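# Quick consistency check (sketch): the driving term gamma * np.cos(omega * ts) in
# duffing_equation above uses omega = (2 * np.pi) / 25 rad/s, i.e. a driving
# frequency of 1 / 25 = 0.04 Hz, the dashed reference line on each Hilbert spectrum.
assert np.isclose(((2 * np.pi) / 25) / (2 * np.pi), 0.04)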
plt.ylabel('Frequency (Hz)') plt.xlabel('Time (s)') box_0 = ax.get_position() ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.75, box_0.height * 0.9]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/Duffing_equation_ht_pyemd.png') plt.show() emd_sift = emd040.sift.sift(x) IP, IF, IA = emd040.spectra.frequency_transform(emd_sift, 10, 'hilbert') freq_edges, freq_bins = emd040.spectra.define_hist_bins(0, 0.2, 100) hht = emd040.spectra.hilberthuang(IF, IA, freq_edges) hht = gaussian_filter(hht, sigma=1) ax = plt.subplot(111) figure_size = plt.gcf().get_size_inches() factor = 1.0 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Duffing Equation using emd 0.3.3', 40)) plt.pcolormesh(t, freq_bins, hht, cmap='gist_rainbow', vmin=0, vmax=np.max(np.max(np.abs(hht)))) plt.plot(t[:-1], 0.124 * np.ones_like(t[:-1]), '--', label=textwrap.fill('Hamiltonian frequency approximation', 15)) plt.plot(t[:-1], 0.04 * np.ones_like(t[:-1]), 'g--', label=textwrap.fill('Driving function frequency', 15)) plt.xticks([0, 50, 100, 150]) plt.yticks([0, 0.1, 0.2]) plt.ylabel('Frequency (Hz)') plt.xlabel('Time (s)') box_0 = ax.get_position() ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.75, box_0.height * 0.9]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/Duffing_equation_ht_emd.png') plt.show() # compare other packages Duffing - bottom emd_duffing = AdvEMDpy.EMD(time=t, time_series=x) emd_duff, emd_ht_duff, emd_if_duff, _, _, _, _ = emd_duffing.empirical_mode_decomposition(verbose=False) fig, axs = plt.subplots(2, 1) plt.subplots_adjust(hspace=0.3) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) axs[0].plot(t, emd_duff[1, :], label='AdvEMDpy') axs[0].plot(t, py_emd[0, :], '--', label='PyEMD 0.2.10') axs[0].plot(t, emd_sift[:, 0], '--', label='emd 0.3.3') axs[0].set_title('IMF 1') axs[0].set_ylim([-2, 2]) axs[0].set_xlim([0, 150]) axs[1].plot(t, emd_duff[2, :], label='AdvEMDpy') print(f'AdvEMDpy driving function error: {np.round(sum(abs(0.1 * np.cos(0.04 * 2 * np.pi * t) - emd_duff[2, :])), 3)}') axs[1].plot(t, py_emd[1, :], '--', label='PyEMD 0.2.10') print(f'PyEMD driving function error: {np.round(sum(abs(0.1 * np.cos(0.04 * 2 * np.pi * t) - py_emd[1, :])), 3)}') axs[1].plot(t, emd_sift[:, 1], '--', label='emd 0.3.3') print(f'emd driving function error: {np.round(sum(abs(0.1 * np.cos(0.04 * 2 * np.pi * t) - emd_sift[:, 1])), 3)}') axs[1].plot(t, 0.1 * np.cos(0.04 * 2 * np.pi * t), '--', label=r'$0.1$cos$(0.08{\pi}t)$') axs[1].set_title('IMF 2') axs[1].set_ylim([-0.2, 0.4]) axs[1].set_xlim([0, 150]) axis = 0 for ax in axs.flat: ax.label_outer() if axis == 0: ax.set_ylabel(r'$\gamma_1(t)$') ax.set_yticks([-2, 0, 2]) if axis == 1: ax.set_ylabel(r'$\gamma_2(t)$') ax.set_yticks([-0.2, 0, 0.2]) box_0 = ax.get_position() ax.set_position([box_0.x0, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) ax.set_xticks(x_points) ax.set_xticklabels(x_names) axis += 1 plt.savefig('jss_figures/Duffing_equation_imfs.png') plt.show() hs_outputs = hilbert_spectrum(t, emd_duff, emd_ht_duff, emd_if_duff, max_frequency=1.3, plot=False) ax = plt.subplot(111) plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Duffing Equation using AdvEMDpy', 40)) x, y, z = hs_outputs y = y / (2 * np.pi) z_min, z_max = 0, np.abs(z).max() 
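# Note: as in the earlier DFA example (whose y-axis is labelled in rad/s),
# hilbert_spectrum returns instantaneous angular frequency, so y is divided by
# 2 * np.pi above to plot in Hz and make the 0.04 Hz driving line comparable.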
figure_size = plt.gcf().get_size_inches() factor = 1.0 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) ax.pcolormesh(x, y, np.abs(z), cmap='gist_rainbow', vmin=z_min, vmax=z_max) plt.plot(t[:-1], 0.124 * np.ones_like(t[:-1]), '--', label=textwrap.fill('Hamiltonian frequency approximation', 15)) plt.plot(t[:-1], 0.04 * np.ones_like(t[:-1]), 'g--', label=textwrap.fill('Driving function frequency', 15)) plt.xticks([0, 50, 100, 150]) plt.yticks([0, 0.1, 0.2]) plt.ylabel('Frequency (Hz)') plt.xlabel('Time (s)') box_0 = ax.get_position() ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.75, box_0.height * 0.9]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/Duffing_equation_ht.png') plt.show() # Carbon Dioxide Concentration Example CO2_data = pd.read_csv('Data/co2_mm_mlo.csv', header=51) plt.plot(CO2_data['month'], CO2_data['decimal date']) plt.title(textwrap.fill('Mean Monthly Concentration of Carbon Dioxide in the Atmosphere', 35)) plt.ylabel('Parts per million') plt.xlabel('Time (years)') plt.savefig('jss_figures/CO2_concentration.png') plt.show() signal = CO2_data['decimal date'] signal = np.asarray(signal) time = CO2_data['month'] time = np.asarray(time) # compare other packages Carbon Dioxide - top pyemd = pyemd0215() py_emd = pyemd(signal) IP, IF, IA = emd040.spectra.frequency_transform(py_emd[:2, :].T, 12, 'hilbert') print(f'PyEMD annual frequency error: {np.round(sum(np.abs(IF[:, 0] - np.ones_like(IF[:, 0]))), 3)}') freq_edges, freq_bins = emd040.spectra.define_hist_bins(0, 2, 100) hht = emd040.spectra.hilberthuang(IF, IA, freq_edges) hht = gaussian_filter(hht, sigma=1) fig, ax = plt.subplots() figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of CO$_{2}$ Concentration using PyEMD 0.2.10', 45)) plt.ylabel('Frequency (year$^{-1}$)') plt.xlabel('Time (years)') plt.pcolormesh(time, freq_bins, hht, cmap='gist_rainbow', vmin=0, vmax=np.max(np.max(np.abs(hht)))) plt.plot(time, np.ones_like(time), 'k--', label=textwrap.fill('Annual cycle', 10)) box_0 = ax.get_position() ax.set_position([box_0.x0 + 0.0125, box_0.y0 + 0.075, box_0.width * 0.8, box_0.height * 0.9]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/CO2_Hilbert_pyemd.png') plt.show() emd_sift = emd040.sift.sift(signal) IP, IF, IA = emd040.spectra.frequency_transform(emd_sift[:, :1], 12, 'hilbert') print(f'emd annual frequency error: {np.round(sum(np.abs(IF - np.ones_like(IF)))[0], 3)}') freq_edges, freq_bins = emd040.spectra.define_hist_bins(0, 2, 100) hht = emd040.spectra.hilberthuang(IF, IA, freq_edges) hht = gaussian_filter(hht, sigma=1) fig, ax = plt.subplots() figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of CO$_{2}$ Concentration using emd 0.3.3', 45)) plt.ylabel('Frequency (year$^{-1}$)') plt.xlabel('Time (years)') plt.pcolormesh(time, freq_bins, hht, cmap='gist_rainbow', vmin=0, vmax=np.max(np.max(np.abs(hht)))) plt.plot(time, np.ones_like(time), 'k--', label=textwrap.fill('Annual cycle', 10)) box_0 = ax.get_position() ax.set_position([box_0.x0 + 0.0125, box_0.y0 + 0.075, box_0.width * 0.8, box_0.height * 0.9]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/CO2_Hilbert_emd.png') plt.show() # compare other packages Carbon Dioxide - 
bottom knots = np.linspace(time[0], time[-1], 200) emd_example = AdvEMDpy.EMD(time=time, time_series=signal) imfs, hts, ifs, _, _, _, _ = \ emd_example.empirical_mode_decomposition(knots=knots, knot_time=time, verbose=False) print(f'AdvEMDpy annual frequency error: {np.round(sum(np.abs(ifs[1, :] / (2 * np.pi) - np.ones_like(ifs[1, :]))), 3)}') fig, axs = plt.subplots(2, 2) plt.subplots_adjust(hspace=0.5) axs[0, 0].plot(time, signal) axs[0, 1].plot(time, signal) axs[0, 1].plot(time, imfs[0, :], label='Smoothed') axs[0, 1].legend(loc='lower right') axs[1, 0].plot(time, imfs[1, :]) axs[1, 1].plot(time, imfs[2, :]) axis = 0 for ax in axs.flat: if axis == 0: ax.set(ylabel=R'CO$_2$ concentration') if axis == 1: pass if axis == 2: ax.set(ylabel=R'CO$_2$ concentration') ax.set(xlabel='Time (years)') if axis == 3: ax.set(xlabel='Time (years)') axis += 1 plt.gcf().subplots_adjust(bottom=0.15) axs[0, 0].set_title(r'Original CO$_2$ Concentration') axs[0, 1].set_title('Smoothed CO$_2$ Concentration') axs[1, 0].set_title('IMF 1') axs[1, 1].set_title('Residual') plt.gcf().subplots_adjust(bottom=0.15) plt.savefig('jss_figures/CO2_EMD.png') plt.show() hs_outputs = hilbert_spectrum(time, imfs, hts, ifs, max_frequency=10, which_imfs=[1], plot=False) x_hs, y, z = hs_outputs y = y / (2 * np.pi) z_min, z_max = 0,
np.abs(z)
numpy.abs
"""Python interfaces to DGL farthest point sampler.""" from dgl._ffi.base import DGLError import numpy as np from .._ffi.function import _init_api from .. import backend as F from .. import ndarray as nd def _farthest_point_sampler(data, batch_size, sample_points, dist, start_idx, result): r"""Farthest Point Sampler Parameters ---------- data : tensor A tensor of shape (N, d) where N is the number of points and d is the dimension. batch_size : int The number of batches in the ``data``. N should be divisible by batch_size. sample_points : int The number of points to sample in each batch. dist : tensor Pre-allocated tensor of shape (N, ) for to-sample distance. start_idx : tensor of int Pre-allocated tensor of shape (batch_size, ) for the starting sample in each batch. result : tensor of int Pre-allocated tensor of shape (sample_points * batch_size, ) for the sampled index. Returns ------- No return value. The input variable ``result`` will be overwriten with sampled indices. """ assert F.shape(data)[0] >= sample_points * batch_size assert F.shape(data)[0] % batch_size == 0 _CAPI_FarthestPointSampler(F.zerocopy_to_dgl_ndarray(data), batch_size, sample_points, F.zerocopy_to_dgl_ndarray(dist), F.zerocopy_to_dgl_ndarray(start_idx), F.zerocopy_to_dgl_ndarray(result)) def _neighbor_matching(graph_idx, num_nodes, edge_weights=None, relabel_idx=True): """ Description ----------- The neighbor matching procedure of edge coarsening used in `Metis <http://cacs.usc.edu/education/cs653/Karypis-METIS-SIAMJSC98.pdf>`__ and `Graclus <https://www.cs.utexas.edu/users/inderjit/public_papers/multilevel_pami.pdf>`__ for homogeneous graph coarsening. This procedure keeps picking an unmarked vertex and matching it with one its unmarked neighbors (that maximizes its edge weight) until no match can be done. If no edge weight is given, this procedure will randomly pick neighbor for each vertex. The GPU implementation is based on `A GPU Algorithm for Greedy Graph Matching <http://www.staff.science.uu.nl/~bisse101/Articles/match12.pdf>`__ NOTE: The input graph must be bi-directed (undirected) graph. Call :obj:`dgl.to_bidirected` if you are not sure your graph is bi-directed. Parameters ---------- graph : HeteroGraphIndex The input homogeneous graph. num_nodes : int The number of nodes in this homogeneous graph. edge_weight : tensor, optional The edge weight tensor holding non-negative scalar weight for each edge. default: :obj:`None` relabel_idx : bool, optional If true, relabel resulting node labels to have consecutive node ids. default: :obj:`True` Returns ------- a 1-D tensor A vector with each element that indicates the cluster ID of a vertex. """ edge_weight_capi = nd.NULL["int64"] if edge_weights is not None: edge_weight_capi = F.zerocopy_to_dgl_ndarray(edge_weights) node_label = F.full_1d( num_nodes, -1, getattr(F, graph_idx.dtype), F.to_backend_ctx(graph_idx.ctx)) node_label_capi = F.zerocopy_to_dgl_ndarray_for_write(node_label) _CAPI_NeighborMatching(graph_idx, edge_weight_capi, node_label_capi) if F.reduce_sum(node_label < 0).item() != 0: raise DGLError("Find unmatched node") # reorder node id # TODO: actually we can add `return_inverse` option for `unique` # function in backend for efficiency. if relabel_idx: node_label_np = F.zerocopy_to_numpy(node_label) _, node_label_np =
np.unique(node_label_np, return_inverse=True)
numpy.unique
# ________ # / # \ / # \ / # \/ import random import textwrap import emd_mean import AdvEMDpy import emd_basis import emd_utils import numpy as np import pandas as pd import cvxpy as cvx import seaborn as sns import matplotlib.pyplot as plt from scipy.integrate import odeint from scipy.ndimage import gaussian_filter from emd_utils import time_extension, Utility from scipy.interpolate import CubicSpline from emd_hilbert import Hilbert, hilbert_spectrum from emd_preprocess import Preprocess from emd_mean import Fluctuation from AdvEMDpy import EMD # alternate packages from PyEMD import EMD as pyemd0215 import emd as emd040 sns.set(style='darkgrid') pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001) pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time) pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series) # plot 0 - addition fig = plt.figure(figsize=(9, 4)) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('First Iteration of Sifting Algorithm') plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1) plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()], c='r', label=r'$M(t_i)$', zorder=2) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4) plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()], c='c', label=r'$m(t_j)$', zorder=3) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5) plt.yticks(ticks=[-2, -1, 0, 1, 2]) plt.xticks(ticks=[0, np.pi, 2 * np.pi], labels=[r'0', r'$\pi$', r'$2\pi$']) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/pseudo_algorithm.png') plt.show() knots = np.arange(12) time = np.linspace(0, 11, 1101) basis = emd_basis.Basis(time=time, time_series=time) b_spline_basis = basis.cubic_b_spline(knots) chsi_basis = basis.chsi_basis(knots) # plot 1 plt.title('Non-Natural Cubic B-Spline Bases at Boundary') plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $') plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $') plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $') plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $') plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $') plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $']) plt.xlim(4.4, 6.6) plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.legend(loc='upper left') plt.savefig('jss_figures/boundary_bases.png') plt.show() # plot 1a - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) knots_uniform = np.linspace(0, 2 * np.pi, 51) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0] fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) 
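# Row convention assumed throughout this script (inferred from the plots that
# follow and from the CO2 example further above): row 0 of the returned IMF array
# holds the smoothed input series, rows 1, 2, ... hold the IMFs, and the final
# row holds the residual.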
plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Uniform Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Uniform Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Uniform Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots_uniform)): axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_uniform.png') plt.show() # plot 1b - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=1, verbose=False) fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Statically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Statically Optimised Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Statically Optimised Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots)): axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_1.png') plt.show() # plot 1c - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=2, verbose=False) fig, 
axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Dynamically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Dynamically Optimised Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Dynamically Optimised Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots[i])): axs[i].plot(knots[i][j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_2.png') plt.show() # plot 1d - addition window = 81 fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Filtering Demonstration') axs[1].set_title('Zoomed Region') preprocess_time = pseudo_alg_time.copy() np.random.seed(1) random.seed(1) preprocess_time_series = pseudo_alg_time_series + np.random.normal(0, 0.1, len(preprocess_time)) for i in random.sample(range(1000), 500): preprocess_time_series[i] += np.random.normal(0, 1) preprocess = Preprocess(time=preprocess_time, time_series=preprocess_time_series) axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[0].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[0].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize filter', 12)) axs[0].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize interpolation filter', 14)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) 
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[1].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[1].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize filter', 12)) axs[1].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize interpolation filter', 14)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_filter.png') plt.show() # plot 1e - addition fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Smoothing Demonstration') axs[1].set_title('Zoomed Region') axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[0].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) downsampled_and_decimated = preprocess.downsample() axs[0].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 11)) downsampled = preprocess.downsample(decimate=False) axs[0].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[1].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) 
axs[1].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 13)) axs[1].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.06, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.06, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_smooth.png') plt.show() # plot 2 fig, axs = plt.subplots(1, 2, sharey=True) axs[0].set_title('Cubic B-Spline Bases') axs[0].plot(time, b_spline_basis[2, :].T, '--', label='Basis 1') axs[0].plot(time, b_spline_basis[3, :].T, '--', label='Basis 2') axs[0].plot(time, b_spline_basis[4, :].T, '--', label='Basis 3') axs[0].plot(time, b_spline_basis[5, :].T, '--', label='Basis 4') axs[0].legend(loc='upper left') axs[0].plot(5 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].plot(6 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].set_xticks([5, 6]) axs[0].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[0].set_xlim(4.5, 6.5) axs[1].set_title('Cubic Hermite Spline Bases') axs[1].plot(time, chsi_basis[10, :].T, '--') axs[1].plot(time, chsi_basis[11, :].T, '--') axs[1].plot(time, chsi_basis[12, :].T, '--') axs[1].plot(time, chsi_basis[13, :].T, '--') axs[1].plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].set_xticks([5, 6]) axs[1].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[1].set_xlim(4.5, 6.5) plt.savefig('jss_figures/comparing_bases.png') plt.show() # plot 3 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_dash = maxima_y[-1] * np.ones_like(max_dash_time) min_dash_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_dash = minima_y[-1] * np.ones_like(min_dash_time) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) max_discard = maxima_y[-1] max_discard_time = minima_x[-1] - maxima_x[-1] + minima_x[-1] max_discard_dash_time = np.linspace(max_discard_time - width, max_discard_time + width, 101) max_discard_dash = max_discard * np.ones_like(max_discard_dash_time) dash_2_time = np.linspace(minima_x[-1], max_discard_time, 101) dash_2 = np.linspace(minima_y[-1], max_discard, 101) end_point_time = time[-1] end_point = time_series[-1] time_reflect = np.linspace((5 - a) * np.pi, (5 + a) * np.pi, 101) time_series_reflect = np.flip(np.cos(np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101)) + np.cos(5 * np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101))) time_series_anti_reflect = time_series_reflect[0] - time_series_reflect utils = emd_utils.Utility(time=time, time_series=time_series_anti_reflect) anti_max_bool = utils.max_bool_func_1st_order_fd() anti_max_point_time = time_reflect[anti_max_bool] 
anti_max_point = time_series_anti_reflect[anti_max_bool]
utils = emd_utils.Utility(time=time, time_series=time_series_reflect)
no_anchor_max_time = time_reflect[utils.max_bool_func_1st_order_fd()]
no_anchor_max = time_series_reflect[utils.max_bool_func_1st_order_fd()]
point_1 = 5.4
length_distance = np.linspace(maxima_y[-1], minima_y[-1], 101)
length_distance_time = point_1 * np.pi * np.ones_like(length_distance)
length_time = np.linspace(point_1 * np.pi - width, point_1 * np.pi + width, 101)
length_top = maxima_y[-1] * np.ones_like(length_time)
length_bottom = minima_y[-1] * np.ones_like(length_time)
point_2 = 5.2
length_distance_2 = np.linspace(time_series[-1], minima_y[-1], 101)
length_distance_time_2 = point_2 * np.pi * np.ones_like(length_distance_2)
length_time_2 = np.linspace(point_2 * np.pi - width, point_2 * np.pi + width, 101)
length_top_2 = time_series[-1] * np.ones_like(length_time_2)
length_bottom_2 = minima_y[-1] * np.ones_like(length_time_2)
symmetry_axis_1_time = minima_x[-1] * np.ones(101)
symmetry_axis_2_time = time[-1] * np.ones(101)
symmetry_axis = np.linspace(-2, 2, 101)
end_time = np.linspace(time[-1] - width, time[-1] + width, 101)
end_signal = time_series[-1] * np.ones_like(end_time)
anti_symmetric_time = np.linspace(time[-1] - 0.5, time[-1] + 0.5, 101)
anti_symmetric_signal = time_series[-1] * np.ones_like(anti_symmetric_time)
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.plot(time, time_series, linewidth=2, label='Signal')
plt.title('Symmetry Edge Effects Example')
plt.plot(time_reflect, time_series_reflect, 'g--', linewidth=2, label=textwrap.fill('Symmetric signal', 10))
plt.plot(time_reflect[:51], time_series_anti_reflect[:51], '--', c='purple', linewidth=2, label=textwrap.fill('Anti-symmetric signal', 10))
plt.plot(max_dash_time, max_dash, 'k-')
plt.plot(min_dash_time, min_dash, 'k-')
plt.plot(dash_1_time, dash_1, 'k--')
plt.plot(dash_2_time, dash_2, 'k--')
plt.plot(length_distance_time, length_distance, 'k--')
plt.plot(length_distance_time_2, length_distance_2, 'k--')
plt.plot(length_time, length_top, 'k-')
plt.plot(length_time, length_bottom, 'k-')
plt.plot(length_time_2, length_top_2, 'k-')
plt.plot(length_time_2, length_bottom_2, 'k-')
plt.plot(end_time, end_signal, 'k-')
plt.plot(symmetry_axis_1_time, symmetry_axis, 'r--', zorder=1)
plt.plot(anti_symmetric_time, anti_symmetric_signal, 'r--', zorder=1)
plt.plot(symmetry_axis_2_time, symmetry_axis, 'r--', label=textwrap.fill('Axes of symmetry', 10), zorder=1)
plt.text(5.1 * np.pi, -0.7, r'$\beta$L')
plt.text(5.34 * np.pi, -0.05, 'L')
plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima')
plt.scatter(max_discard_time, max_discard, c='purple', zorder=4, label=textwrap.fill('Symmetric Discard maxima', 10))
plt.scatter(end_point_time, end_point, c='orange', zorder=4, label=textwrap.fill('Symmetric Anchor maxima', 10))
plt.scatter(anti_max_point_time, anti_max_point, c='green', zorder=4, label=textwrap.fill('Anti-Symmetric maxima', 10))
plt.scatter(no_anchor_max_time, no_anchor_max, c='gray', zorder=4, label=textwrap.fill('Symmetric maxima', 10))
plt.xlim(3.9 * np.pi, 5.5 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_symmetry_anti.png')
plt.show()
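# Clarifying aside (added; not part of the original figure script): plot 3 builds its
# reflected signals by mirroring the samples about the right edge with np.flip and, for
# the anti-symmetric case, negating them about the edge value, as done above. Tiny
# self-contained illustration with hypothetical _demo_* names:
_demo_t = np.linspace(0, np.pi, 101)
_demo_x = np.cos(_demo_t)
_demo_x_sym = np.flip(_demo_x)               # mirror about t = _demo_t[-1]
_demo_x_anti = _demo_x_sym[0] - _demo_x_sym  # point-reflect about the edge value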
# plot 4 a = 0.21 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_1 = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_dash_2 = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_dash_time_1 = maxima_x[-1] * np.ones_like(max_dash_1) max_dash_time_2 = maxima_x[-2] * np.ones_like(max_dash_1) min_dash_1 = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_dash_2 = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_dash_time_1 = minima_x[-1] * np.ones_like(min_dash_1) min_dash_time_2 = minima_x[-2] * np.ones_like(min_dash_1) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) dash_2_time = np.linspace(maxima_x[-1], minima_x[-2], 101) dash_2 = np.linspace(maxima_y[-1], minima_y[-2], 101) s1 = (minima_y[-2] - maxima_y[-1]) / (minima_x[-2] - maxima_x[-1]) slope_based_maximum_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) slope_based_maximum = minima_y[-1] + (slope_based_maximum_time - minima_x[-1]) * s1 max_dash_time_3 = slope_based_maximum_time * np.ones_like(max_dash_1) max_dash_3 = np.linspace(slope_based_maximum - width, slope_based_maximum + width, 101) dash_3_time = np.linspace(minima_x[-1], slope_based_maximum_time, 101) dash_3 = np.linspace(minima_y[-1], slope_based_maximum, 101) s2 = (minima_y[-1] - maxima_y[-1]) / (minima_x[-1] - maxima_x[-1]) slope_based_minimum_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) slope_based_minimum = slope_based_maximum - (slope_based_maximum_time - slope_based_minimum_time) * s2 min_dash_time_3 = slope_based_minimum_time * np.ones_like(min_dash_1) min_dash_3 = np.linspace(slope_based_minimum - width, slope_based_minimum + width, 101) dash_4_time = np.linspace(slope_based_maximum_time, slope_based_minimum_time) dash_4 = np.linspace(slope_based_maximum, slope_based_minimum) maxima_dash = np.linspace(2.5 - width, 2.5 + width, 101) maxima_dash_time_1 = maxima_x[-2] * np.ones_like(maxima_dash) maxima_dash_time_2 = maxima_x[-1] * np.ones_like(maxima_dash) maxima_dash_time_3 = slope_based_maximum_time * np.ones_like(maxima_dash) maxima_line_dash_time = np.linspace(maxima_x[-2], slope_based_maximum_time, 101) maxima_line_dash = 2.5 * np.ones_like(maxima_line_dash_time) minima_dash = np.linspace(-3.4 - width, -3.4 + width, 101) minima_dash_time_1 = minima_x[-2] * np.ones_like(minima_dash) minima_dash_time_2 = minima_x[-1] * np.ones_like(minima_dash) minima_dash_time_3 = slope_based_minimum_time * np.ones_like(minima_dash) minima_line_dash_time = np.linspace(minima_x[-2], slope_based_minimum_time, 101) minima_line_dash = -3.4 * np.ones_like(minima_line_dash_time) # slightly edit signal to make difference between slope-based method and improved slope-based method more clear time_series[time >= minima_x[-1]] = 1.5 * (time_series[time >= minima_x[-1]] - time_series[time == minima_x[-1]]) + \ time_series[time == minima_x[-1]] improved_slope_based_maximum_time = time[-1] improved_slope_based_maximum = time_series[-1] improved_slope_based_minimum_time = slope_based_minimum_time improved_slope_based_minimum = improved_slope_based_maximum + s2 * (improved_slope_based_minimum_time - improved_slope_based_maximum_time) 
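# Clarifying note (added): the slope-based method above extrapolates the next extremum
# by stepping one inter-extremum spacing forward and following the local slope, i.e.
# slope_based_maximum_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) and
# slope_based_maximum = minima_y[-1] + (slope_based_maximum_time - minima_x[-1]) * s1.
# A hypothetical numeric check with made-up extrema (_demo_* names are illustrative):
_demo_t_max, _demo_t_min = np.array([1.0, 3.0]), np.array([2.0, 4.0])
_demo_y_max, _demo_y_min = np.array([1.0, 0.8]), np.array([-1.0, -0.9])
_demo_s1 = (_demo_y_min[-2] - _demo_y_max[-1]) / (_demo_t_min[-2] - _demo_t_max[-1])  # slope through last max and 2nd-to-last min
_demo_t_M = _demo_t_max[-1] + (_demo_t_max[-1] - _demo_t_max[-2])                     # = 5.0, one spacing past the last maximum
_demo_y_M = _demo_y_min[-1] + (_demo_t_M - _demo_t_min[-1]) * _demo_s1                # = 0.9, extrapolated maximum height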
min_dash_4 = np.linspace(improved_slope_based_minimum - width, improved_slope_based_minimum + width, 101)
min_dash_time_4 = improved_slope_based_minimum_time * np.ones_like(min_dash_4)
dash_final_time = np.linspace(improved_slope_based_maximum_time, improved_slope_based_minimum_time, 101)
dash_final = np.linspace(improved_slope_based_maximum, improved_slope_based_minimum, 101)
ax = plt.subplot(111)
figure_size = plt.gcf().get_size_inches()
factor = 0.9
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
plt.plot(time, time_series, linewidth=2, label='Signal')
plt.title('Slope-Based Edge Effects Example')
plt.plot(max_dash_time_1, max_dash_1, 'k-')
plt.plot(max_dash_time_2, max_dash_2, 'k-')
plt.plot(max_dash_time_3, max_dash_3, 'k-')
plt.plot(min_dash_time_1, min_dash_1, 'k-')
plt.plot(min_dash_time_2, min_dash_2, 'k-')
plt.plot(min_dash_time_3, min_dash_3, 'k-')
plt.plot(min_dash_time_4, min_dash_4, 'k-')
plt.plot(maxima_dash_time_1, maxima_dash, 'k-')
plt.plot(maxima_dash_time_2, maxima_dash, 'k-')
plt.plot(maxima_dash_time_3, maxima_dash, 'k-')
plt.plot(minima_dash_time_1, minima_dash, 'k-')
plt.plot(minima_dash_time_2, minima_dash, 'k-')
plt.plot(minima_dash_time_3, minima_dash, 'k-')
plt.text(4.34 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$')
plt.text(4.74 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$')
plt.text(4.12 * np.pi, 2, r'$\Delta{t^{max}_{M}}$')
plt.text(4.50 * np.pi, 2, r'$\Delta{t^{max}_{M}}$')
plt.text(4.30 * np.pi, 0.35, r'$s_1$')
plt.text(4.43 * np.pi, -0.20, r'$s_2$')
plt.text(4.30 * np.pi + (minima_x[-1] - minima_x[-2]), 0.35 + (minima_y[-1] - minima_y[-2]), r'$s_1$')
plt.text(4.43 * np.pi + (slope_based_minimum_time - minima_x[-1]), -0.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$')
plt.text(4.50 * np.pi + (slope_based_minimum_time - minima_x[-1]), 1.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$')
plt.plot(minima_line_dash_time, minima_line_dash, 'k--')
plt.plot(maxima_line_dash_time, maxima_line_dash, 'k--')
plt.plot(dash_1_time, dash_1, 'k--')
plt.plot(dash_2_time, dash_2, 'k--')
plt.plot(dash_3_time, dash_3, 'k--')
plt.plot(dash_4_time, dash_4, 'k--')
plt.plot(dash_final_time, dash_final, 'k--')
plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima')
plt.scatter(slope_based_maximum_time, slope_based_maximum, c='orange', zorder=4, label=textwrap.fill('Slope-based maximum', 11))
plt.scatter(slope_based_minimum_time, slope_based_minimum, c='purple', zorder=4, label=textwrap.fill('Slope-based minimum', 11))
plt.scatter(improved_slope_based_maximum_time, improved_slope_based_maximum, c='deeppink', zorder=4, label=textwrap.fill('Improved slope-based maximum', 11))
plt.scatter(improved_slope_based_minimum_time, improved_slope_based_minimum, c='dodgerblue', zorder=4, label=textwrap.fill('Improved slope-based minimum', 11))
plt.xlim(3.9 * np.pi, 5.5 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-3, -2, -1, 0, 1, 2), ('-3', '-2', '-1', '0', '1', '2'))
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_slope_based.png')
plt.show()

# plot 5
a = 0.25
width = 0.2
time = np.linspace(0, (5 - a) * np.pi, 1001)
time_series = np.cos(time) + np.cos(5 * time)
utils = emd_utils.Utility(time=time, time_series=time_series)
max_bool = utils.max_bool_func_1st_order_fd()
maxima_x =
time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] A2 = np.abs(maxima_y[-2] - minima_y[-2]) / 2 A1 = np.abs(maxima_y[-1] - minima_y[-1]) / 2 P2 = 2 * np.abs(maxima_x[-2] - minima_x[-2]) P1 = 2 * np.abs(maxima_x[-1] - minima_x[-1]) Huang_time = (P1 / P2) * (time[time >= maxima_x[-2]] - time[time == maxima_x[-2]]) + maxima_x[-1] Huang_wave = (A1 / A2) * (time_series[time >= maxima_x[-2]] - time_series[time == maxima_x[-2]]) + maxima_y[-1] Coughlin_time = Huang_time Coughlin_wave = A1 * np.cos(2 * np.pi * (1 / P1) * (Coughlin_time - Coughlin_time[0])) Average_max_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) Average_max = (maxima_y[-2] + maxima_y[-1]) / 2 Average_min_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) Average_min = (minima_y[-2] + minima_y[-1]) / 2 utils_Huang = emd_utils.Utility(time=time, time_series=Huang_wave) Huang_max_bool = utils_Huang.max_bool_func_1st_order_fd() Huang_min_bool = utils_Huang.min_bool_func_1st_order_fd() utils_Coughlin = emd_utils.Utility(time=time, time_series=Coughlin_wave) Coughlin_max_bool = utils_Coughlin.max_bool_func_1st_order_fd() Coughlin_min_bool = utils_Coughlin.min_bool_func_1st_order_fd() Huang_max_time = Huang_time[Huang_max_bool] Huang_max = Huang_wave[Huang_max_bool] Huang_min_time = Huang_time[Huang_min_bool] Huang_min = Huang_wave[Huang_min_bool] Coughlin_max_time = Coughlin_time[Coughlin_max_bool] Coughlin_max = Coughlin_wave[Coughlin_max_bool] Coughlin_min_time = Coughlin_time[Coughlin_min_bool] Coughlin_min = Coughlin_wave[Coughlin_min_bool] max_2_x_time = np.linspace(maxima_x[-2] - width, maxima_x[-2] + width, 101) max_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) max_2_x = maxima_y[-2] * np.ones_like(max_2_x_time) min_2_x_time = np.linspace(minima_x[-2] - width, minima_x[-2] + width, 101) min_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) min_2_x = minima_y[-2] * np.ones_like(min_2_x_time) dash_max_min_2_x = np.linspace(minima_y[-2], maxima_y[-2], 101) dash_max_min_2_x_time = 5.3 * np.pi * np.ones_like(dash_max_min_2_x) max_2_y = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) max_2_y_time = maxima_x[-2] * np.ones_like(max_2_y) min_2_y = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) min_2_y_time = minima_x[-2] * np.ones_like(min_2_y) dash_max_min_2_y_time = np.linspace(minima_x[-2], maxima_x[-2], 101) dash_max_min_2_y = -1.8 * np.ones_like(dash_max_min_2_y_time) max_1_x_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) max_1_x = maxima_y[-1] * np.ones_like(max_1_x_time) min_1_x_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) min_1_x = minima_y[-1] * np.ones_like(min_1_x_time) dash_max_min_1_x = np.linspace(minima_y[-1], maxima_y[-1], 101) dash_max_min_1_x_time = 5.4 * np.pi * np.ones_like(dash_max_min_1_x) max_1_y = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) max_1_y_time = maxima_x[-1] * np.ones_like(max_1_y) min_1_y = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) min_1_y_time = 
minima_x[-1] * np.ones_like(min_1_y)
dash_max_min_1_y_time = np.linspace(minima_x[-1], maxima_x[-1], 101)
dash_max_min_1_y = -2.1 * np.ones_like(dash_max_min_1_y_time)
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.title('Characteristic Wave Effects Example')
plt.plot(time, time_series, linewidth=2, label='Signal')
plt.scatter(Huang_max_time, Huang_max, c='magenta', zorder=4, label=textwrap.fill('Huang maximum', 10))
plt.scatter(Huang_min_time, Huang_min, c='lime', zorder=4, label=textwrap.fill('Huang minimum', 10))
plt.scatter(Coughlin_max_time, Coughlin_max, c='darkorange', zorder=4, label=textwrap.fill('Coughlin maximum', 14))
plt.scatter(Coughlin_min_time, Coughlin_min, c='dodgerblue', zorder=4, label=textwrap.fill('Coughlin minimum', 14))
plt.scatter(Average_max_time, Average_max, c='orangered', zorder=4, label=textwrap.fill('Average maximum', 14))
plt.scatter(Average_min_time, Average_min, c='cyan', zorder=4, label=textwrap.fill('Average minimum', 14))
plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima')
plt.plot(Huang_time, Huang_wave, '--', c='darkviolet', label=textwrap.fill('Huang Characteristic Wave', 14))
plt.plot(Coughlin_time, Coughlin_wave, '--', c='darkgreen', label=textwrap.fill('Coughlin Characteristic Wave', 14))
plt.plot(max_2_x_time, max_2_x, 'k-')
plt.plot(max_2_x_time_side, max_2_x, 'k-')
plt.plot(min_2_x_time, min_2_x, 'k-')
plt.plot(min_2_x_time_side, min_2_x, 'k-')
plt.plot(dash_max_min_2_x_time, dash_max_min_2_x, 'k--')
plt.text(5.16 * np.pi, 0.85, r'$2a_2$')
plt.plot(max_2_y_time, max_2_y, 'k-')
plt.plot(max_2_y_time, max_2_y_side, 'k-')
plt.plot(min_2_y_time, min_2_y, 'k-')
plt.plot(min_2_y_time, min_2_y_side, 'k-')
plt.plot(dash_max_min_2_y_time, dash_max_min_2_y, 'k--')
plt.text(4.08 * np.pi, -2.2, r'$\frac{p_2}{2}$')
plt.plot(max_1_x_time, max_1_x, 'k-')
plt.plot(max_1_x_time_side, max_1_x, 'k-')
plt.plot(min_1_x_time, min_1_x, 'k-')
plt.plot(min_1_x_time_side, min_1_x, 'k-')
plt.plot(dash_max_min_1_x_time, dash_max_min_1_x, 'k--')
plt.text(5.42 * np.pi, -0.1, r'$2a_1$')
plt.plot(max_1_y_time, max_1_y, 'k-')
plt.plot(max_1_y_time, max_1_y_side, 'k-')
plt.plot(min_1_y_time, min_1_y, 'k-')
plt.plot(min_1_y_time, min_1_y_side, 'k-')
plt.plot(dash_max_min_1_y_time, dash_max_min_1_y, 'k--')
plt.text(4.48 * np.pi, -2.5, r'$\frac{p_1}{2}$')
plt.xlim(3.9 * np.pi, 5.6 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_characteristic_wave.png')
plt.show()

# plot 6
t = np.linspace(5, 95, 100)
signal_orig = np.cos(2 * np.pi * t / 50) + 0.6 * np.cos(2 * np.pi * t / 25) + 0.5 * np.sin(2 * np.pi * t / 200)
util_nn = emd_utils.Utility(time=t, time_series=signal_orig)
maxima = signal_orig[util_nn.max_bool_func_1st_order_fd()]
minima = signal_orig[util_nn.min_bool_func_1st_order_fd()]
cs_max = CubicSpline(t[util_nn.max_bool_func_1st_order_fd()], maxima)
cs_min = CubicSpline(t[util_nn.min_bool_func_1st_order_fd()], minima)
time = np.linspace(0, 5 * np.pi, 1001)
lsq_signal = np.cos(time) + np.cos(5 * time)
knots = np.linspace(0, 5 * np.pi, 101)
time_extended = time_extension(time)
time_series_extended = np.zeros_like(time_extended) / 0  # deliberate 0/0: NaN-fill; only the centre segment is populated next
time_series_extended[int(len(lsq_signal) - 1):int(2 * (len(lsq_signal) - 1) + 1)]
= lsq_signal neural_network_m = 200 neural_network_k = 100 # forward -> P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[(-(neural_network_m + neural_network_k - col)):(-(neural_network_m - col))] P[-1, col] = 1 # for additive constant t = lsq_signal[-neural_network_m:] # test - top seed_weights = np.ones(neural_network_k) / neural_network_k weights = 0 * seed_weights.copy() train_input = P[:-1, :] lr = 0.01 for iterations in range(1000): output = np.matmul(weights, train_input) error = (t - output) gradients = error * (- train_input) # guess average gradients average_gradients = np.mean(gradients, axis=1) # steepest descent max_gradient_vector = average_gradients * (np.abs(average_gradients) == max(np.abs(average_gradients))) adjustment = - lr * average_gradients # adjustment = - lr * max_gradient_vector weights += adjustment # test - bottom weights_right = np.hstack((weights, 0)) max_count_right = 0 min_count_right = 0 i_right = 0 while ((max_count_right < 1) or (min_count_right < 1)) and (i_right < len(lsq_signal) - 1): time_series_extended[int(2 * (len(lsq_signal) - 1) + 1 + i_right)] = \ sum(weights_right * np.hstack((time_series_extended[ int(2 * (len(lsq_signal) - 1) + 1 - neural_network_k + i_right): int(2 * (len(lsq_signal) - 1) + 1 + i_right)], 1))) i_right += 1 if i_right > 1: emd_utils_max = \ emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)], time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)]) if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0: max_count_right += 1 emd_utils_min = \ emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)], time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)]) if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0: min_count_right += 1 # backward <- P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[int(col + 1):int(col + neural_network_k + 1)] P[-1, col] = 1 # for additive constant t = lsq_signal[:neural_network_m] vx = cvx.Variable(int(neural_network_k + 1)) objective = cvx.Minimize(cvx.norm((2 * (vx * P) + 1 - t), 2)) # linear activation function is arbitrary prob = cvx.Problem(objective) result = prob.solve(verbose=True, solver=cvx.ECOS) weights_left = np.array(vx.value) max_count_left = 0 min_count_left = 0 i_left = 0 while ((max_count_left < 1) or (min_count_left < 1)) and (i_left < len(lsq_signal) - 1): time_series_extended[int(len(lsq_signal) - 2 - i_left)] = \ 2 * sum(weights_left * np.hstack((time_series_extended[int(len(lsq_signal) - 1 - i_left): int(len(lsq_signal) - 1 - i_left + neural_network_k)], 1))) + 1 i_left += 1 if i_left > 1: emd_utils_max = \ emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))], time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))]) if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0: max_count_left += 1 emd_utils_min = \ emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))], time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))]) if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0: min_count_left += 1 lsq_utils = emd_utils.Utility(time=time, 
time_series=lsq_signal) utils_extended = emd_utils.Utility(time=time_extended, time_series=time_series_extended) maxima = lsq_signal[lsq_utils.max_bool_func_1st_order_fd()] maxima_time = time[lsq_utils.max_bool_func_1st_order_fd()] maxima_extrapolate = time_series_extended[utils_extended.max_bool_func_1st_order_fd()][-1] maxima_extrapolate_time = time_extended[utils_extended.max_bool_func_1st_order_fd()][-1] minima = lsq_signal[lsq_utils.min_bool_func_1st_order_fd()] minima_time = time[lsq_utils.min_bool_func_1st_order_fd()] minima_extrapolate = time_series_extended[utils_extended.min_bool_func_1st_order_fd()][-2:] minima_extrapolate_time = time_extended[utils_extended.min_bool_func_1st_order_fd()][-2:] ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Single Neuron Neural Network Example') plt.plot(time, lsq_signal, zorder=2, label='Signal') plt.plot(time_extended, time_series_extended, c='g', zorder=1, label=textwrap.fill('Extrapolated signal', 12)) plt.scatter(maxima_time, maxima, c='r', zorder=3, label='Maxima') plt.scatter(minima_time, minima, c='b', zorder=3, label='Minima') plt.scatter(maxima_extrapolate_time, maxima_extrapolate, c='magenta', zorder=3, label=textwrap.fill('Extrapolated maxima', 12)) plt.scatter(minima_extrapolate_time, minima_extrapolate, c='cyan', zorder=4, label=textwrap.fill('Extrapolated minima', 12)) plt.plot(((time[-302] + time[-301]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k', label=textwrap.fill('Neural network inputs', 13)) plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(((time_extended[-1001] + time_extended[-1002]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k') plt.plot(((time[-202] + time[-201]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed', label=textwrap.fill('Neural network targets', 13)) plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(((time_extended[-1001] + time_extended[-1000]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed') plt.xlim(3.4 * np.pi, 5.6 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/neural_network.png') plt.show() # plot 6a np.random.seed(0) time = np.linspace(0, 5 * 
np.pi, 1001) knots_51 = np.linspace(0, 5 * np.pi, 51) time_series = np.cos(2 * time) + np.cos(4 * time) + np.cos(8 * time) noise = np.random.normal(0, 1, len(time_series)) time_series += noise advemdpy = EMD(time=time, time_series=time_series) imfs_51, hts_51, ifs_51 = advemdpy.empirical_mode_decomposition(knots=knots_51, max_imfs=3, edge_effect='symmetric_anchor', verbose=False)[:3] knots_31 = np.linspace(0, 5 * np.pi, 31) imfs_31, hts_31, ifs_31 = advemdpy.empirical_mode_decomposition(knots=knots_31, max_imfs=2, edge_effect='symmetric_anchor', verbose=False)[:3] knots_11 = np.linspace(0, 5 * np.pi, 11) imfs_11, hts_11, ifs_11 = advemdpy.empirical_mode_decomposition(knots=knots_11, max_imfs=1, edge_effect='symmetric_anchor', verbose=False)[:3] fig, axs = plt.subplots(3, 1) plt.suptitle(textwrap.fill('Comparison of Trends Extracted with Different Knot Sequences', 40)) plt.subplots_adjust(hspace=0.1) axs[0].plot(time, time_series, label='Time series') axs[0].plot(time, imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 1, IMF 2, & IMF 3 with 51 knots', 21)) print(f'DFA fluctuation with 51 knots: {np.round(np.var(time_series - (imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :])), 3)}') for knot in knots_51: axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[0].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[0].set_xticklabels(['', '', '', '', '', '']) axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[0].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[0].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].plot(time, time_series, label='Time series') axs[1].plot(time, imfs_31[1, :] + imfs_31[2, :], label=textwrap.fill('Sum of IMF 1 and IMF 2 with 31 knots', 19)) axs[1].plot(time, imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 2 and IMF 3 with 51 knots', 19)) print(f'DFA fluctuation with 31 knots: {np.round(np.var(time_series - (imfs_31[1, :] + imfs_31[2, :])), 3)}') for knot in knots_31: axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[1].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[1].set_xticklabels(['', '', '', '', '', '']) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) axs[1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 *
np.ones(101)
numpy.ones
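# Note (added for clarity): the completion above finishes the truncated call
# `axs[1].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), ...)`,
# where np.ones builds the constant array for the horizontal edge of the zoomed-region
# box. A minimal self-contained sketch of the same idiom (names are illustrative):
import numpy as np
x_edge = np.linspace(0.95 * np.pi, 1.55 * np.pi, 101)  # 101 x-positions across the box
y_edge = 5.5 * np.ones(101)                            # matching constant height
assert x_edge.shape == y_edge.shape                    # ready for ax.plot(x_edge, y_edge, 'k--')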
# ________ # / # \ / # \ / # \/ import random import textwrap import emd_mean import AdvEMDpy import emd_basis import emd_utils import numpy as np import pandas as pd import cvxpy as cvx import seaborn as sns import matplotlib.pyplot as plt from scipy.integrate import odeint from scipy.ndimage import gaussian_filter from emd_utils import time_extension, Utility from scipy.interpolate import CubicSpline from emd_hilbert import Hilbert, hilbert_spectrum from emd_preprocess import Preprocess from emd_mean import Fluctuation from AdvEMDpy import EMD # alternate packages from PyEMD import EMD as pyemd0215 import emd as emd040 sns.set(style='darkgrid') pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001) pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time) pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series) # plot 0 - addition fig = plt.figure(figsize=(9, 4)) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('First Iteration of Sifting Algorithm') plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1) plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()], c='r', label=r'$M(t_i)$', zorder=2) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4) plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()], c='c', label=r'$m(t_j)$', zorder=3) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5) plt.yticks(ticks=[-2, -1, 0, 1, 2]) plt.xticks(ticks=[0, np.pi, 2 * np.pi], labels=[r'0', r'$\pi$', r'$2\pi$']) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/pseudo_algorithm.png') plt.show() knots = np.arange(12) time = np.linspace(0, 11, 1101) basis = emd_basis.Basis(time=time, time_series=time) b_spline_basis = basis.cubic_b_spline(knots) chsi_basis = basis.chsi_basis(knots) # plot 1 plt.title('Non-Natural Cubic B-Spline Bases at Boundary') plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $') plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $') plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $') plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $') plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $') plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $']) plt.xlim(4.4, 6.6) plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.legend(loc='upper left') plt.savefig('jss_figures/boundary_bases.png') plt.show() # plot 1a - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) knots_uniform = np.linspace(0, 2 * np.pi, 51) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0] fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) 
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Uniform Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Uniform Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Uniform Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
    for j in range(1, len(knots_uniform)):
        axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_uniform.png')
plt.show()

# plot 1b - addition
knot_demonstrate_time =
np.linspace(0, 2 * np.pi, 1001)
numpy.linspace
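# Note (added for clarity): the completion above is `np.linspace(0, 2 * np.pi, 1001)`,
# the uniform grid this script samples its demonstration signals on. A minimal sketch:
import numpy as np
t = np.linspace(0, 2 * np.pi, 1001)    # 1001 evenly spaced samples over [0, 2*pi]
knots = np.linspace(0, 2 * np.pi, 51)  # coarser uniform grid reused for spline knots
assert t[0] == 0.0 and np.isclose(t[-1], 2 * np.pi)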
from abc import ABCMeta, abstractmethod import os from vmaf.tools.misc import make_absolute_path, run_process from vmaf.tools.stats import ListStats __copyright__ = "Copyright 2016-2018, Netflix, Inc." __license__ = "Apache, Version 2.0" import re import numpy as np import ast from vmaf import ExternalProgramCaller, to_list from vmaf.config import VmafConfig, VmafExternalConfig from vmaf.core.executor import Executor from vmaf.core.result import Result from vmaf.tools.reader import YuvReader class FeatureExtractor(Executor): """ FeatureExtractor takes in a list of assets, and run feature extraction on them, and return a list of corresponding results. A FeatureExtractor must specify a unique type and version combination (by the TYPE and VERSION attribute), so that the Result generated by it can be identified. A derived class of FeatureExtractor must: 1) Override TYPE and VERSION 2) Override _generate_result(self, asset), which call a command-line executable and generate feature scores in a log file. 3) Override _get_feature_scores(self, asset), which read the feature scores from the log file, and return the scores in a dictionary format. For an example, follow VmafFeatureExtractor. """ __metaclass__ = ABCMeta @property @abstractmethod def ATOM_FEATURES(self): raise NotImplementedError def _read_result(self, asset): result = {} result.update(self._get_feature_scores(asset)) executor_id = self.executor_id return Result(asset, executor_id, result) @classmethod def get_scores_key(cls, atom_feature): return "{type}_{atom_feature}_scores".format( type=cls.TYPE, atom_feature=atom_feature) @classmethod def get_score_key(cls, atom_feature): return "{type}_{atom_feature}_score".format( type=cls.TYPE, atom_feature=atom_feature) def _get_feature_scores(self, asset): # routine to read the feature scores from the log file, and return # the scores in a dictionary format. log_file_path = self._get_log_file_path(asset) atom_feature_scores_dict = {} atom_feature_idx_dict = {} for atom_feature in self.ATOM_FEATURES: atom_feature_scores_dict[atom_feature] = [] atom_feature_idx_dict[atom_feature] = 0 with open(log_file_path, 'rt') as log_file: for line in log_file.readlines(): for atom_feature in self.ATOM_FEATURES: re_template = "{af}: ([0-9]+) ([a-zA-Z0-9.-]+)".format(af=atom_feature) mo = re.match(re_template, line) if mo: cur_idx = int(mo.group(1)) assert cur_idx == atom_feature_idx_dict[atom_feature] # parse value, allowing NaN and inf val = float(mo.group(2)) if np.isnan(val) or np.isinf(val): val = None atom_feature_scores_dict[atom_feature].append(val) atom_feature_idx_dict[atom_feature] += 1 continue len_score = len(atom_feature_scores_dict[self.ATOM_FEATURES[0]]) assert len_score != 0 for atom_feature in self.ATOM_FEATURES[1:]: assert len_score == len(atom_feature_scores_dict[atom_feature]), \ "Feature data possibly corrupt. Run cleanup script and try again." 
feature_result = {} for atom_feature in self.ATOM_FEATURES: scores_key = self.get_scores_key(atom_feature) feature_result[scores_key] = atom_feature_scores_dict[atom_feature] return feature_result class VmafFeatureExtractor(FeatureExtractor): TYPE = "VMAF_feature" # VERSION = '0.1' # vmaf_study; Anush's VIF fix # VERSION = '0.2' # expose vif_num, vif_den, adm_num, adm_den, anpsnr # VERSION = '0.2.1' # expose vif num/den of each scale # VERSION = '0.2.2' # adm abs-->fabs, corrected border handling, uniform reading with option of offset for input YUV, updated VIF corner case # VERSION = '0.2.2b' # expose adm_den/num_scalex # VERSION = '0.2.3' # AVX for VMAF convolution; update adm features by folding noise floor into per coef # VERSION = '0.2.4' # Fix a bug in adm feature passing scale into dwt_quant_step # VERSION = '0.2.4b' # Modify by adding ADM noise floor outside cube root; add derived feature motion2 VERSION = '0.2.4c' # Modify by moving motion2 to c code ATOM_FEATURES = ['vif', 'adm', 'ansnr', 'motion', 'motion2', 'vif_num', 'vif_den', 'adm_num', 'adm_den', 'anpsnr', 'vif_num_scale0', 'vif_den_scale0', 'vif_num_scale1', 'vif_den_scale1', 'vif_num_scale2', 'vif_den_scale2', 'vif_num_scale3', 'vif_den_scale3', 'adm_num_scale0', 'adm_den_scale0', 'adm_num_scale1', 'adm_den_scale1', 'adm_num_scale2', 'adm_den_scale2', 'adm_num_scale3', 'adm_den_scale3', ] DERIVED_ATOM_FEATURES = ['vif_scale0', 'vif_scale1', 'vif_scale2', 'vif_scale3', 'vif2', 'adm2', 'adm3', 'adm_scale0', 'adm_scale1', 'adm_scale2', 'adm_scale3', ] ADM2_CONSTANT = 0 ADM_SCALE_CONSTANT = 0 def _generate_result(self, asset): # routine to call the command-line executable and generate feature # scores in the log file. quality_width, quality_height = asset.quality_width_height log_file_path = self._get_log_file_path(asset) yuv_type=self._get_workfile_yuv_type(asset) ref_path=asset.ref_workfile_path dis_path=asset.dis_workfile_path w=quality_width h=quality_height logger = self.logger ExternalProgramCaller.call_vmaf_feature(yuv_type, ref_path, dis_path, w, h, log_file_path, logger) @classmethod def _post_process_result(cls, result): # override Executor._post_process_result result = super(VmafFeatureExtractor, cls)._post_process_result(result) # adm2 = # (adm_num + ADM2_CONSTANT) / (adm_den + ADM2_CONSTANT) adm2_scores_key = cls.get_scores_key('adm2') adm_num_scores_key = cls.get_scores_key('adm_num') adm_den_scores_key = cls.get_scores_key('adm_den') result.result_dict[adm2_scores_key] = list( (np.array(result.result_dict[adm_num_scores_key]) + cls.ADM2_CONSTANT) / (np.array(result.result_dict[adm_den_scores_key]) + cls.ADM2_CONSTANT) ) # vif_scalei = vif_num_scalei / vif_den_scalei, i = 0, 1, 2, 3 vif_num_scale0_scores_key = cls.get_scores_key('vif_num_scale0') vif_den_scale0_scores_key = cls.get_scores_key('vif_den_scale0') vif_num_scale1_scores_key = cls.get_scores_key('vif_num_scale1') vif_den_scale1_scores_key = cls.get_scores_key('vif_den_scale1') vif_num_scale2_scores_key = cls.get_scores_key('vif_num_scale2') vif_den_scale2_scores_key = cls.get_scores_key('vif_den_scale2') vif_num_scale3_scores_key = cls.get_scores_key('vif_num_scale3') vif_den_scale3_scores_key = cls.get_scores_key('vif_den_scale3') vif_scale0_scores_key = cls.get_scores_key('vif_scale0') vif_scale1_scores_key = cls.get_scores_key('vif_scale1') vif_scale2_scores_key = cls.get_scores_key('vif_scale2') vif_scale3_scores_key = cls.get_scores_key('vif_scale3') result.result_dict[vif_scale0_scores_key] = list( 
(np.array(result.result_dict[vif_num_scale0_scores_key]) / np.array(result.result_dict[vif_den_scale0_scores_key])) ) result.result_dict[vif_scale1_scores_key] = list( (np.array(result.result_dict[vif_num_scale1_scores_key]) / np.array(result.result_dict[vif_den_scale1_scores_key])) ) result.result_dict[vif_scale2_scores_key] = list( (np.array(result.result_dict[vif_num_scale2_scores_key]) / np.array(result.result_dict[vif_den_scale2_scores_key])) ) result.result_dict[vif_scale3_scores_key] = list( (np.array(result.result_dict[vif_num_scale3_scores_key]) / np.array(result.result_dict[vif_den_scale3_scores_key])) ) # vif2 = # ((vif_num_scale0 / vif_den_scale0) + (vif_num_scale1 / vif_den_scale1) + # (vif_num_scale2 / vif_den_scale2) + (vif_num_scale3 / vif_den_scale3)) / 4.0 vif_scores_key = cls.get_scores_key('vif2') result.result_dict[vif_scores_key] = list( ( (np.array(result.result_dict[vif_num_scale0_scores_key]) / np.array(result.result_dict[vif_den_scale0_scores_key])) + (np.array(result.result_dict[vif_num_scale1_scores_key]) / np.array(result.result_dict[vif_den_scale1_scores_key])) + (np.array(result.result_dict[vif_num_scale2_scores_key]) / np.array(result.result_dict[vif_den_scale2_scores_key])) + (np.array(result.result_dict[vif_num_scale3_scores_key]) / np.array(result.result_dict[vif_den_scale3_scores_key])) ) / 4.0 ) # adm_scalei = adm_num_scalei / adm_den_scalei, i = 0, 1, 2, 3 adm_num_scale0_scores_key = cls.get_scores_key('adm_num_scale0') adm_den_scale0_scores_key = cls.get_scores_key('adm_den_scale0') adm_num_scale1_scores_key = cls.get_scores_key('adm_num_scale1') adm_den_scale1_scores_key = cls.get_scores_key('adm_den_scale1') adm_num_scale2_scores_key = cls.get_scores_key('adm_num_scale2') adm_den_scale2_scores_key = cls.get_scores_key('adm_den_scale2') adm_num_scale3_scores_key = cls.get_scores_key('adm_num_scale3') adm_den_scale3_scores_key = cls.get_scores_key('adm_den_scale3') adm_scale0_scores_key = cls.get_scores_key('adm_scale0') adm_scale1_scores_key = cls.get_scores_key('adm_scale1') adm_scale2_scores_key = cls.get_scores_key('adm_scale2') adm_scale3_scores_key = cls.get_scores_key('adm_scale3') result.result_dict[adm_scale0_scores_key] = list( (np.array(result.result_dict[adm_num_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT) / (
np.array(result.result_dict[adm_den_scale0_scores_key])
numpy.array
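# Note (added for clarity): the completion above wraps the per-frame score lists in
# np.array so the numerator/denominator division in _post_process_result is elementwise.
# A minimal sketch of the same pattern with made-up scores (names are illustrative):
import numpy as np
adm_num_scale0 = [0.91, 0.88, 0.90]  # hypothetical per-frame numerators
adm_den_scale0 = [1.00, 0.99, 1.02]  # hypothetical per-frame denominators
ADM_SCALE_CONSTANT = 0
adm_scale0 = list((np.array(adm_num_scale0) + ADM_SCALE_CONSTANT)
                  / (np.array(adm_den_scale0) + ADM_SCALE_CONSTANT))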
''' <NAME> set up :2020-1-9 intergrate img and label into one file -- fiducial1024_v1 ''' import argparse import sys, os import pickle import random import collections import json import numpy as np import scipy.io as io import scipy.misc as m import matplotlib.pyplot as plt import glob import math import time import threading import multiprocessing as mp from multiprocessing import Pool import re import cv2 # sys.path.append('/lustre/home/gwxie/hope/project/dewarp/datasets/') # /lustre/home/gwxie/program/project/unwarp/perturbed_imgaes/GAN import utils def getDatasets(dir): return os.listdir(dir) class perturbed(utils.BasePerturbed): def __init__(self, path, bg_path, save_path, save_suffix): self.path = path self.bg_path = bg_path self.save_path = save_path self.save_suffix = save_suffix def save_img(self, m, n, fold_curve='fold', repeat_time=4, fiducial_points = 16, relativeShift_position='relativeShift_v2'): origin_img = cv2.imread(self.path, flags=cv2.IMREAD_COLOR) save_img_shape = [512*2, 480*2] # 320 # reduce_value = np.random.choice([2**4, 2**5, 2**6, 2**7, 2**8], p=[0.01, 0.1, 0.4, 0.39, 0.1]) reduce_value = np.random.choice([2*2, 4*2, 8*2, 16*2, 24*2, 32*2, 40*2, 48*2], p=[0.02, 0.18, 0.2, 0.3, 0.1, 0.1, 0.08, 0.02]) # reduce_value = np.random.choice([8*2, 16*2, 24*2, 32*2, 40*2, 48*2], p=[0.01, 0.02, 0.2, 0.4, 0.19, 0.18]) # reduce_value = np.random.choice([16, 24, 32, 40, 48, 64], p=[0.01, 0.1, 0.2, 0.4, 0.2, 0.09]) base_img_shrink = save_img_shape[0] - reduce_value # enlarge_img_shrink = [1024, 768] # enlarge_img_shrink = [896, 672] # 420 enlarge_img_shrink = [512*4, 480*4] # 420 # enlarge_img_shrink = [896*2, 768*2] # 420 # enlarge_img_shrink = [896, 768] # 420 # enlarge_img_shrink = [768, 576] # 420 # enlarge_img_shrink = [640, 480] # 420 '''''' im_lr = origin_img.shape[0] im_ud = origin_img.shape[1] reduce_value_v2 = np.random.choice([2*2, 4*2, 8*2, 16*2, 24*2, 28*2, 32*2, 48*2], p=[0.02, 0.18, 0.2, 0.2, 0.1, 0.1, 0.1, 0.1]) # reduce_value_v2 = np.random.choice([16, 24, 28, 32, 48, 64], p=[0.01, 0.1, 0.2, 0.3, 0.25, 0.14]) if im_lr > im_ud: im_ud = min(int(im_ud / im_lr * base_img_shrink), save_img_shape[1] - reduce_value_v2) im_lr = save_img_shape[0] - reduce_value else: base_img_shrink = save_img_shape[1] - reduce_value im_lr = min(int(im_lr / im_ud * base_img_shrink), save_img_shape[0] - reduce_value_v2) im_ud = base_img_shrink if round(im_lr / im_ud, 2) < 0.5 or round(im_ud / im_lr, 2) < 0.5: repeat_time = min(repeat_time, 8) edge_padding = 3 im_lr -= im_lr % (fiducial_points-1) - (2*edge_padding) # im_lr % (fiducial_points-1) - 1 im_ud -= im_ud % (fiducial_points-1) - (2*edge_padding) # im_ud % (fiducial_points-1) - 1 im_hight = np.linspace(edge_padding, im_lr - edge_padding, fiducial_points, dtype=np.int64) im_wide = np.linspace(edge_padding, im_ud - edge_padding, fiducial_points, dtype=np.int64) # im_lr -= im_lr % (fiducial_points-1) - (1+2*edge_padding) # im_lr % (fiducial_points-1) - 1 # im_ud -= im_ud % (fiducial_points-1) - (1+2*edge_padding) # im_ud % (fiducial_points-1) - 1 # im_hight = np.linspace(edge_padding, im_lr - (1+edge_padding), fiducial_points, dtype=np.int64) # im_wide = np.linspace(edge_padding, im_ud - (1+edge_padding), fiducial_points, dtype=np.int64) im_x, im_y = np.meshgrid(im_hight, im_wide) segment_x = (im_lr) // (fiducial_points-1) segment_y = (im_ud) // (fiducial_points-1) # plt.plot(im_x, im_y, # color='limegreen', # marker='.', # linestyle='') # plt.grid(True) # plt.show() self.origin_img = cv2.resize(origin_img, (im_ud, im_lr), 
interpolation=cv2.INTER_CUBIC) perturbed_bg_ = getDatasets(self.bg_path) perturbed_bg_img_ = self.bg_path+random.choice(perturbed_bg_) perturbed_bg_img = cv2.imread(perturbed_bg_img_, flags=cv2.IMREAD_COLOR) mesh_shape = self.origin_img.shape[:2] self.synthesis_perturbed_img = np.full((enlarge_img_shrink[0], enlarge_img_shrink[1], 3), 256, dtype=np.float32)#np.zeros_like(perturbed_bg_img) # self.synthesis_perturbed_img = np.full((enlarge_img_shrink[0], enlarge_img_shrink[1], 3), 0, dtype=np.int16)#np.zeros_like(perturbed_bg_img) self.new_shape = self.synthesis_perturbed_img.shape[:2] perturbed_bg_img = cv2.resize(perturbed_bg_img, (save_img_shape[1], save_img_shape[0]), cv2.INPAINT_TELEA) origin_pixel_position = np.argwhere(np.zeros(mesh_shape, dtype=np.uint32) == 0).reshape(mesh_shape[0], mesh_shape[1], 2) pixel_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape(self.new_shape[0], self.new_shape[1], 2) self.perturbed_xy_ = np.zeros((self.new_shape[0], self.new_shape[1], 2)) # self.perturbed_xy_ = pixel_position.copy().astype(np.float32) # fiducial_points_grid = origin_pixel_position[im_x, im_y] self.synthesis_perturbed_label = np.zeros((self.new_shape[0], self.new_shape[1], 2)) x_min, y_min, x_max, y_max = self.adjust_position_v2(0, 0, mesh_shape[0], mesh_shape[1], save_img_shape) origin_pixel_position += [x_min, y_min] x_min, y_min, x_max, y_max = self.adjust_position(0, 0, mesh_shape[0], mesh_shape[1]) x_shift = random.randint(-enlarge_img_shrink[0]//16, enlarge_img_shrink[0]//16) y_shift = random.randint(-enlarge_img_shrink[1]//16, enlarge_img_shrink[1]//16) x_min += x_shift x_max += x_shift y_min += y_shift y_max += y_shift '''im_x,y''' im_x += x_min im_y += y_min self.synthesis_perturbed_img[x_min:x_max, y_min:y_max] = self.origin_img self.synthesis_perturbed_label[x_min:x_max, y_min:y_max] = origin_pixel_position synthesis_perturbed_img_map = self.synthesis_perturbed_img.copy() synthesis_perturbed_label_map = self.synthesis_perturbed_label.copy() foreORbackground_label = np.full((mesh_shape), 1, dtype=np.int16) foreORbackground_label_map = np.full((self.new_shape), 0, dtype=np.int16) foreORbackground_label_map[x_min:x_max, y_min:y_max] = foreORbackground_label # synthesis_perturbed_img_map = self.pad(self.synthesis_perturbed_img.copy(), x_min, y_min, x_max, y_max) # synthesis_perturbed_label_map = self.pad(synthesis_perturbed_label_map, x_min, y_min, x_max, y_max) '''*****************************************************************''' is_normalizationFun_mixture = self.is_perform(0.2, 0.8) # if not is_normalizationFun_mixture: normalizationFun_0_1 = False # normalizationFun_0_1 = self.is_perform(0.5, 0.5) if fold_curve == 'fold': fold_curve_random = True # is_normalizationFun_mixture = False normalizationFun_0_1 = self.is_perform(0.2, 0.8) if is_normalizationFun_mixture: alpha_perturbed = random.randint(80, 120) / 100 else: if normalizationFun_0_1 and repeat_time < 8: alpha_perturbed = random.randint(50, 70) / 100 else: alpha_perturbed = random.randint(70, 130) / 100 else: fold_curve_random = self.is_perform(0.1, 0.9) # False # self.is_perform(0.01, 0.99) alpha_perturbed = random.randint(80, 160) / 100 # is_normalizationFun_mixture = False # self.is_perform(0.01, 0.99) synthesis_perturbed_img = np.full_like(self.synthesis_perturbed_img, 256) # synthesis_perturbed_img = np.full_like(self.synthesis_perturbed_img, 0, dtype=np.int16) synthesis_perturbed_label = np.zeros_like(self.synthesis_perturbed_label) alpha_perturbed_change = self.is_perform(0.5, 0.5) 
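# Clarifying note (added): each pass of the loop below draws a random line through
# points perturbed_p and perturbed_pp, measures every pixel's (normalized) distance
# d to that line, and displaces pixels by omega(d) * perturbed_v. Folds use
# omega = alpha / (d + alpha), giving a sharp crease along the line, while curves
# typically use omega = 1 - d ** alpha, giving a smooth warp. Tiny illustration with
# made-up values (_demo_* names are hypothetical and unused elsewhere):
_demo_d = np.linspace(0.0, 1.0, 5)        # normalized distances from the random line
_demo_omega_fold = 1.0 / (_demo_d + 1.0)  # alpha = 1: decays away from the crease
_demo_omega_curve = 1.0 - _demo_d ** 1.0  # alpha = 1: linear falloff to zero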
p_pp_choice = self.is_perform(0.8, 0.2) if fold_curve == 'fold' else self.is_perform(0.1, 0.9) for repeat_i in range(repeat_time): if alpha_perturbed_change: if fold_curve == 'fold': if is_normalizationFun_mixture: alpha_perturbed = random.randint(80, 120) / 100 else: if normalizationFun_0_1 and repeat_time < 8: alpha_perturbed = random.randint(50, 70) / 100 else: alpha_perturbed = random.randint(70, 130) / 100 else: alpha_perturbed = random.randint(80, 160) / 100 '''''' linspace_x = [0, (self.new_shape[0] - im_lr) // 2 - 1, self.new_shape[0] - (self.new_shape[0] - im_lr) // 2 - 1, self.new_shape[0] - 1] linspace_y = [0, (self.new_shape[1] - im_ud) // 2 - 1, self.new_shape[1] - (self.new_shape[1] - im_ud) // 2 - 1, self.new_shape[1] - 1] linspace_x_seq = [1, 2, 3] linspace_y_seq = [1, 2, 3] r_x = random.choice(linspace_x_seq) r_y = random.choice(linspace_y_seq) perturbed_p = np.array( [random.randint(linspace_x[r_x-1] * 10, linspace_x[r_x] * 10), random.randint(linspace_y[r_y-1] * 10, linspace_y[r_y] * 10)])/10 if ((r_x == 1 or r_x == 3) and (r_y == 1 or r_y == 3)) and p_pp_choice: linspace_x_seq.remove(r_x) linspace_y_seq.remove(r_y) r_x = random.choice(linspace_x_seq) r_y = random.choice(linspace_y_seq) perturbed_pp = np.array( [random.randint(linspace_x[r_x-1] * 10, linspace_x[r_x] * 10), random.randint(linspace_y[r_y-1] * 10, linspace_y[r_y] * 10)])/10 # perturbed_p, perturbed_pp = np.array( # [random.randint(0, self.new_shape[0] * 10) / 10, # random.randint(0, self.new_shape[1] * 10) / 10]) \ # , np.array([random.randint(0, self.new_shape[0] * 10) / 10, # random.randint(0, self.new_shape[1] * 10) / 10]) # perturbed_p, perturbed_pp = np.array( # [random.randint((self.new_shape[0]-im_lr)//2*10, (self.new_shape[0]-(self.new_shape[0]-im_lr)//2) * 10) / 10, # random.randint((self.new_shape[1]-im_ud)//2*10, (self.new_shape[1]-(self.new_shape[1]-im_ud)//2) * 10) / 10]) \ # , np.array([random.randint((self.new_shape[0]-im_lr)//2*10, (self.new_shape[0]-(self.new_shape[0]-im_lr)//2) * 10) / 10, # random.randint((self.new_shape[1]-im_ud)//2*10, (self.new_shape[1]-(self.new_shape[1]-im_ud)//2) * 10) / 10]) '''''' perturbed_vp = perturbed_pp - perturbed_p perturbed_vp_norm = np.linalg.norm(perturbed_vp) perturbed_distance_vertex_and_line = np.dot((perturbed_p - pixel_position), perturbed_vp) / perturbed_vp_norm '''''' # perturbed_v = np.array([random.randint(-3000, 3000) / 100, random.randint(-3000, 3000) / 100]) # perturbed_v = np.array([random.randint(-4000, 4000) / 100, random.randint(-4000, 4000) / 100]) if fold_curve == 'fold' and self.is_perform(0.6, 0.4): # self.is_perform(0.3, 0.7): # perturbed_v = np.array([random.randint(-9000, 9000) / 100, random.randint(-9000, 9000) / 100]) perturbed_v = np.array([random.randint(-10000, 10000) / 100, random.randint(-10000, 10000) / 100]) # perturbed_v = np.array([random.randint(-11000, 11000) / 100, random.randint(-11000, 11000) / 100]) else: # perturbed_v = np.array([random.randint(-9000, 9000) / 100, random.randint(-9000, 9000) / 100]) # perturbed_v = np.array([random.randint(-16000, 16000) / 100, random.randint(-16000, 16000) / 100]) perturbed_v = np.array([random.randint(-8000, 8000) / 100, random.randint(-8000, 8000) / 100]) # perturbed_v = np.array([random.randint(-3500, 3500) / 100, random.randint(-3500, 3500) / 100]) # perturbed_v = np.array([random.randint(-600, 600) / 10, random.randint(-600, 600) / 10]) '''''' if fold_curve == 'fold': if is_normalizationFun_mixture: if self.is_perform(0.5, 0.5): perturbed_d = 
np.abs(self.get_normalize(perturbed_distance_vertex_and_line)) else: perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), random.randint(1, 2)) else: if normalizationFun_0_1: perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), 2) else: perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line)) else: if is_normalizationFun_mixture: if self.is_perform(0.5, 0.5): perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line)) else: perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), random.randint(1, 2)) else: if normalizationFun_0_1: perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), 2) else: perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line)) '''''' if fold_curve_random: # omega_perturbed = (alpha_perturbed+0.2) / (perturbed_d + alpha_perturbed) # omega_perturbed = alpha_perturbed**perturbed_d omega_perturbed = alpha_perturbed / (perturbed_d + alpha_perturbed) else: omega_perturbed = 1 - perturbed_d ** alpha_perturbed '''shadow''' if self.is_perform(0.6, 0.4): synthesis_perturbed_img_map[x_min:x_max, y_min:y_max] = np.minimum(np.maximum(synthesis_perturbed_img_map[x_min:x_max, y_min:y_max] - np.int16(np.round(omega_perturbed[x_min:x_max, y_min:y_max].repeat(3).reshape(x_max-x_min, y_max-y_min, 3) * abs(np.linalg.norm(perturbed_v//2))*np.array([0.4-random.random()*0.1, 0.4-random.random()*0.1, 0.4-random.random()*0.1]))), 0), 255) '''''' if relativeShift_position in ['position', 'relativeShift_v2']: self.perturbed_xy_ += np.array([omega_perturbed * perturbed_v[0], omega_perturbed * perturbed_v[1]]).transpose(1, 2, 0) else: print('relativeShift_position error') exit() ''' flat_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape( self.new_shape[0] * self.new_shape[1], 2) vtx, wts = self.interp_weights(self.perturbed_xy_.reshape(self.new_shape[0] * self.new_shape[1], 2), flat_position) wts_sum = np.abs(wts).sum(-1) # flat_img.reshape(flat_shape[0] * flat_shape[1], 3)[:] = interpolate(pixel, vtx, wts) wts = wts[wts_sum <= 1, :] vtx = vtx[wts_sum <= 1, :] synthesis_perturbed_img.reshape(self.new_shape[0] * self.new_shape[1], 3)[wts_sum <= 1, :] = self.interpolate(synthesis_perturbed_img_map.reshape(self.new_shape[0] * self.new_shape[1], 3), vtx, wts) synthesis_perturbed_label.reshape(self.new_shape[0] * self.new_shape[1], 2)[wts_sum <= 1, :] = self.interpolate(synthesis_perturbed_label_map.reshape(self.new_shape[0] * self.new_shape[1], 2), vtx, wts) foreORbackground_label = np.zeros(self.new_shape) foreORbackground_label.reshape(self.new_shape[0] * self.new_shape[1], 1)[wts_sum <= 1, :] = self.interpolate(foreORbackground_label_map.reshape(self.new_shape[0] * self.new_shape[1], 1), vtx, wts) foreORbackground_label[foreORbackground_label < 0.99] = 0 foreORbackground_label[foreORbackground_label >= 0.99] = 1 # synthesis_perturbed_img = np.around(synthesis_perturbed_img).astype(np.uint8) synthesis_perturbed_label[:, :, 0] *= foreORbackground_label synthesis_perturbed_label[:, :, 1] *= foreORbackground_label synthesis_perturbed_img[:, :, 0] *= foreORbackground_label synthesis_perturbed_img[:, :, 1] *= foreORbackground_label synthesis_perturbed_img[:, :, 2] *= foreORbackground_label self.synthesis_perturbed_img = synthesis_perturbed_img self.synthesis_perturbed_label = synthesis_perturbed_label ''' '''perspective''' perspective_shreshold = random.randint(26, 36)*10 # 280 x_min_per, y_min_per, x_max_per, y_max_per = 
self.adjust_position(perspective_shreshold, perspective_shreshold, self.new_shape[0]-perspective_shreshold, self.new_shape[1]-perspective_shreshold) pts1 = np.float32([[x_min_per, y_min_per], [x_max_per, y_min_per], [x_min_per, y_max_per], [x_max_per, y_max_per]]) e_1_ = x_max_per - x_min_per e_2_ = y_max_per - y_min_per e_3_ = e_2_ e_4_ = e_1_ perspective_shreshold_h = e_1_*0.02 perspective_shreshold_w = e_2_*0.02 a_min_, a_max_ = 70, 110 # if self.is_perform(1, 0): if fold_curve == 'curve' and self.is_perform(0.5, 0.5): if self.is_perform(0.5, 0.5): while True: pts2 = np.around( np.float32([[x_min_per - (random.random()) * perspective_shreshold, y_min_per + (random.random()) * perspective_shreshold], [x_max_per - (random.random()) * perspective_shreshold, y_min_per - (random.random()) * perspective_shreshold], [x_min_per + (random.random()) * perspective_shreshold, y_max_per + (random.random()) * perspective_shreshold], [x_max_per + (random.random()) * perspective_shreshold, y_max_per - (random.random()) * perspective_shreshold]])) # right e_1 = np.linalg.norm(pts2[0]-pts2[1]) e_2 = np.linalg.norm(pts2[0]-pts2[2]) e_3 = np.linalg.norm(pts2[1]-pts2[3]) e_4 = np.linalg.norm(pts2[2]-pts2[3]) if e_1_+perspective_shreshold_h > e_1 and e_2_+perspective_shreshold_w > e_2 and e_3_+perspective_shreshold_w > e_3 and e_4_+perspective_shreshold_h > e_4 and \ e_1_ - perspective_shreshold_h < e_1 and e_2_ - perspective_shreshold_w < e_2 and e_3_ - perspective_shreshold_w < e_3 and e_4_ - perspective_shreshold_h < e_4 and \ abs(e_1-e_4) < perspective_shreshold_h and abs(e_2-e_3) < perspective_shreshold_w: a0_, a1_, a2_, a3_ = self.get_angle_4(pts2) if (a0_ > a_min_ and a0_ < a_max_) or (a1_ > a_min_ and a1_ < a_max_) or (a2_ > a_min_ and a2_ < a_max_) or (a3_ > a_min_ and a3_ < a_max_): break else: while True: pts2 = np.around( np.float32([[x_min_per + (random.random()) * perspective_shreshold, y_min_per - (random.random()) * perspective_shreshold], [x_max_per + (random.random()) * perspective_shreshold, y_min_per + (random.random()) * perspective_shreshold], [x_min_per - (random.random()) * perspective_shreshold, y_max_per - (random.random()) * perspective_shreshold], [x_max_per - (random.random()) * perspective_shreshold, y_max_per + (random.random()) * perspective_shreshold]])) e_1 = np.linalg.norm(pts2[0]-pts2[1]) e_2 = np.linalg.norm(pts2[0]-pts2[2]) e_3 = np.linalg.norm(pts2[1]-pts2[3]) e_4 = np.linalg.norm(pts2[2]-pts2[3]) if e_1_+perspective_shreshold_h > e_1 and e_2_+perspective_shreshold_w > e_2 and e_3_+perspective_shreshold_w > e_3 and e_4_+perspective_shreshold_h > e_4 and \ e_1_ - perspective_shreshold_h < e_1 and e_2_ - perspective_shreshold_w < e_2 and e_3_ - perspective_shreshold_w < e_3 and e_4_ - perspective_shreshold_h < e_4 and \ abs(e_1-e_4) < perspective_shreshold_h and abs(e_2-e_3) < perspective_shreshold_w: a0_, a1_, a2_, a3_ = self.get_angle_4(pts2) if (a0_ > a_min_ and a0_ < a_max_) or (a1_ > a_min_ and a1_ < a_max_) or (a2_ > a_min_ and a2_ < a_max_) or (a3_ > a_min_ and a3_ < a_max_): break else: while True: pts2 = np.around(np.float32([[x_min_per+(random.random()-0.5)*perspective_shreshold, y_min_per+(random.random()-0.5)*perspective_shreshold], [x_max_per+(random.random()-0.5)*perspective_shreshold, y_min_per+(random.random()-0.5)*perspective_shreshold], [x_min_per+(random.random()-0.5)*perspective_shreshold, y_max_per+(random.random()-0.5)*perspective_shreshold], [x_max_per+(random.random()-0.5)*perspective_shreshold, 
y_max_per+(random.random()-0.5)*perspective_shreshold]])) e_1 = np.linalg.norm(pts2[0]-pts2[1]) e_2 = np.linalg.norm(pts2[0]-pts2[2]) e_3 = np.linalg.norm(pts2[1]-pts2[3]) e_4 = np.linalg.norm(pts2[2]-pts2[3]) if e_1_+perspective_shreshold_h > e_1 and e_2_+perspective_shreshold_w > e_2 and e_3_+perspective_shreshold_w > e_3 and e_4_+perspective_shreshold_h > e_4 and \ e_1_ - perspective_shreshold_h < e_1 and e_2_ - perspective_shreshold_w < e_2 and e_3_ - perspective_shreshold_w < e_3 and e_4_ - perspective_shreshold_h < e_4 and \ abs(e_1-e_4) < perspective_shreshold_h and abs(e_2-e_3) < perspective_shreshold_w: a0_, a1_, a2_, a3_ = self.get_angle_4(pts2) if (a0_ > a_min_ and a0_ < a_max_) or (a1_ > a_min_ and a1_ < a_max_) or (a2_ > a_min_ and a2_ < a_max_) or (a3_ > a_min_ and a3_ < a_max_): break M = cv2.getPerspectiveTransform(pts1, pts2) one = np.ones((self.new_shape[0], self.new_shape[1], 1), dtype=np.int16) matr = np.dstack((pixel_position, one)) new = np.dot(M, matr.reshape(-1, 3).T).T.reshape(self.new_shape[0], self.new_shape[1], 3) x = new[:, :, 0]/new[:, :, 2] y = new[:, :, 1]/new[:, :, 2] perturbed_xy_ = np.dstack((x, y)) # perturbed_xy_round_int = np.around(cv2.bilateralFilter(perturbed_xy_round_int, 9, 75, 75)) # perturbed_xy_round_int = np.around(cv2.blur(perturbed_xy_, (17, 17))) # perturbed_xy_round_int = cv2.blur(perturbed_xy_round_int, (17, 17)) # perturbed_xy_round_int = cv2.GaussianBlur(perturbed_xy_round_int, (7, 7), 0) perturbed_xy_ = perturbed_xy_-np.min(perturbed_xy_.T.reshape(2, -1), 1) # perturbed_xy_round_int = np.around(perturbed_xy_round_int-np.min(perturbed_xy_round_int.T.reshape(2, -1), 1)).astype(np.int16) self.perturbed_xy_ += perturbed_xy_ '''perspective end''' '''to img''' flat_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape( self.new_shape[0] * self.new_shape[1], 2) # self.perturbed_xy_ = cv2.blur(self.perturbed_xy_, (7, 7)) self.perturbed_xy_ = cv2.GaussianBlur(self.perturbed_xy_, (7, 7), 0) '''get fiducial points''' fiducial_points_coordinate = self.perturbed_xy_[im_x, im_y] vtx, wts = self.interp_weights(self.perturbed_xy_.reshape(self.new_shape[0] * self.new_shape[1], 2), flat_position) wts_sum = np.abs(wts).sum(-1) # flat_img.reshape(flat_shape[0] * flat_shape[1], 3)[:] = interpolate(pixel, vtx, wts) wts = wts[wts_sum <= 1, :] vtx = vtx[wts_sum <= 1, :] synthesis_perturbed_img.reshape(self.new_shape[0] * self.new_shape[1], 3)[wts_sum <= 1, :] = self.interpolate(synthesis_perturbed_img_map.reshape(self.new_shape[0] * self.new_shape[1], 3), vtx, wts) synthesis_perturbed_label.reshape(self.new_shape[0] * self.new_shape[1], 2)[wts_sum <= 1, :] = self.interpolate(synthesis_perturbed_label_map.reshape(self.new_shape[0] * self.new_shape[1], 2), vtx, wts) foreORbackground_label = np.zeros(self.new_shape) foreORbackground_label.reshape(self.new_shape[0] * self.new_shape[1], 1)[wts_sum <= 1, :] = self.interpolate(foreORbackground_label_map.reshape(self.new_shape[0] * self.new_shape[1], 1), vtx, wts) foreORbackground_label[foreORbackground_label < 0.99] = 0 foreORbackground_label[foreORbackground_label >= 0.99] = 1 self.synthesis_perturbed_img = synthesis_perturbed_img self.synthesis_perturbed_label = synthesis_perturbed_label self.foreORbackground_label = foreORbackground_label '''draw fiducial points stepSize = 0 fiducial_points_synthesis_perturbed_img = self.synthesis_perturbed_img.copy() for l in fiducial_points_coordinate.astype(np.int64).reshape(-1,2): cv2.circle(fiducial_points_synthesis_perturbed_img, (l[1] + 
math.ceil(stepSize / 2), l[0] + math.ceil(stepSize / 2)), 5, (0, 0, 255), -1)
cv2.imwrite('/lustre/home/gwxie/program/project/unwarp/unwarp_perturbed/TPS/img/cv_TPS_large.jpg', fiducial_points_synthesis_perturbed_img)
'''
'''clip'''
# Scan from the centre outwards for the first fully-background row/column;
# a row or column is pure background when every pixel sums to 768 = 3 * 256.
perturbed_x_min, perturbed_y_min, perturbed_x_max, perturbed_y_max = -1, -1, self.new_shape[0], self.new_shape[1]
for x in range(self.new_shape[0] // 2, perturbed_x_max):
    if np.sum(self.synthesis_perturbed_img[x, :]) == 768 * self.new_shape[1] and perturbed_x_max - 1 > x:
        perturbed_x_max = x
        break
for x in range(self.new_shape[0] // 2, perturbed_x_min, -1):
    if np.sum(self.synthesis_perturbed_img[x, :]) == 768 * self.new_shape[1] and x > 0:
        perturbed_x_min = x
        break
for y in range(self.new_shape[1] // 2, perturbed_y_max):
    if np.sum(self.synthesis_perturbed_img[:, y]) == 768 * self.new_shape[0] and perturbed_y_max - 1 > y:
        perturbed_y_max = y
        break
for y in range(self.new_shape[1] // 2, perturbed_y_min, -1):
    if np.sum(self.synthesis_perturbed_img[:, y]) == 768 * self.new_shape[0] and y > 0:
        perturbed_y_min = y
        break
if perturbed_x_min == 0 or perturbed_x_max == self.new_shape[0] or perturbed_y_min == 0 or perturbed_y_max == self.new_shape[1]:
    raise Exception('clip error')
if perturbed_x_max - perturbed_x_min < im_lr//2 or perturbed_y_max - perturbed_y_min < im_ud//2:
    raise Exception('clip error')
perfix_ = self.save_suffix+'_'+str(m)+'_'+str(n)
is_shrink = False
if perturbed_x_max - perturbed_x_min > save_img_shape[0] or perturbed_y_max - perturbed_y_min > save_img_shape[1]:
    is_shrink = True
    synthesis_perturbed_img = cv2.resize(self.synthesis_perturbed_img[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max, :].copy(), (im_ud, im_lr), interpolation=cv2.INTER_LINEAR)
    synthesis_perturbed_label = cv2.resize(self.synthesis_perturbed_label[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max, :].copy(), (im_ud, im_lr), interpolation=cv2.INTER_LINEAR)
    foreORbackground_label = cv2.resize(self.foreORbackground_label[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max].copy(), (im_ud, im_lr), interpolation=cv2.INTER_LINEAR)
    foreORbackground_label[foreORbackground_label < 0.99] = 0
    foreORbackground_label[foreORbackground_label >= 0.99] = 1
    '''shrink fiducial points'''
    center_x_l, center_y_l = perturbed_x_min + (perturbed_x_max - perturbed_x_min) // 2, perturbed_y_min + (perturbed_y_max - perturbed_y_min) // 2
    fiducial_points_coordinate_copy = fiducial_points_coordinate.copy()
    shrink_x = im_lr/(perturbed_x_max - perturbed_x_min)
    shrink_y = im_ud/(perturbed_y_max - perturbed_y_min)
    fiducial_points_coordinate *= [shrink_x, shrink_y]
    center_x_l *= shrink_x
    center_y_l *= shrink_y
    # fiducial_points_coordinate[1:, 1:] *= [shrink_x, shrink_y]
    # fiducial_points_coordinate[1:, :1, 0] *= shrink_x
    # fiducial_points_coordinate[:1, 1:, 1] *= shrink_y
    # perturbed_x_min_copy, perturbed_y_min_copy, perturbed_x_max_copy, perturbed_y_max_copy = perturbed_x_min, perturbed_y_min, perturbed_x_max, perturbed_y_max
    perturbed_x_min, perturbed_y_min, perturbed_x_max, perturbed_y_max = self.adjust_position_v2(0, 0, im_lr, im_ud, self.new_shape)
    self.synthesis_perturbed_img =
np.full_like(self.synthesis_perturbed_img, 256)
numpy.full_like
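For readers checking the prediction, a minimal sketch of what numpy.full_like does in this context, assuming a small stand-in array in place of self.synthesis_perturbed_img (the fill value 256 is the background marker this pipeline relies on; the clipping loops above detect fully-background rows via sums of 768 = 3 * 256):

import numpy as np

img = np.zeros((4, 4, 3), dtype=np.int16)  # hypothetical stand-in for self.synthesis_perturbed_img
canvas = np.full_like(img, 256)            # same shape and dtype, every entry set to 256
assert canvas.shape == img.shape and canvas.dtype == img.dtype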
""" YTArray class. """ from __future__ import print_function #----------------------------------------------------------------------------- # Copyright (c) 2013, yt Development Team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. #----------------------------------------------------------------------------- import copy import numpy as np from distutils.version import LooseVersion from functools import wraps from numpy import \ add, subtract, multiply, divide, logaddexp, logaddexp2, true_divide, \ floor_divide, negative, power, remainder, mod, absolute, rint, \ sign, conj, exp, exp2, log, log2, log10, expm1, log1p, sqrt, square, \ reciprocal, sin, cos, tan, arcsin, arccos, arctan, arctan2, \ hypot, sinh, cosh, tanh, arcsinh, arccosh, arctanh, deg2rad, rad2deg, \ bitwise_and, bitwise_or, bitwise_xor, invert, left_shift, right_shift, \ greater, greater_equal, less, less_equal, not_equal, equal, logical_and, \ logical_or, logical_xor, logical_not, maximum, minimum, fmax, fmin, \ isreal, iscomplex, isfinite, isinf, isnan, signbit, copysign, nextafter, \ modf, ldexp, frexp, fmod, floor, ceil, trunc, fabs, spacing try: # numpy 1.13 or newer from numpy import positive, divmod as divmod_, isnat, heaviside except ImportError: positive, divmod_, isnat, heaviside = (None,)*4 from yt.units.unit_object import Unit, UnitParseError from yt.units.unit_registry import UnitRegistry from yt.units.dimensions import \ angle, \ current_mks, \ dimensionless, \ em_dimensions from yt.utilities.exceptions import \ YTUnitOperationError, YTUnitConversionError, \ YTUfuncUnitError, YTIterableUnitCoercionError, \ YTInvalidUnitEquivalence, YTEquivalentDimsError from yt.utilities.lru_cache import lru_cache from numbers import Number as numeric_type from yt.utilities.on_demand_imports import _astropy from sympy import Rational from yt.units.unit_lookup_table import \ default_unit_symbol_lut from yt.units.equivalencies import equivalence_registry from yt.utilities.logger import ytLogger as mylog from .pint_conversions import convert_pint_units NULL_UNIT = Unit() POWER_SIGN_MAPPING = {multiply: 1, divide: -1} # redefine this here to avoid a circular import from yt.funcs def iterable(obj): try: len(obj) except: return False return True def return_arr(func): @wraps(func) def wrapped(*args, **kwargs): ret, units = func(*args, **kwargs) if ret.shape == (): return YTQuantity(ret, units) else: # This could be a subclass, so don't call YTArray directly. 
return type(args[0])(ret, units) return wrapped @lru_cache(maxsize=128, typed=False) def sqrt_unit(unit): return unit**0.5 @lru_cache(maxsize=128, typed=False) def multiply_units(unit1, unit2): return unit1 * unit2 def preserve_units(unit1, unit2=None): return unit1 @lru_cache(maxsize=128, typed=False) def power_unit(unit, power): return unit**power @lru_cache(maxsize=128, typed=False) def square_unit(unit): return unit*unit @lru_cache(maxsize=128, typed=False) def divide_units(unit1, unit2): return unit1/unit2 @lru_cache(maxsize=128, typed=False) def reciprocal_unit(unit): return unit**-1 def passthrough_unit(unit, unit2=None): return unit def return_without_unit(unit, unit2=None): return None def arctan2_unit(unit1, unit2): return NULL_UNIT def comparison_unit(unit1, unit2=None): return None def invert_units(unit): raise TypeError( "Bit-twiddling operators are not defined for YTArray instances") def bitop_units(unit1, unit2): raise TypeError( "Bit-twiddling operators are not defined for YTArray instances") def get_inp_u_unary(ufunc, inputs, out_arr=None): inp = inputs[0] u = getattr(inp, 'units', None) if u is None: u = NULL_UNIT if u.dimensions is angle and ufunc in trigonometric_operators: inp = inp.in_units('radian').v if out_arr is not None: out_arr = ufunc(inp).view(np.ndarray) return out_arr, inp, u def get_inp_u_binary(ufunc, inputs): inp1 = coerce_iterable_units(inputs[0]) inp2 = coerce_iterable_units(inputs[1]) unit1 = getattr(inp1, 'units', None) unit2 = getattr(inp2, 'units', None) ret_class = get_binary_op_return_class(type(inp1), type(inp2)) if unit1 is None: unit1 = Unit(registry=getattr(unit2, 'registry', None)) if unit2 is None and ufunc is not power: unit2 = Unit(registry=getattr(unit1, 'registry', None)) elif ufunc is power: unit2 = inp2 if isinstance(unit2, np.ndarray): if isinstance(unit2, YTArray): if unit2.units.is_dimensionless: pass else: raise YTUnitOperationError(ufunc, unit1, unit2) unit2 = 1.0 return (inp1, inp2), (unit1, unit2), ret_class def handle_preserve_units(inps, units, ufunc, ret_class): if units[0] != units[1]: any_nonzero = [np.any(inps[0]), np.any(inps[1])] if any_nonzero[0] == np.bool_(False): units = (units[1], units[1]) elif any_nonzero[1] == np.bool_(False): units = (units[0], units[0]) else: if not units[0].same_dimensions_as(units[1]): raise YTUnitOperationError(ufunc, *units) inps = (inps[0], ret_class(inps[1]).to( ret_class(inps[0]).units)) return inps, units def handle_comparison_units(inps, units, ufunc, ret_class, raise_error=False): if units[0] != units[1]: u1d = units[0].is_dimensionless u2d = units[1].is_dimensionless any_nonzero = [np.any(inps[0]), np.any(inps[1])] if any_nonzero[0] == np.bool_(False): units = (units[1], units[1]) elif any_nonzero[1] == np.bool_(False): units = (units[0], units[0]) elif not any([u1d, u2d]): if not units[0].same_dimensions_as(units[1]): raise YTUnitOperationError(ufunc, *units) else: if raise_error: raise YTUfuncUnitError(ufunc, *units) inps = (inps[0], ret_class(inps[1]).to( ret_class(inps[0]).units)) return inps, units def handle_multiply_divide_units(unit, units, out, out_arr): if unit.is_dimensionless and unit.base_value != 1.0: if not units[0].is_dimensionless: if units[0].dimensions == units[1].dimensions: out_arr = np.multiply(out_arr.view(np.ndarray), unit.base_value, out=out) unit = Unit(registry=unit.registry) return out, out_arr, unit def coerce_iterable_units(input_object): if isinstance(input_object, np.ndarray): return input_object if iterable(input_object): if any([isinstance(o, 
YTArray) for o in input_object]): ff = getattr(input_object[0], 'units', NULL_UNIT, ) if any([ff != getattr(_, 'units', NULL_UNIT) for _ in input_object]): raise YTIterableUnitCoercionError(input_object) # This will create a copy of the data in the iterable. return YTArray(input_object) return input_object else: return input_object def sanitize_units_mul(this_object, other_object): inp = coerce_iterable_units(this_object) ret = coerce_iterable_units(other_object) # If the other object is a YTArray and has the same dimensions as the object # under consideration, convert so we don't mix units with the same # dimensions. if isinstance(ret, YTArray): if inp.units.same_dimensions_as(ret.units): ret.in_units(inp.units) return ret def sanitize_units_add(this_object, other_object, op_string): inp = coerce_iterable_units(this_object) ret = coerce_iterable_units(other_object) # Make sure the other object is a YTArray before we use the `units` # attribute. if isinstance(ret, YTArray): if not inp.units.same_dimensions_as(ret.units): # handle special case of adding or subtracting with zero or # array filled with zero if not np.any(other_object): return ret.view(np.ndarray) elif not np.any(this_object): return ret raise YTUnitOperationError(op_string, inp.units, ret.units) ret = ret.in_units(inp.units) else: # If the other object is not a YTArray, then one of the arrays must be # dimensionless or filled with zeros if not inp.units.is_dimensionless and np.any(ret): raise YTUnitOperationError(op_string, inp.units, dimensionless) return ret def validate_comparison_units(this, other, op_string): # Check that other is a YTArray. if hasattr(other, 'units'): if this.units.expr is other.units.expr: if this.units.base_value == other.units.base_value: return other if not this.units.same_dimensions_as(other.units): raise YTUnitOperationError(op_string, this.units, other.units) return other.in_units(this.units) return other @lru_cache(maxsize=128, typed=False) def _unit_repr_check_same(my_units, other_units): """ Takes a Unit object, or string of known unit symbol, and check that it is compatible with this quantity. Returns Unit object. """ # let Unit() handle units arg if it's not already a Unit obj. 
if not isinstance(other_units, Unit): other_units = Unit(other_units, registry=my_units.registry) equiv_dims = em_dimensions.get(my_units.dimensions, None) if equiv_dims == other_units.dimensions: if current_mks in equiv_dims.free_symbols: base = "SI" else: base = "CGS" raise YTEquivalentDimsError(my_units, other_units, base) if not my_units.same_dimensions_as(other_units): raise YTUnitConversionError( my_units, my_units.dimensions, other_units, other_units.dimensions) return other_units unary_operators = ( negative, absolute, rint, sign, conj, exp, exp2, log, log2, log10, expm1, log1p, sqrt, square, reciprocal, sin, cos, tan, arcsin, arccos, arctan, sinh, cosh, tanh, arcsinh, arccosh, arctanh, deg2rad, rad2deg, invert, logical_not, isreal, iscomplex, isfinite, isinf, isnan, signbit, floor, ceil, trunc, modf, frexp, fabs, spacing, positive, isnat, ) binary_operators = ( add, subtract, multiply, divide, logaddexp, logaddexp2, true_divide, power, remainder, mod, arctan2, hypot, bitwise_and, bitwise_or, bitwise_xor, left_shift, right_shift, greater, greater_equal, less, less_equal, not_equal, equal, logical_and, logical_or, logical_xor, maximum, minimum, fmax, fmin, copysign, nextafter, ldexp, fmod, divmod_, heaviside ) trigonometric_operators = ( sin, cos, tan, ) class YTArray(np.ndarray): """ An ndarray subclass that attaches a symbolic unit object to the array data. Parameters ---------- input_array : :obj:`!iterable` A tuple, list, or array to attach units to input_units : String unit specification, unit symbol object, or astropy units The units of the array. Powers must be specified using python syntax (cm**3, not cm^3). registry : ~yt.units.unit_registry.UnitRegistry The registry to create units from. If input_units is already associated with a unit registry and this is specified, this will be used instead of the registry associated with the unit object. dtype : data-type The dtype of the array data. Defaults to the dtype of the input data, or, if none is found, uses np.float64 bypass_validation : boolean If True, all input validation is skipped. Using this option may produce corrupted, invalid units or array data, but can lead to significant speedups in the input validation logic adds significant overhead. If set, input_units *must* be a valid unit object. Defaults to False. Examples -------- >>> from yt import YTArray >>> a = YTArray([1, 2, 3], 'cm') >>> b = YTArray([4, 5, 6], 'm') >>> a + b YTArray([ 401., 502., 603.]) cm >>> b + a YTArray([ 4.01, 5.02, 6.03]) m NumPy ufuncs will pass through units where appropriate. >>> import numpy as np >>> a = YTArray(np.arange(8) - 4, 'g/cm**3') >>> np.abs(a) YTArray([4, 3, 2, 1, 0, 1, 2, 3]) g/cm**3 and strip them when it would be annoying to deal with them. >>> np.log10(a) array([ -inf, 0. 
, 0.30103 , 0.47712125, 0.60205999, 0.69897 , 0.77815125, 0.84509804]) YTArray is tightly integrated with yt datasets: >>> import yt >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030') >>> a = ds.arr(np.ones(5), 'code_length') >>> a.in_cgs() YTArray([ 3.08600000e+24, 3.08600000e+24, 3.08600000e+24, 3.08600000e+24, 3.08600000e+24]) cm This is equivalent to: >>> b = YTArray(np.ones(5), 'code_length', registry=ds.unit_registry) >>> np.all(a == b) True """ _ufunc_registry = { add: preserve_units, subtract: preserve_units, multiply: multiply_units, divide: divide_units, logaddexp: return_without_unit, logaddexp2: return_without_unit, true_divide: divide_units, floor_divide: divide_units, negative: passthrough_unit, power: power_unit, remainder: preserve_units, mod: preserve_units, fmod: preserve_units, absolute: passthrough_unit, fabs: passthrough_unit, rint: return_without_unit, sign: return_without_unit, conj: passthrough_unit, exp: return_without_unit, exp2: return_without_unit, log: return_without_unit, log2: return_without_unit, log10: return_without_unit, expm1: return_without_unit, log1p: return_without_unit, sqrt: sqrt_unit, square: square_unit, reciprocal: reciprocal_unit, sin: return_without_unit, cos: return_without_unit, tan: return_without_unit, sinh: return_without_unit, cosh: return_without_unit, tanh: return_without_unit, arcsin: return_without_unit, arccos: return_without_unit, arctan: return_without_unit, arctan2: arctan2_unit, arcsinh: return_without_unit, arccosh: return_without_unit, arctanh: return_without_unit, hypot: preserve_units, deg2rad: return_without_unit, rad2deg: return_without_unit, bitwise_and: bitop_units, bitwise_or: bitop_units, bitwise_xor: bitop_units, invert: invert_units, left_shift: bitop_units, right_shift: bitop_units, greater: comparison_unit, greater_equal: comparison_unit, less: comparison_unit, less_equal: comparison_unit, not_equal: comparison_unit, equal: comparison_unit, logical_and: comparison_unit, logical_or: comparison_unit, logical_xor: comparison_unit, logical_not: return_without_unit, maximum: preserve_units, minimum: preserve_units, fmax: preserve_units, fmin: preserve_units, isreal: return_without_unit, iscomplex: return_without_unit, isfinite: return_without_unit, isinf: return_without_unit, isnan: return_without_unit, signbit: return_without_unit, copysign: passthrough_unit, nextafter: preserve_units, modf: passthrough_unit, ldexp: bitop_units, frexp: return_without_unit, floor: passthrough_unit, ceil: passthrough_unit, trunc: passthrough_unit, spacing: passthrough_unit, positive: passthrough_unit, divmod_: passthrough_unit, isnat: return_without_unit, heaviside: preserve_units, } __array_priority__ = 2.0 def __new__(cls, input_array, input_units=None, registry=None, dtype=None, bypass_validation=False): if dtype is None: dtype = getattr(input_array, 'dtype', np.float64) if bypass_validation is True: obj = np.asarray(input_array, dtype=dtype).view(cls) obj.units = input_units if registry is not None: obj.units.registry = registry return obj if input_array is NotImplemented: return input_array.view(cls) if registry is None and isinstance(input_units, (str, bytes)): if input_units.startswith('code_'): raise UnitParseError( "Code units used without referring to a dataset. 
\n" "Perhaps you meant to do something like this instead: \n" "ds.arr(%s, \"%s\")" % (input_array, input_units) ) if isinstance(input_array, YTArray): ret = input_array.view(cls) if input_units is None: if registry is None: ret.units = input_array.units else: units = Unit(str(input_array.units), registry=registry) ret.units = units elif isinstance(input_units, Unit): ret.units = input_units else: ret.units = Unit(input_units, registry=registry) return ret elif isinstance(input_array, np.ndarray): pass elif iterable(input_array) and input_array: if isinstance(input_array[0], YTArray): return YTArray(np.array(input_array, dtype=dtype), input_array[0].units, registry=registry) # Input array is an already formed ndarray instance # We first cast to be our class type obj = np.asarray(input_array, dtype=dtype).view(cls) # Check units type if input_units is None: # Nothing provided. Make dimensionless... units = Unit() elif isinstance(input_units, Unit): if registry and registry is not input_units.registry: units = Unit(str(input_units), registry=registry) else: units = input_units else: # units kwarg set, but it's not a Unit object. # don't handle all the cases here, let the Unit class handle if # it's a str. units = Unit(input_units, registry=registry) # Attach the units obj.units = units return obj def __repr__(self): """ """ return super(YTArray, self).__repr__()+' '+self.units.__repr__() def __str__(self): """ """ return str(self.view(np.ndarray)) + ' ' + str(self.units) # # Start unit conversion methods # def convert_to_units(self, units): """ Convert the array and units to the given units. Parameters ---------- units : Unit object or str The units you want to convert to. """ new_units = _unit_repr_check_same(self.units, units) (conversion_factor, offset) = self.units.get_conversion_factor(new_units) self.units = new_units values = self.d values *= conversion_factor if offset: np.subtract(self, offset*self.uq, self) return self def convert_to_base(self, unit_system="cgs"): """ Convert the array and units to the equivalent base units in the specified unit system. Parameters ---------- unit_system : string, optional The unit system to be used in the conversion. If not specified, the default base units of cgs are used. Examples -------- >>> E = YTQuantity(2.5, "erg/s") >>> E.convert_to_base(unit_system="galactic") """ return self.convert_to_units(self.units.get_base_equivalent(unit_system)) def convert_to_cgs(self): """ Convert the array and units to the equivalent cgs units. """ return self.convert_to_units(self.units.get_cgs_equivalent()) def convert_to_mks(self): """ Convert the array and units to the equivalent mks units. """ return self.convert_to_units(self.units.get_mks_equivalent()) def in_units(self, units, equivalence=None, **kwargs): """ Creates a copy of this array with the data in the supplied units, and returns it. Optionally, an equivalence can be specified to convert to an equivalent quantity which is not in the same dimensions. .. note:: All additional keyword arguments are passed to the equivalency, which should be used if that particular equivalency requires them. Parameters ---------- units : Unit object or string The units you want to get a new quantity in. equivalence : string, optional The equivalence you wish to use. To see which equivalencies are supported for this unitful quantity, try the :meth:`list_equivalencies` method. 
Default: None Returns ------- YTArray """ if equivalence is None: new_units = _unit_repr_check_same(self.units, units) (conversion_factor, offset) = self.units.get_conversion_factor(new_units) new_array = type(self)(self.ndview * conversion_factor, new_units) if offset: np.subtract(new_array, offset*new_array.uq, new_array) return new_array else: return self.to_equivalent(units, equivalence, **kwargs) def to(self, units, equivalence=None, **kwargs): """ An alias for YTArray.in_units(). See the docstrings of that function for details. """ return self.in_units(units, equivalence=equivalence, **kwargs) def to_value(self, units=None, equivalence=None, **kwargs): """ Creates a copy of this array with the data in the supplied units, and returns it without units. Output is therefore a bare NumPy array. Optionally, an equivalence can be specified to convert to an equivalent quantity which is not in the same dimensions. .. note:: All additional keyword arguments are passed to the equivalency, which should be used if that particular equivalency requires them. Parameters ---------- units : Unit object or string, optional The units you want to get the bare quantity in. If not specified, the value will be returned in the current units. equivalence : string, optional The equivalence you wish to use. To see which equivalencies are supported for this unitful quantity, try the :meth:`list_equivalencies` method. Default: None Returns ------- NumPy array """ if units is None: v = self.value else: v = self.in_units(units, equivalence=equivalence, **kwargs).value if isinstance(self, YTQuantity): return float(v) else: return v def in_base(self, unit_system="cgs"): """ Creates a copy of this array with the data in the specified unit system, and returns it in that system's base units. Parameters ---------- unit_system : string, optional The unit system to be used in the conversion. If not specified, the default base units of cgs are used. Examples -------- >>> E = YTQuantity(2.5, "erg/s") >>> E_new = E.in_base(unit_system="galactic") """ return self.in_units(self.units.get_base_equivalent(unit_system)) def in_cgs(self): """ Creates a copy of this array with the data in the equivalent cgs units, and returns it. Returns ------- Quantity object with data converted to cgs units. """ return self.in_units(self.units.get_cgs_equivalent()) def in_mks(self): """ Creates a copy of this array with the data in the equivalent mks units, and returns it. Returns ------- Quantity object with data converted to mks units. """ return self.in_units(self.units.get_mks_equivalent()) def to_equivalent(self, unit, equiv, **kwargs): """ Convert a YTArray or YTQuantity to an equivalent, e.g., something that is related by only a constant factor but not in the same units. Parameters ---------- unit : string The unit that you wish to convert to. equiv : string The equivalence you wish to use. To see which equivalencies are supported for this unitful quantity, try the :meth:`list_equivalencies` method. 
Examples -------- >>> a = yt.YTArray(1.0e7,"K") >>> a.to_equivalent("keV", "thermal") """ conv_unit = Unit(unit, registry=self.units.registry) if self.units.same_dimensions_as(conv_unit): return self.in_units(conv_unit) this_equiv = equivalence_registry[equiv]() oneway_or_equivalent = ( conv_unit.has_equivalent(equiv) or this_equiv._one_way) if self.has_equivalent(equiv) and oneway_or_equivalent: new_arr = this_equiv.convert( self, conv_unit.dimensions, **kwargs) if isinstance(new_arr, tuple): try: return type(self)(new_arr[0], new_arr[1]).in_units(unit) except YTUnitConversionError: raise YTInvalidUnitEquivalence(equiv, self.units, unit) else: return new_arr.in_units(unit) else: raise YTInvalidUnitEquivalence(equiv, self.units, unit) def list_equivalencies(self): """ Lists the possible equivalencies associated with this YTArray or YTQuantity. """ self.units.list_equivalencies() def has_equivalent(self, equiv): """ Check to see if this YTArray or YTQuantity has an equivalent unit in *equiv*. """ return self.units.has_equivalent(equiv) def ndarray_view(self): """ Returns a view into the array, but as an ndarray rather than ytarray. Returns ------- View of this array's data. """ return self.view(np.ndarray) def to_ndarray(self): """ Creates a copy of this array with the unit information stripped """ return np.array(self) @classmethod def from_astropy(cls, arr, unit_registry=None): """ Convert an AstroPy "Quantity" to a YTArray or YTQuantity. Parameters ---------- arr : AstroPy Quantity The Quantity to convert from. unit_registry : yt UnitRegistry, optional A yt unit registry to use in the conversion. If one is not supplied, the default one will be used. """ # Converting from AstroPy Quantity u = arr.unit ap_units = [] for base, exponent in zip(u.bases, u.powers): unit_str = base.to_string() # we have to do this because AstroPy is silly and defines # hour as "h" if unit_str == "h": unit_str = "hr" ap_units.append("%s**(%s)" % (unit_str, Rational(exponent))) ap_units = "*".join(ap_units) if isinstance(arr.value, np.ndarray): return YTArray(arr.value, ap_units, registry=unit_registry) else: return YTQuantity(arr.value, ap_units, registry=unit_registry) def to_astropy(self, **kwargs): """ Creates a new AstroPy quantity with the same unit information. """ if _astropy.units is None: raise ImportError("You don't have AstroPy installed, so you can't convert to " + "an AstroPy quantity.") return self.value*_astropy.units.Unit(str(self.units), **kwargs) @classmethod def from_pint(cls, arr, unit_registry=None): """ Convert a Pint "Quantity" to a YTArray or YTQuantity. Parameters ---------- arr : Pint Quantity The Quantity to convert from. unit_registry : yt UnitRegistry, optional A yt unit registry to use in the conversion. If one is not supplied, the default one will be used. Examples -------- >>> from pint import UnitRegistry >>> import numpy as np >>> ureg = UnitRegistry() >>> a = np.random.random(10) >>> b = ureg.Quantity(a, "erg/cm**3") >>> c = yt.YTArray.from_pint(b) """ p_units = [] for base, exponent in arr._units.items(): bs = convert_pint_units(base) p_units.append("%s**(%s)" % (bs, Rational(exponent))) p_units = "*".join(p_units) if isinstance(arr.magnitude, np.ndarray): return YTArray(arr.magnitude, p_units, registry=unit_registry) else: return YTQuantity(arr.magnitude, p_units, registry=unit_registry) def to_pint(self, unit_registry=None): """ Convert a YTArray or YTQuantity to a Pint Quantity. Parameters ---------- arr : YTArray or YTQuantity The unitful quantity to convert from. 
unit_registry : Pint UnitRegistry, optional The Pint UnitRegistry to use in the conversion. If one is not supplied, the default one will be used. NOTE: This is not the same as a yt UnitRegistry object. Examples -------- >>> a = YTQuantity(4.0, "cm**2/s") >>> b = a.to_pint() """ from pint import UnitRegistry if unit_registry is None: unit_registry = UnitRegistry() powers_dict = self.units.expr.as_powers_dict() units = [] for unit, pow in powers_dict.items(): # we have to do this because Pint doesn't recognize # "yr" as "year" if str(unit).endswith("yr") and len(str(unit)) in [2,3]: unit = str(unit).replace("yr","year") units.append("%s**(%s)" % (unit, Rational(pow))) units = "*".join(units) return unit_registry.Quantity(self.value, units) # # End unit conversion methods # def write_hdf5(self, filename, dataset_name=None, info=None, group_name=None): r"""Writes a YTArray to hdf5 file. Parameters ---------- filename: string The filename to create and write a dataset to dataset_name: string The name of the dataset to create in the file. info: dictionary A dictionary of supplementary info to write to append as attributes to the dataset. group_name: string An optional group to write the arrays to. If not specified, the arrays are datasets at the top level by default. Examples -------- >>> a = YTArray([1,2,3], 'cm') >>> myinfo = {'field':'dinosaurs', 'type':'field_data'} >>> a.write_hdf5('test_array_data.h5', dataset_name='dinosaurs', ... info=myinfo) """ from yt.utilities.on_demand_imports import _h5py as h5py from yt.extern.six.moves import cPickle as pickle if info is None: info = {} info['units'] = str(self.units) info['unit_registry'] = np.void(pickle.dumps(self.units.registry.lut)) if dataset_name is None: dataset_name = 'array_data' f = h5py.File(filename) if group_name is not None: if group_name in f: g = f[group_name] else: g = f.create_group(group_name) else: g = f if dataset_name in g.keys(): d = g[dataset_name] # Overwrite without deleting if we can get away with it. if d.shape == self.shape and d.dtype == self.dtype: d[...] = self for k in d.attrs.keys(): del d.attrs[k] else: del f[dataset_name] d = g.create_dataset(dataset_name, data=self) else: d = g.create_dataset(dataset_name, data=self) for k, v in info.items(): d.attrs[k] = v f.close() @classmethod def from_hdf5(cls, filename, dataset_name=None, group_name=None): r"""Attempts read in and convert a dataset in an hdf5 file into a YTArray. Parameters ---------- filename: string The filename to of the hdf5 file. dataset_name: string The name of the dataset to read from. If the dataset has a units attribute, attempt to infer units as well. group_name: string An optional group to read the arrays from. If not specified, the arrays are datasets at the top level by default. 
""" import h5py from yt.extern.six.moves import cPickle as pickle if dataset_name is None: dataset_name = 'array_data' f = h5py.File(filename) if group_name is not None: g = f[group_name] else: g = f dataset = g[dataset_name] data = dataset[:] units = dataset.attrs.get('units', '') if 'unit_registry' in dataset.attrs.keys(): unit_lut = pickle.loads(dataset.attrs['unit_registry'].tostring()) else: unit_lut = None f.close() registry = UnitRegistry(lut=unit_lut, add_default_symbols=False) return cls(data, units, registry=registry) # # Start convenience methods # @property def value(self): """Get a copy of the array data as a numpy ndarray""" return np.array(self) v = value @property def ndview(self): """Get a view of the array data.""" return self.ndarray_view() d = ndview @property def unit_quantity(self): """Get a YTQuantity with the same unit as this array and a value of 1.0""" return YTQuantity(1.0, self.units) uq = unit_quantity @property def unit_array(self): """Get a YTArray filled with ones with the same unit and shape as this array""" return np.ones_like(self) ua = unit_array def __getitem__(self, item): ret = super(YTArray, self).__getitem__(item) if ret.shape == (): return YTQuantity(ret, self.units, bypass_validation=True) else: if hasattr(self, 'units'): ret.units = self.units return ret # # Start operation methods # if LooseVersion(np.__version__) < LooseVersion('1.13.0'): def __add__(self, right_object): """ Add this ytarray to the object on the right of the `+` operator. Must check for the correct (same dimension) units. """ ro = sanitize_units_add(self, right_object, "addition") return super(YTArray, self).__add__(ro) def __radd__(self, left_object): """ See __add__. """ lo = sanitize_units_add(self, left_object, "addition") return super(YTArray, self).__radd__(lo) def __iadd__(self, other): """ See __add__. """ oth = sanitize_units_add(self, other, "addition") np.add(self, oth, out=self) return self def __sub__(self, right_object): """ Subtract the object on the right of the `-` from this ytarray. Must check for the correct (same dimension) units. """ ro = sanitize_units_add(self, right_object, "subtraction") return super(YTArray, self).__sub__(ro) def __rsub__(self, left_object): """ See __sub__. """ lo = sanitize_units_add(self, left_object, "subtraction") return super(YTArray, self).__rsub__(lo) def __isub__(self, other): """ See __sub__. """ oth = sanitize_units_add(self, other, "subtraction") np.subtract(self, oth, out=self) return self def __neg__(self): """ Negate the data. """ return super(YTArray, self).__neg__() def __mul__(self, right_object): """ Multiply this YTArray by the object on the right of the `*` operator. The unit objects handle being multiplied. """ ro = sanitize_units_mul(self, right_object) return super(YTArray, self).__mul__(ro) def __rmul__(self, left_object): """ See __mul__. """ lo = sanitize_units_mul(self, left_object) return super(YTArray, self).__rmul__(lo) def __imul__(self, other): """ See __mul__. """ oth = sanitize_units_mul(self, other) np.multiply(self, oth, out=self) return self def __div__(self, right_object): """ Divide this YTArray by the object on the right of the `/` operator. """ ro = sanitize_units_mul(self, right_object) return super(YTArray, self).__div__(ro) def __rdiv__(self, left_object): """ See __div__. """ lo = sanitize_units_mul(self, left_object) return super(YTArray, self).__rdiv__(lo) def __idiv__(self, other): """ See __div__. 
""" oth = sanitize_units_mul(self, other) np.divide(self, oth, out=self) return self def __truediv__(self, right_object): ro = sanitize_units_mul(self, right_object) return super(YTArray, self).__truediv__(ro) def __rtruediv__(self, left_object): """ See __div__. """ lo = sanitize_units_mul(self, left_object) return super(YTArray, self).__rtruediv__(lo) def __itruediv__(self, other): """ See __div__. """ oth = sanitize_units_mul(self, other) np.true_divide(self, oth, out=self) return self def __floordiv__(self, right_object): ro = sanitize_units_mul(self, right_object) return super(YTArray, self).__floordiv__(ro) def __rfloordiv__(self, left_object): """ See __div__. """ lo = sanitize_units_mul(self, left_object) return super(YTArray, self).__rfloordiv__(lo) def __ifloordiv__(self, other): """ See __div__. """ oth = sanitize_units_mul(self, other) np.floor_divide(self, oth, out=self) return self def __or__(self, right_object): return super(YTArray, self).__or__(right_object) def __ror__(self, left_object): return super(YTArray, self).__ror__(left_object) def __ior__(self, other): np.bitwise_or(self, other, out=self) return self def __xor__(self, right_object): return super(YTArray, self).__xor__(right_object) def __rxor__(self, left_object): return super(YTArray, self).__rxor__(left_object) def __ixor__(self, other): np.bitwise_xor(self, other, out=self) return self def __and__(self, right_object): return super(YTArray, self).__and__(right_object) def __rand__(self, left_object): return super(YTArray, self).__rand__(left_object) def __iand__(self, other): np.bitwise_and(self, other, out=self) return self def __pow__(self, power): """ Raise this YTArray to some power. Parameters ---------- power : float or dimensionless YTArray. The pow value. """ if isinstance(power, YTArray): if not power.units.is_dimensionless: raise YTUnitOperationError('power', power.unit) # Work around a sympy issue (I think?) # # If I don't do this, super(YTArray, self).__pow__ returns a YTArray # with a unit attribute set to the sympy expression 1/1 rather than # a dimensionless Unit object. if self.units.is_dimensionless and power == -1: ret = super(YTArray, self).__pow__(power) return type(self)(ret, input_units='') return super(YTArray, self).__pow__(power) def __abs__(self): """ Return a YTArray with the abs of the data. """ return super(YTArray, self).__abs__() # # Start comparison operators. # def __lt__(self, other): """ Test if this is less than the object on the right. """ # converts if possible oth = validate_comparison_units(self, other, 'less_than') return super(YTArray, self).__lt__(oth) def __le__(self, other): """Test if this is less than or equal to the object on the right. """ oth = validate_comparison_units(self, other, 'less_than or equal') return super(YTArray, self).__le__(oth) def __eq__(self, other): """ Test if this is equal to the object on the right. """ # Check that other is a YTArray. if other is None: # self is a YTArray, so it can't be None. return False oth = validate_comparison_units(self, other, 'equal') return super(YTArray, self).__eq__(oth) def __ne__(self, other): """ Test if this is not equal to the object on the right. """ # Check that the other is a YTArray. if other is None: return True oth = validate_comparison_units(self, other, 'not equal') return super(YTArray, self).__ne__(oth) def __ge__(self, other): """ Test if this is greater than or equal to other. """ # Check that the other is a YTArray. 
oth = validate_comparison_units( self, other, 'greater than or equal') return super(YTArray, self).__ge__(oth) def __gt__(self, other): """ Test if this is greater than the object on the right. """ # Check that the other is a YTArray. oth = validate_comparison_units(self, other, 'greater than') return super(YTArray, self).__gt__(oth) # # End comparison operators # # # Begin reduction operators # @return_arr def prod(self, axis=None, dtype=None, out=None): if axis is not None: units = self.units**self.shape[axis] else: units = self.units**self.size return super(YTArray, self).prod(axis, dtype, out), units @return_arr def mean(self, axis=None, dtype=None, out=None): return super(YTArray, self).mean(axis, dtype, out), self.units @return_arr def sum(self, axis=None, dtype=None, out=None): return super(YTArray, self).sum(axis, dtype, out), self.units @return_arr def std(self, axis=None, dtype=None, out=None, ddof=0): return super(YTArray, self).std(axis, dtype, out, ddof), self.units def __array_wrap__(self, out_arr, context=None): ret = super(YTArray, self).__array_wrap__(out_arr, context) if isinstance(ret, YTQuantity) and ret.shape != (): ret = ret.view(YTArray) if context is None: if ret.shape == (): return ret[()] else: return ret ufunc = context[0] inputs = context[1] if ufunc in unary_operators: out_arr, inp, u = get_inp_u_unary(ufunc, inputs, out_arr) unit = self._ufunc_registry[context[0]](u) ret_class = type(self) elif ufunc in binary_operators: unit_operator = self._ufunc_registry[context[0]] inps, units, ret_class = get_inp_u_binary(ufunc, inputs) if unit_operator in (preserve_units, comparison_unit, arctan2_unit): inps, units = handle_comparison_units( inps, units, ufunc, ret_class, raise_error=True) unit = unit_operator(*units) if unit_operator in (multiply_units, divide_units): out_arr, out_arr, unit = handle_multiply_divide_units( unit, units, out_arr, out_arr) else: raise RuntimeError( "Support for the %s ufunc has not been added " "to YTArray." % str(context[0])) if unit is None: out_arr = np.array(out_arr, copy=False) return out_arr out_arr.units = unit if out_arr.size == 1: return YTQuantity(np.array(out_arr), unit) else: if ret_class is YTQuantity: # This happens if you do ndarray * YTQuantity. 
Explicitly # casting to YTArray avoids creating a YTQuantity with # size > 1 return YTArray(np.array(out_arr), unit) return ret_class(np.array(out_arr, copy=False), unit) else: # numpy version equal to or newer than 1.13 def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): func = getattr(ufunc, method) if 'out' in kwargs: out_orig = kwargs.pop('out') out = np.asarray(out_orig[0]) else: out = None if len(inputs) == 1: _, inp, u = get_inp_u_unary(ufunc, inputs) out_arr = func(np.asarray(inp), out=out, **kwargs) if ufunc in (multiply, divide) and method == 'reduce': power_sign = POWER_SIGN_MAPPING[ufunc] if 'axis' in kwargs and kwargs['axis'] is not None: unit = u**(power_sign*inp.shape[kwargs['axis']]) else: unit = u**(power_sign*inp.size) else: unit = self._ufunc_registry[ufunc](u) ret_class = type(self) elif len(inputs) == 2: unit_operator = self._ufunc_registry[ufunc] inps, units, ret_class = get_inp_u_binary(ufunc, inputs) if unit_operator in (comparison_unit, arctan2_unit): inps, units = handle_comparison_units( inps, units, ufunc, ret_class) elif unit_operator is preserve_units: inps, units = handle_preserve_units( inps, units, ufunc, ret_class) unit = unit_operator(*units) out_arr = func(np.asarray(inps[0]), np.asarray(inps[1]), out=out, **kwargs) if unit_operator in (multiply_units, divide_units): out, out_arr, unit = handle_multiply_divide_units( unit, units, out, out_arr) else: raise RuntimeError( "Support for the %s ufunc with %i inputs has not been" "added to YTArray." % (str(ufunc), len(inputs))) if unit is None: out_arr = np.array(out_arr, copy=False) elif ufunc in (modf, divmod_): out_arr = tuple((ret_class(o, unit) for o in out_arr)) elif out_arr.size == 1: out_arr = YTQuantity(np.asarray(out_arr), unit) else: if ret_class is YTQuantity: # This happens if you do ndarray * YTQuantity. Explicitly # casting to YTArray avoids creating a YTQuantity with # size > 1 out_arr = YTArray(np.asarray(out_arr), unit) else: out_arr = ret_class(np.asarray(out_arr), unit) if out is not None: out_orig[0].flat[:] = out.flat[:] if isinstance(out_orig[0], YTArray): out_orig[0].units = unit return out_arr def copy(self, order='C'): return type(self)(np.copy(np.asarray(self)), self.units) def __array_finalize__(self, obj): if obj is None and hasattr(self, 'units'): return self.units = getattr(obj, 'units', NULL_UNIT) def __pos__(self): """ Posify the data. """ # this needs to be defined for all numpy versions, see # numpy issue #9081 return type(self)(super(YTArray, self).__pos__(), self.units) @return_arr def dot(self, b, out=None): return super(YTArray, self).dot(b), self.units*b.units def __reduce__(self): """Pickle reduction method See the documentation for the standard library pickle module: http://docs.python.org/2/library/pickle.html Unit metadata is encoded in the zeroth element of third element of the returned tuple, itself a tuple used to restore the state of the ndarray. This is always defined for numpy arrays. """ np_ret = super(YTArray, self).__reduce__() obj_state = np_ret[2] unit_state = (((str(self.units), self.units.registry.lut),) + obj_state[:],) new_ret = np_ret[:2] + unit_state + np_ret[3:] return new_ret def __setstate__(self, state): """Pickle setstate method This is called inside pickle.read() and restores the unit data from the metadata extracted in __reduce__ and then serialized by pickle. 
""" super(YTArray, self).__setstate__(state[1:]) try: unit, lut = state[0] except TypeError: # this case happens when we try to load an old pickle file # created before we serialized the unit symbol lookup table # into the pickle file unit, lut = str(state[0]), default_unit_symbol_lut.copy() # need to fix up the lut if the pickle was saved prior to PR #1728 # when the pickle format changed if len(lut['m']) == 2: lut.update(default_unit_symbol_lut) for k, v in [(k, v) for k, v in lut.items() if len(v) == 2]: lut[k] = v + (0.0, r'\rm{' + k.replace('_', '\ ') + '}') registry = UnitRegistry(lut=lut, add_default_symbols=False) self.units = Unit(unit, registry=registry) def __deepcopy__(self, memodict=None): """copy.deepcopy implementation This is necessary for stdlib deepcopy of arrays and quantities. """ if memodict is None: memodict = {} ret = super(YTArray, self).__deepcopy__(memodict) return type(self)(ret, copy.deepcopy(self.units)) class YTQuantity(YTArray): """ A scalar associated with a unit. Parameters ---------- input_scalar : an integer or floating point scalar The scalar to attach units to input_units : String unit specification, unit symbol object, or astropy units The units of the quantity. Powers must be specified using python syntax (cm**3, not cm^3). registry : A UnitRegistry object The registry to create units from. If input_units is already associated with a unit registry and this is specified, this will be used instead of the registry associated with the unit object. dtype : data-type The dtype of the array data. Examples -------- >>> from yt import YTQuantity >>> a = YTQuantity(1, 'cm') >>> b = YTQuantity(2, 'm') >>> a + b 201.0 cm >>> b + a 2.01 m NumPy ufuncs will pass through units where appropriate. >>> import numpy as np >>> a = YTQuantity(12, 'g/cm**3') >>> np.abs(a) 12 g/cm**3 and strip them when it would be annoying to deal with them. >>> print(np.log10(a)) 1.07918124605 YTQuantity is tightly integrated with yt datasets: >>> import yt >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030') >>> a = ds.quan(5, 'code_length') >>> a.in_cgs() 1.543e+25 cm This is equivalent to: >>> b = YTQuantity(5, 'code_length', registry=ds.unit_registry) >>> np.all(a == b) True """ def __new__(cls, input_scalar, input_units=None, registry=None, dtype=np.float64, bypass_validation=False): if not isinstance(input_scalar, (numeric_type, np.number, np.ndarray)): raise RuntimeError("YTQuantity values must be numeric") ret = YTArray.__new__(cls, input_scalar, input_units, registry, dtype=dtype, bypass_validation=bypass_validation) if ret.size > 1: raise RuntimeError("YTQuantity instances must be scalars") return ret def __repr__(self): return str(self) def validate_numpy_wrapper_units(v, arrs): if not any(isinstance(a, YTArray) for a in arrs): return v if not all(isinstance(a, YTArray) for a in arrs): raise RuntimeError("Not all of your arrays are YTArrays.") a1 = arrs[0] if not all(a.units == a1.units for a in arrs[1:]): raise RuntimeError("Your arrays must have identical units.") v.units = a1.units return v def uconcatenate(arrs, axis=0): """Concatenate a sequence of arrays. This wrapper around numpy.concatenate preserves units. All input arrays must have the same units. See the documentation of numpy.concatenate for full details. 
Examples -------- >>> A = yt.YTArray([1, 2, 3], 'cm') >>> B = yt.YTArray([2, 3, 4], 'cm') >>> uconcatenate((A, B)) YTArray([ 1., 2., 3., 2., 3., 4.]) cm """ v = np.concatenate(arrs, axis=axis) v = validate_numpy_wrapper_units(v, arrs) return v def ucross(arr1, arr2, registry=None, axisa=-1, axisb=-1, axisc=-1, axis=None): """Applies the cross product to two YT arrays. This wrapper around numpy.cross preserves units. See the documentation of numpy.cross for full details. """ v = np.cross(arr1, arr2, axisa=axisa, axisb=axisb, axisc=axisc, axis=axis) units = arr1.units * arr2.units arr = YTArray(v, units, registry=registry) return arr def uintersect1d(arr1, arr2, assume_unique=False): """Find the sorted unique elements of the two input arrays. A wrapper around numpy.intersect1d that preserves units. All input arrays must have the same units. See the documentation of numpy.intersect1d for full details. Examples -------- >>> A = yt.YTArray([1, 2, 3], 'cm') >>> B = yt.YTArray([2, 3, 4], 'cm') >>> uintersect1d(A, B) YTArray([ 2., 3.]) cm """ v =
np.intersect1d(arr1, arr2, assume_unique=assume_unique)
numpy.intersect1d
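As a quick check of the predicted call, a minimal sketch of numpy.intersect1d on plain arrays; the uintersect1d wrapper above exists only to re-attach the shared units to the result, per its docstring:

import numpy as np

A = np.array([1.0, 2.0, 3.0])
B = np.array([2.0, 3.0, 4.0])
# Sorted unique values present in both inputs. Passing assume_unique=True
# skips the internal de-duplication when the caller guarantees uniqueness.
common = np.intersect1d(A, B, assume_unique=False)
# -> array([2., 3.])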
'''
-------------------------------------------------------------------------------------------------
This code accompanies the paper titled "Human injury-based safety decision of automated vehicles"
Author: <NAME>, <NAME>, <NAME>, <NAME>
Corresponding author: <NAME> (<EMAIL>)
-------------------------------------------------------------------------------------------------
'''
import torch
import numpy as np
from torch import nn
from torch.nn.utils import weight_norm

__author__ = "<NAME>"


def Collision_cond(veh_striking_list, V1_v, V2_v, delta_angle, veh_param):
    ''' Estimate the collision condition. '''
    (veh_l, veh_w, veh_cgf, veh_cgs, veh_k, veh_m) = veh_param

    delta_angle_2 = np.arccos(np.abs(np.cos(delta_angle)))
    if -1e-6 < delta_angle_2 < 1e-6:
        delta_angle_2 = 1e-6

    delta_v1_list = []
    delta_v2_list = []

    # Estimate the collision condition (delta-v) according to the principal impact direction.
    for veh_striking in veh_striking_list:
        if veh_striking[0] == 1:
            veh_ca = np.arctan(veh_cgf[0] / veh_cgs[0])
            veh_a2 = np.abs(veh_cgs[1] - veh_striking[3])
            veh_RDS = np.abs(V1_v * np.cos(delta_angle) - V2_v)
            veh_a1 = np.abs(np.sqrt(veh_cgf[0] ** 2 + veh_cgs[0] ** 2) * np.cos(veh_ca + delta_angle_2))
            if (veh_striking[1]+1) in [16, 1, 2, 3, 17, 20, 21] and (veh_striking[2]+1) in [16, 1, 2, 3, 17, 20, 21]:
                veh_e = 2 / veh_RDS
            else:
                veh_e = 0.5 / veh_RDS
        elif veh_striking[0] == 2:
            veh_ca = np.arctan(veh_cgf[0] / veh_cgs[0])
            veh_a2 = np.abs(veh_cgf[1] - veh_striking[3])
            veh_a1 = np.abs(np.sqrt(veh_cgf[0] ** 2 + veh_cgs[0] ** 2) * np.cos(delta_angle_2 - veh_ca + np.pi / 2))
            veh_RDS = V1_v * np.sin(delta_angle_2)
            veh_e = 1.5 / veh_RDS
        elif veh_striking[0] == 3:
            veh_ca = np.arctan(veh_cgf[1] / veh_cgs[1])
            veh_a1 = np.abs(veh_cgs[0] - veh_striking[3])
            veh_RDS = np.abs(V2_v * np.cos(delta_angle) - V1_v)
            veh_a2 = np.abs(
np.sqrt(veh_cgf[1] ** 2 + veh_cgs[1] ** 2)
numpy.sqrt
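The truncated completion begins the lever-arm term for this impact branch: the distance from the centre of gravity to the struck corner, i.e. the Euclidean norm of the front/side CG offsets. A small worked sketch under assumed values (the offsets and angle below are illustrative, not taken from the paper; the cosine projection mirrors the pattern of the other branches of Collision_cond):

import numpy as np

veh_cgf_1, veh_cgs_1 = 1.2, 1.5       # assumed CG offsets of the struck vehicle, in metres
delta_angle_2 = np.deg2rad(30.0)      # assumed principal impact direction
veh_ca = np.arctan(veh_cgf_1 / veh_cgs_1)
# Norm of the CG offset vector, projected by the impact angle relative to
# the corner direction.
veh_a2 = np.abs(np.sqrt(veh_cgf_1 ** 2 + veh_cgs_1 ** 2) * np.cos(veh_ca + delta_angle_2))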
from abc import ABCMeta, abstractmethod
import os

from vmaf.tools.misc import make_absolute_path, run_process
from vmaf.tools.stats import ListStats

__copyright__ = "Copyright 2016-2018, Netflix, Inc."
__license__ = "Apache, Version 2.0"

import re
import numpy as np
import ast

from vmaf import ExternalProgramCaller, to_list
from vmaf.config import VmafConfig, VmafExternalConfig
from vmaf.core.executor import Executor
from vmaf.core.result import Result
from vmaf.tools.reader import YuvReader


class FeatureExtractor(Executor):
    """
    FeatureExtractor takes in a list of assets, runs feature extraction on
    them, and returns a list of corresponding results. A FeatureExtractor must
    specify a unique type and version combination (by the TYPE and VERSION
    attributes), so that the Result generated by it can be identified.

    A derived class of FeatureExtractor must:
        1) Override TYPE and VERSION
        2) Override _generate_result(self, asset), which calls a
        command-line executable and generates feature scores in a log file.
        3) Override _get_feature_scores(self, asset), which reads the feature
        scores from the log file, and returns the scores in a dictionary format.
    For an example, follow VmafFeatureExtractor.
    """

    __metaclass__ = ABCMeta

    @property
    @abstractmethod
    def ATOM_FEATURES(self):
        raise NotImplementedError

    def _read_result(self, asset):
        result = {}
        result.update(self._get_feature_scores(asset))
        executor_id = self.executor_id
        return Result(asset, executor_id, result)

    @classmethod
    def get_scores_key(cls, atom_feature):
        return "{type}_{atom_feature}_scores".format(
            type=cls.TYPE, atom_feature=atom_feature)

    @classmethod
    def get_score_key(cls, atom_feature):
        return "{type}_{atom_feature}_score".format(
            type=cls.TYPE, atom_feature=atom_feature)

    def _get_feature_scores(self, asset):
        # routine to read the feature scores from the log file, and return
        # the scores in a dictionary format.

        log_file_path = self._get_log_file_path(asset)

        atom_feature_scores_dict = {}
        atom_feature_idx_dict = {}
        for atom_feature in self.ATOM_FEATURES:
            atom_feature_scores_dict[atom_feature] = []
            atom_feature_idx_dict[atom_feature] = 0

        with open(log_file_path, 'rt') as log_file:
            for line in log_file.readlines():
                for atom_feature in self.ATOM_FEATURES:
                    re_template = "{af}: ([0-9]+) ([a-zA-Z0-9.-]+)".format(af=atom_feature)
                    mo = re.match(re_template, line)
                    if mo:
                        cur_idx = int(mo.group(1))
                        assert cur_idx == atom_feature_idx_dict[atom_feature]
                        # parse value, allowing NaN and inf
                        val = float(mo.group(2))
                        if np.isnan(val) or np.isinf(val):
                            val = None
                        atom_feature_scores_dict[atom_feature].append(val)
                        atom_feature_idx_dict[atom_feature] += 1
                        continue

        len_score = len(atom_feature_scores_dict[self.ATOM_FEATURES[0]])
        assert len_score != 0
        for atom_feature in self.ATOM_FEATURES[1:]:
            assert len_score == len(atom_feature_scores_dict[atom_feature]), \
                "Feature data possibly corrupt. Run cleanup script and try again."
feature_result = {} for atom_feature in self.ATOM_FEATURES: scores_key = self.get_scores_key(atom_feature) feature_result[scores_key] = atom_feature_scores_dict[atom_feature] return feature_result class VmafFeatureExtractor(FeatureExtractor): TYPE = "VMAF_feature" # VERSION = '0.1' # vmaf_study; Anush's VIF fix # VERSION = '0.2' # expose vif_num, vif_den, adm_num, adm_den, anpsnr # VERSION = '0.2.1' # expose vif num/den of each scale # VERSION = '0.2.2' # adm abs-->fabs, corrected border handling, uniform reading with option of offset for input YUV, updated VIF corner case # VERSION = '0.2.2b' # expose adm_den/num_scalex # VERSION = '0.2.3' # AVX for VMAF convolution; update adm features by folding noise floor into per coef # VERSION = '0.2.4' # Fix a bug in adm feature passing scale into dwt_quant_step # VERSION = '0.2.4b' # Modify by adding ADM noise floor outside cube root; add derived feature motion2 VERSION = '0.2.4c' # Modify by moving motion2 to c code ATOM_FEATURES = ['vif', 'adm', 'ansnr', 'motion', 'motion2', 'vif_num', 'vif_den', 'adm_num', 'adm_den', 'anpsnr', 'vif_num_scale0', 'vif_den_scale0', 'vif_num_scale1', 'vif_den_scale1', 'vif_num_scale2', 'vif_den_scale2', 'vif_num_scale3', 'vif_den_scale3', 'adm_num_scale0', 'adm_den_scale0', 'adm_num_scale1', 'adm_den_scale1', 'adm_num_scale2', 'adm_den_scale2', 'adm_num_scale3', 'adm_den_scale3', ] DERIVED_ATOM_FEATURES = ['vif_scale0', 'vif_scale1', 'vif_scale2', 'vif_scale3', 'vif2', 'adm2', 'adm3', 'adm_scale0', 'adm_scale1', 'adm_scale2', 'adm_scale3', ] ADM2_CONSTANT = 0 ADM_SCALE_CONSTANT = 0 def _generate_result(self, asset): # routine to call the command-line executable and generate feature # scores in the log file. quality_width, quality_height = asset.quality_width_height log_file_path = self._get_log_file_path(asset) yuv_type=self._get_workfile_yuv_type(asset) ref_path=asset.ref_workfile_path dis_path=asset.dis_workfile_path w=quality_width h=quality_height logger = self.logger ExternalProgramCaller.call_vmaf_feature(yuv_type, ref_path, dis_path, w, h, log_file_path, logger) @classmethod def _post_process_result(cls, result): # override Executor._post_process_result result = super(VmafFeatureExtractor, cls)._post_process_result(result) # adm2 = # (adm_num + ADM2_CONSTANT) / (adm_den + ADM2_CONSTANT) adm2_scores_key = cls.get_scores_key('adm2') adm_num_scores_key = cls.get_scores_key('adm_num') adm_den_scores_key = cls.get_scores_key('adm_den') result.result_dict[adm2_scores_key] = list( (np.array(result.result_dict[adm_num_scores_key]) + cls.ADM2_CONSTANT) / (np.array(result.result_dict[adm_den_scores_key]) + cls.ADM2_CONSTANT) ) # vif_scalei = vif_num_scalei / vif_den_scalei, i = 0, 1, 2, 3 vif_num_scale0_scores_key = cls.get_scores_key('vif_num_scale0') vif_den_scale0_scores_key = cls.get_scores_key('vif_den_scale0') vif_num_scale1_scores_key = cls.get_scores_key('vif_num_scale1') vif_den_scale1_scores_key = cls.get_scores_key('vif_den_scale1') vif_num_scale2_scores_key = cls.get_scores_key('vif_num_scale2') vif_den_scale2_scores_key = cls.get_scores_key('vif_den_scale2') vif_num_scale3_scores_key = cls.get_scores_key('vif_num_scale3') vif_den_scale3_scores_key = cls.get_scores_key('vif_den_scale3') vif_scale0_scores_key = cls.get_scores_key('vif_scale0') vif_scale1_scores_key = cls.get_scores_key('vif_scale1') vif_scale2_scores_key = cls.get_scores_key('vif_scale2') vif_scale3_scores_key = cls.get_scores_key('vif_scale3') result.result_dict[vif_scale0_scores_key] = list( (
np.array(result.result_dict[vif_num_scale0_scores_key])
numpy.array
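The masked call here, numpy.array, simply lifts the per-frame score lists out of result_dict into ndarrays so the division is elementwise. A hedged sketch with made-up scores:

import numpy as np

# Hypothetical per-frame VIF numerator/denominator scores (illustrative only).
vif_num_scale0 = [0.91, 0.88, 0.93]
vif_den_scale0 = [1.00, 0.99, 1.01]

# Plain Python lists would not divide elementwise; np.array makes them
# ndarrays, and list() converts the ratio back to the list form that the
# result_dict stores.
vif_scale0 = list(np.array(vif_num_scale0) / np.array(vif_den_scale0))
print(vif_scale0)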
#!/usr/bin/env python # encoding: utf-8 import numbers import os import re import sys from itertools import chain import numpy as np import scipy.sparse as sp import six import pickle from .model import get_convo_nn2 from .stop_words import THAI_STOP_WORDS from .utils import CHAR_TYPES_MAP, CHARS_MAP, create_feature_array MODULE_PATH = os.path.dirname(__file__) WEIGHT_PATH = os.path.join(MODULE_PATH, 'weight', 'cnn_without_ne_ab.h5') TOKENIZER = None def tokenize(text, custom_dict=None): """ Tokenize given Thai text string Input ===== text: str, Thai text string custom_dict: str (or list), path to customized dictionary file It allows the function not to tokenize given dictionary wrongly. The file should contain custom words separated by line. Alternatively, you can provide list of custom words too. Output ====== tokens: list, list of tokenized words Example ======= >> deepcut.tokenize('ตัดคำได้ดีมาก') >> ['ตัดคำ','ได้','ดี','มาก'] """ global TOKENIZER if not TOKENIZER: TOKENIZER = DeepcutTokenizer() return TOKENIZER.tokenize(text, custom_dict=custom_dict) def _custom_dict(word, text, word_end): word_length = len(word) initial_loc = 0 while True: try: start_char = re.search(word, text).start() first_char = start_char + initial_loc last_char = first_char + word_length - 1 initial_loc += start_char + word_length text = text[start_char + word_length:] word_end[first_char:last_char] = (word_length - 1) * [0] word_end[last_char] = 1 except: break return word_end def _document_frequency(X): """ Count the number of non-zero values for each feature in sparse X. """ if sp.isspmatrix_csr(X): return np.bincount(X.indices, minlength=X.shape[1]) return np.diff(sp.csc_matrix(X, copy=False).indptr) def _check_stop_list(stop): """ Check stop words list ref: https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py#L87-L95 """ if stop == "thai": return THAI_STOP_WORDS elif isinstance(stop, six.string_types): raise ValueError("not a built-in stop list: %s" % stop) elif stop is None: return None # assume it's a collection return frozenset(stop) def load_model(file_path): """ Load saved pickle file of DeepcutTokenizer Parameters ========== file_path: str, path to saved model from ``save_model`` method in DeepcutTokenizer """ tokenizer = pickle.load(open(file_path, 'rb')) tokenizer.model = get_convo_nn2() tokenizer.model.load_weights(WEIGHT_PATH) return tokenizer class DeepcutTokenizer(object): """ Class for tokenizing given Thai text documents using deepcut library Parameters ========== ngram_range : tuple, tuple for ngram range for vocabulary, (1, 1) for unigram and (1, 2) for bigram stop_words : list or set, list or set of stop words to be removed if None, max_df can be set to value [0.7, 1.0) to automatically remove vocabulary.
If using "thai", a pre-populated list of Thai stop words is used max_features : int or None, if provided, only consider this many top vocabulary terms, ordered by term frequency max_df : float in range [0.0, 1.0] or int, default=1.0 ignore terms that have a document frequency higher than the given threshold min_df : float in range [0.0, 1.0] or int, default=1 ignore terms that have a document frequency lower than the given threshold dtype : type, optional Example ======= raw_documents = ['ฉันอยากกินข้าวของฉัน', 'ฉันอยากกินไก่', 'อยากนอนอย่างสงบ'] tokenizer = DeepcutTokenizer(ngram_range=(1, 1)) X = tokenizer.fit_tranform(raw_documents) # document-term matrix in sparse CSR format >> X.todense() >> [[0, 0, 1, 0, 1, 0, 2, 1], [0, 1, 1, 0, 1, 0, 1, 0], [1, 0, 0, 1, 1, 1, 0, 0]] >> tokenizer.vocabulary_ >> {'นอน': 0, 'ไก่': 1, 'กิน': 2, 'อย่าง': 3, 'อยาก': 4, 'สงบ': 5, 'ฉัน': 6, 'ข้าว': 7} """ def __init__(self, ngram_range=(1, 1), stop_words=None, max_df=1.0, min_df=1, max_features=None, dtype=np.dtype('float64')): self.model = get_convo_nn2() self.model.load_weights(WEIGHT_PATH) self.vocabulary_ = {} self.ngram_range = ngram_range self.dtype = dtype self.max_df = max_df self.min_df = min_df if max_df < 0 or min_df < 0: raise ValueError("negative value for max_df or min_df") self.max_features = max_features self.stop_words = _check_stop_list(stop_words) def _word_ngrams(self, tokens): """ Turn tokens into a sequence of n-grams ref: https://github.com/scikit-learn/scikit-learn/blob/ef5cb84a/sklearn/feature_extraction/text.py#L124-L153 """ # handle stop words if self.stop_words is not None: tokens = [w for w in tokens if w not in self.stop_words] # handle token n-grams min_n, max_n = self.ngram_range if max_n != 1: original_tokens = tokens if min_n == 1: # no need to do any slicing for unigrams # just iterate through the original tokens tokens = list(original_tokens) min_n += 1 else: tokens = [] n_original_tokens = len(original_tokens) # bind method outside of loop to reduce overhead tokens_append = tokens.append space_join = " ".join for n in range(min_n, min(max_n + 1, n_original_tokens + 1)): for i in range(n_original_tokens - n + 1): tokens_append(space_join(original_tokens[i: i + n])) return tokens def _limit_features(self, X, vocabulary, high=None, low=None, limit=None): """Remove too rare or too common features. ref: https://github.com/scikit-learn/scikit-learn/blob/ef5cb84a/sklearn/feature_extraction/text.py#L734-L773 """ if high is None and low is None and limit is None: return X, set() # Calculate a mask based on document frequencies dfs = _document_frequency(X) mask = np.ones(len(dfs), dtype=bool) if high is not None: mask &= dfs <= high if low is not None: mask &= dfs >= low if limit is not None and mask.sum() > limit: tfs = np.asarray(X.sum(axis=0)).ravel() mask_inds = (-tfs[mask]).argsort()[:limit] new_mask = np.zeros(len(dfs), dtype=bool) new_mask[
np.where(mask)
numpy.where
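numpy.where on a boolean mask returns (as a tuple) the indices of the True entries; indexing that with the top-`limit` positions by term frequency rebuilds the tighter mask used by _limit_features. A small self-contained sketch with invented frequencies:

import numpy as np

dfs = np.array([1, 5, 3, 8, 2])       # document frequency per feature
tfs = np.array([10, 50, 30, 80, 20])  # term frequency per feature
limit = 2                             # keep only the two most frequent

mask = dfs >= 2                       # features passing the min_df cut
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
# np.where(mask)[0] -> indices of surviving features; pick the top-tf ones.
new_mask[np.where(mask)[0][mask_inds]] = True
print(new_mask)  # -> [False  True False  True False]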
""" YTArray class. """ from __future__ import print_function #----------------------------------------------------------------------------- # Copyright (c) 2013, yt Development Team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. #----------------------------------------------------------------------------- import copy import numpy as np from distutils.version import LooseVersion from functools import wraps from numpy import \ add, subtract, multiply, divide, logaddexp, logaddexp2, true_divide, \ floor_divide, negative, power, remainder, mod, absolute, rint, \ sign, conj, exp, exp2, log, log2, log10, expm1, log1p, sqrt, square, \ reciprocal, sin, cos, tan, arcsin, arccos, arctan, arctan2, \ hypot, sinh, cosh, tanh, arcsinh, arccosh, arctanh, deg2rad, rad2deg, \ bitwise_and, bitwise_or, bitwise_xor, invert, left_shift, right_shift, \ greater, greater_equal, less, less_equal, not_equal, equal, logical_and, \ logical_or, logical_xor, logical_not, maximum, minimum, fmax, fmin, \ isreal, iscomplex, isfinite, isinf, isnan, signbit, copysign, nextafter, \ modf, ldexp, frexp, fmod, floor, ceil, trunc, fabs, spacing try: # numpy 1.13 or newer from numpy import positive, divmod as divmod_, isnat, heaviside except ImportError: positive, divmod_, isnat, heaviside = (None,)*4 from yt.units.unit_object import Unit, UnitParseError from yt.units.unit_registry import UnitRegistry from yt.units.dimensions import \ angle, \ current_mks, \ dimensionless, \ em_dimensions from yt.utilities.exceptions import \ YTUnitOperationError, YTUnitConversionError, \ YTUfuncUnitError, YTIterableUnitCoercionError, \ YTInvalidUnitEquivalence, YTEquivalentDimsError from yt.utilities.lru_cache import lru_cache from numbers import Number as numeric_type from yt.utilities.on_demand_imports import _astropy from sympy import Rational from yt.units.unit_lookup_table import \ default_unit_symbol_lut from yt.units.equivalencies import equivalence_registry from yt.utilities.logger import ytLogger as mylog from .pint_conversions import convert_pint_units NULL_UNIT = Unit() POWER_SIGN_MAPPING = {multiply: 1, divide: -1} # redefine this here to avoid a circular import from yt.funcs def iterable(obj): try: len(obj) except: return False return True def return_arr(func): @wraps(func) def wrapped(*args, **kwargs): ret, units = func(*args, **kwargs) if ret.shape == (): return YTQuantity(ret, units) else: # This could be a subclass, so don't call YTArray directly. 
return type(args[0])(ret, units) return wrapped @lru_cache(maxsize=128, typed=False) def sqrt_unit(unit): return unit**0.5 @lru_cache(maxsize=128, typed=False) def multiply_units(unit1, unit2): return unit1 * unit2 def preserve_units(unit1, unit2=None): return unit1 @lru_cache(maxsize=128, typed=False) def power_unit(unit, power): return unit**power @lru_cache(maxsize=128, typed=False) def square_unit(unit): return unit*unit @lru_cache(maxsize=128, typed=False) def divide_units(unit1, unit2): return unit1/unit2 @lru_cache(maxsize=128, typed=False) def reciprocal_unit(unit): return unit**-1 def passthrough_unit(unit, unit2=None): return unit def return_without_unit(unit, unit2=None): return None def arctan2_unit(unit1, unit2): return NULL_UNIT def comparison_unit(unit1, unit2=None): return None def invert_units(unit): raise TypeError( "Bit-twiddling operators are not defined for YTArray instances") def bitop_units(unit1, unit2): raise TypeError( "Bit-twiddling operators are not defined for YTArray instances") def get_inp_u_unary(ufunc, inputs, out_arr=None): inp = inputs[0] u = getattr(inp, 'units', None) if u is None: u = NULL_UNIT if u.dimensions is angle and ufunc in trigonometric_operators: inp = inp.in_units('radian').v if out_arr is not None: out_arr = ufunc(inp).view(np.ndarray) return out_arr, inp, u def get_inp_u_binary(ufunc, inputs): inp1 = coerce_iterable_units(inputs[0]) inp2 = coerce_iterable_units(inputs[1]) unit1 = getattr(inp1, 'units', None) unit2 = getattr(inp2, 'units', None) ret_class = get_binary_op_return_class(type(inp1), type(inp2)) if unit1 is None: unit1 = Unit(registry=getattr(unit2, 'registry', None)) if unit2 is None and ufunc is not power: unit2 = Unit(registry=getattr(unit1, 'registry', None)) elif ufunc is power: unit2 = inp2 if isinstance(unit2, np.ndarray): if isinstance(unit2, YTArray): if unit2.units.is_dimensionless: pass else: raise YTUnitOperationError(ufunc, unit1, unit2) unit2 = 1.0 return (inp1, inp2), (unit1, unit2), ret_class def handle_preserve_units(inps, units, ufunc, ret_class): if units[0] != units[1]: any_nonzero = [np.any(inps[0]), np.any(inps[1])] if any_nonzero[0] == np.bool_(False): units = (units[1], units[1]) elif any_nonzero[1] ==
np.bool_(False)
numpy.bool_
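The comparison against np.bool_(False) works because np.any returns a NumPy bool scalar; an all-zero operand carries no usable unit information, so handle_preserve_units adopts the other operand's units. A minimal sketch of just the NumPy mechanics, with illustrative arrays:

import numpy as np

a = np.zeros(3)                 # all zeros: np.any(a) is np.bool_(False)
b = np.array([1.0, 2.0, 0.0])   # has nonzero entries

any_nonzero = [np.any(a), np.any(b)]
print(type(any_nonzero[0]))               # <class 'numpy.bool_'>
print(any_nonzero[0] == np.bool_(False))  # True  -> fall back to b's units
print(any_nonzero[1] == np.bool_(False))  # False -> b is informative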
"""Routines for numerical differentiation.""" from __future__ import division import numpy as np from numpy.linalg import norm from scipy.sparse.linalg import LinearOperator from ..sparse import issparse, csc_matrix, csr_matrix, coo_matrix, find from ._group_columns import group_dense, group_sparse EPS = np.finfo(np.float64).eps def _adjust_scheme_to_bounds(x0, h, num_steps, scheme, lb, ub): """Adjust final difference scheme to the presence of bounds. Parameters ---------- x0 : ndarray, shape (n,) Point at which we wish to estimate derivative. h : ndarray, shape (n,) Desired finite difference steps. num_steps : int Number of `h` steps in one direction required to implement finite difference scheme. For example, 2 means that we need to evaluate f(x0 + 2 * h) or f(x0 - 2 * h) scheme : {'1-sided', '2-sided'} Whether steps in one or both directions are required. In other words '1-sided' applies to forward and backward schemes, '2-sided' applies to center schemes. lb : ndarray, shape (n,) Lower bounds on independent variables. ub : ndarray, shape (n,) Upper bounds on independent variables. Returns ------- h_adjusted : ndarray, shape (n,) Adjusted step sizes. Step size decreases only if a sign flip or switching to one-sided scheme doesn't allow to take a full step. use_one_sided : ndarray of bool, shape (n,) Whether to switch to one-sided scheme. Informative only for ``scheme='2-sided'``. """ if scheme == '1-sided': use_one_sided = np.ones_like(h, dtype=bool) elif scheme == '2-sided': h = np.abs(h) use_one_sided = np.zeros_like(h, dtype=bool) else: raise ValueError("`scheme` must be '1-sided' or '2-sided'.") if np.all((lb == -np.inf) & (ub == np.inf)): return h, use_one_sided h_total = h * num_steps h_adjusted = h.copy() lower_dist = x0 - lb upper_dist = ub - x0 if scheme == '1-sided': x = x0 + h_total violated = (x < lb) | (x > ub) fitting = np.abs(h_total) <= np.maximum(lower_dist, upper_dist) h_adjusted[violated & fitting] *= -1 forward = (upper_dist >= lower_dist) & ~fitting h_adjusted[forward] = upper_dist[forward] / num_steps backward = (upper_dist < lower_dist) & ~fitting h_adjusted[backward] = -lower_dist[backward] / num_steps elif scheme == '2-sided': central = (lower_dist >= h_total) & (upper_dist >= h_total) forward = (upper_dist >= lower_dist) & ~central h_adjusted[forward] = np.minimum( h[forward], 0.5 * upper_dist[forward] / num_steps) use_one_sided[forward] = True backward = (upper_dist < lower_dist) & ~central h_adjusted[backward] = -np.minimum( h[backward], 0.5 * lower_dist[backward] / num_steps) use_one_sided[backward] = True min_dist = np.minimum(upper_dist, lower_dist) / num_steps adjusted_central = (~central & (np.abs(h_adjusted) <= min_dist)) h_adjusted[adjusted_central] = min_dist[adjusted_central] use_one_sided[adjusted_central] = False return h_adjusted, use_one_sided relative_step = {"2-point": EPS**0.5, "3-point": EPS**(1/3), "cs": EPS**0.5} def _compute_absolute_step(rel_step, x0, method): if rel_step is None: rel_step = relative_step[method] sign_x0 = (x0 >= 0).astype(float) * 2 - 1 return rel_step * sign_x0 * np.maximum(1.0, np.abs(x0)) def _prepare_bounds(bounds, x0): lb, ub = [np.asarray(b, dtype=float) for b in bounds] if lb.ndim == 0: lb = np.resize(lb, x0.shape) if ub.ndim == 0: ub = np.resize(ub, x0.shape) return lb, ub def group_columns(A, order=0): """Group columns of a 2-D matrix for sparse finite differencing [1]_. Two columns are in the same group if in each row at least one of them has zero. A greedy sequential algorithm is used to construct groups. 
Parameters ---------- A : array_like or sparse matrix, shape (m, n) Matrix of which to group columns. order : int, iterable of int with shape (n,) or None Permutation array which defines the order of columns enumeration. If int or None, a random permutation is used with `order` used as a random seed. Default is 0, that is, use a random permutation but guarantee repeatability. Returns ------- groups : ndarray of int, shape (n,) Contains values from 0 to n_groups-1, where n_groups is the number of found groups. Each value ``groups[i]`` is the index of the group to which the ith column is assigned. The procedure is helpful only if n_groups is significantly less than n. References ---------- .. [1] <NAME>, <NAME>, and <NAME>, "On the estimation of sparse Jacobian matrices", Journal of the Institute of Mathematics and its Applications, 13 (1974), pp. 117-120. """ if issparse(A): A = csc_matrix(A) else: A = np.atleast_2d(A) A = (A != 0).astype(np.int32) if A.ndim != 2: raise ValueError("`A` must be 2-dimensional.") m, n = A.shape if order is None or np.isscalar(order): rng = np.random.RandomState(order) order = rng.permutation(n) else: order = np.asarray(order) if order.shape != (n,): raise ValueError("`order` has incorrect shape.") A = A[:, order] if issparse(A): groups = group_sparse(m, n, A.indices, A.indptr) else: groups = group_dense(m, n, A) groups[order] = groups.copy() return groups def approx_derivative(fun, x0, method='3-point', rel_step=None, f0=None, bounds=(-np.inf, np.inf), sparsity=None, as_linear_operator=False, args=(), kwargs={}): """Compute finite difference approximation of the derivatives of a vector-valued function. If a function maps from R^n to R^m, its derivatives form an m-by-n matrix called the Jacobian, where an element (i, j) is a partial derivative of f[i] with respect to x[j]. Parameters ---------- fun : callable Function of which to estimate the derivatives. The argument x passed to this function is ndarray of shape (n,) (never a scalar even if n=1). It must return 1-D array_like of shape (m,) or a scalar. x0 : array_like of shape (n,) or float Point at which to estimate the derivatives. Float will be converted to a 1-D array. method : {'3-point', '2-point', 'cs'}, optional Finite difference method to use: - '2-point' - use the first order accuracy forward or backward difference. - '3-point' - use central difference in interior points and the second order accuracy forward or backward difference near the boundary. - 'cs' - use a complex-step finite difference scheme. This assumes that the user function is real-valued and can be analytically continued to the complex plane. Otherwise, produces bogus results. rel_step : None or array_like, optional Relative step size to use. The absolute step size is computed as ``h = rel_step * sign(x0) * max(1, abs(x0))``, possibly adjusted to fit into the bounds. For ``method='3-point'`` the sign of `h` is ignored. If None (default) then step is selected automatically, see Notes. f0 : None or array_like, optional If not None it is assumed to be equal to ``fun(x0)``, in this case the ``fun(x0)`` is not called. Default is None. bounds : tuple of array_like, optional Lower and upper bounds on independent variables. Defaults to no bounds. Each bound must match the size of `x0` or be a scalar, in the latter case the bound will be the same for all variables. Use it to limit the range of function evaluation. Bounds checking is not implemented when `as_linear_operator` is True.
sparsity : {None, array_like, sparse matrix, 2-tuple}, optional Defines a sparsity structure of the Jacobian matrix. If the Jacobian matrix is known to have only few non-zero elements in each row, then it is possible to estimate several of its columns by a single function evaluation [3]_. To perform such economic computations two ingredients are required: * structure : array_like or sparse matrix of shape (m, n). A zero element means that a corresponding element of the Jacobian identically equals to zero. * groups : array_like of shape (n,). A column grouping for a given sparsity structure, use `group_columns` to obtain it. A single array or a sparse matrix is interpreted as a sparsity structure, and groups are computed inside the function. A tuple is interpreted as (structure, groups). If None (default), a standard dense differencing will be used. Note that sparse differencing makes sense only for large Jacobian matrices where each row contains few non-zero elements. as_linear_operator : bool, optional When True the function returns a `scipy.sparse.linalg.LinearOperator`. Otherwise it returns a dense array or a sparse matrix depending on `sparsity`. The linear operator provides an efficient way of computing ``J.dot(p)`` for any vector ``p`` of shape (n,), but does not allow direct access to individual elements of the matrix. By default `as_linear_operator` is False. args, kwargs : tuple and dict, optional Additional arguments passed to `fun`. Both empty by default. The calling signature is ``fun(x, *args, **kwargs)``. Returns ------- J : {ndarray, sparse matrix, LinearOperator} Finite difference approximation of the Jacobian matrix. If `as_linear_operator` is True returns a LinearOperator with shape (m, n). Otherwise it returns a dense array or sparse matrix depending on how `sparsity` is defined. If `sparsity` is None then an ndarray with shape (m, n) is returned. If `sparsity` is not None returns a csr_matrix with shape (m, n). For sparse matrices and linear operators it is always returned as a 2-D structure, for ndarrays, if m=1 it is returned as a 1-D gradient array with shape (n,). See Also -------- check_derivative : Check correctness of a function computing derivatives. Notes ----- If `rel_step` is not provided, it is assigned to ``EPS**(1/s)``, where EPS is machine epsilon for float64 numbers, s=2 for '2-point' method and s=3 for '3-point' method. Such relative step approximately minimizes a sum of truncation and round-off errors, see [1]_. A finite difference scheme for '3-point' method is selected automatically. The well-known central difference scheme is used for points sufficiently far from the boundary, and 3-point forward or backward scheme is used for points near the boundary. Both schemes have the second-order accuracy in terms of Taylor expansion. Refer to [2]_ for the formulas of 3-point forward and backward difference schemes. For dense differencing when m=1 Jacobian is returned with a shape (n,), on the other hand when n=1 Jacobian is returned with a shape (m, 1). Our motivation is the following: a) It handles a case of gradient computation (m=1) in a conventional way. b) It clearly separates these two different cases. c) In all cases np.atleast_2d can be called to get 2-D Jacobian with correct dimensions. References ---------- .. [1] W. H. Press et. al. "Numerical Recipes. The Art of Scientific Computing. 3rd edition", sec. 5.7. ..
[2] <NAME>, <NAME>, and <NAME>, "On the estimation of sparse Jacobian matrices", Journal of the Institute of Mathematics and its Applications, 13 (1974), pp. 117-120. .. [3] <NAME>, "Generation of Finite Difference Formulas on Arbitrarily Spaced Grids", Mathematics of Computation 51, 1988. Examples -------- >>> import numpy as np >>> from scipy.optimize import approx_derivative >>> >>> def f(x, c1, c2): ... return np.array([x[0] * np.sin(c1 * x[1]), ... x[0] * np.cos(c2 * x[1])]) ... >>> x0 = np.array([1.0, 0.5 * np.pi]) >>> approx_derivative(f, x0, args=(1, 2)) array([[ 1., 0.], [-1., 0.]]) Bounds can be used to limit the region of function evaluation. In the example below we compute left and right derivative at point 1.0. >>> def g(x): ... return x**2 if x >= 1 else x ... >>> x0 = 1.0 >>> approx_derivative(g, x0, bounds=(-np.inf, 1.0)) array([ 1.]) >>> approx_derivative(g, x0, bounds=(1.0, np.inf)) array([ 2.]) """ if method not in ['2-point', '3-point', 'cs']: raise ValueError("Unknown method '%s'. " % method) x0 = np.atleast_1d(x0) if x0.ndim > 1: raise ValueError("`x0` must have at most 1 dimension.") lb, ub = _prepare_bounds(bounds, x0) if lb.shape != x0.shape or ub.shape != x0.shape: raise ValueError("Inconsistent shapes between bounds and `x0`.") if as_linear_operator and not (np.all(np.isinf(lb)) and np.all(np.isinf(ub))): raise ValueError("Bounds not supported when " "`as_linear_operator` is True.") def fun_wrapped(x): f = np.atleast_1d(fun(x, *args, **kwargs)) if f.ndim > 1: raise RuntimeError("`fun` return value has " "more than 1 dimension.") return f if f0 is None: f0 = fun_wrapped(x0) else: f0 = np.atleast_1d(f0) if f0.ndim > 1: raise ValueError("`f0` passed has more than 1 dimension.") if np.any((x0 < lb) | (x0 > ub)): raise ValueError("`x0` violates bound constraints.") if as_linear_operator: if rel_step is None: rel_step = relative_step[method] return _linear_operator_difference(fun_wrapped, x0, f0, rel_step, method) else: h = _compute_absolute_step(rel_step, x0, method) if method == '2-point': h, use_one_sided = _adjust_scheme_to_bounds( x0, h, 1, '1-sided', lb, ub) elif method == '3-point': h, use_one_sided = _adjust_scheme_to_bounds( x0, h, 1, '2-sided', lb, ub) elif method == 'cs': use_one_sided = False if sparsity is None: return _dense_difference(fun_wrapped, x0, f0, h, use_one_sided, method) else: if not issparse(sparsity) and len(sparsity) == 2: structure, groups = sparsity else: structure = sparsity groups = group_columns(sparsity) if issparse(structure): structure = csc_matrix(structure) else: structure = np.atleast_2d(structure) groups = np.atleast_1d(groups) return _sparse_difference(fun_wrapped, x0, f0, h, use_one_sided, structure, groups, method) def _linear_operator_difference(fun, x0, f0, h, method): m = f0.size n = x0.size if method == '2-point': def matvec(p): if np.array_equal(p, np.zeros_like(p)): return np.zeros(m) dx = h / norm(p) x = x0 + dx*p df = fun(x) - f0 return df / dx elif method == '3-point': def matvec(p): if np.array_equal(p, np.zeros_like(p)): return
np.zeros(m)
numpy.zeros
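The zero-direction shortcut above exists because the finite-difference quotient h / norm(p) is undefined for p = 0, while the exact answer is known: a Jacobian applied to the zero vector is the zero vector of length m, hence np.zeros(m). A hedged, self-contained sketch of the 2-point matvec (names are illustrative, not scipy's public API):

import numpy as np
from numpy.linalg import norm

def matvec_2point(fun, x0, f0, h, p):
    """Estimate J @ p with a forward difference; exact for p == 0."""
    m = f0.size
    if np.array_equal(p, np.zeros_like(p)):
        return np.zeros(m)  # J @ 0 == 0, and dx would divide by norm(0)
    dx = h / norm(p)
    return (fun(x0 + dx * p) - f0) / dx

f = lambda x: np.array([x[0] * x[1], x[0] + x[1]])
x0 = np.array([1.0, 2.0])
print(matvec_2point(f, x0, f(x0), 1e-8, np.zeros(2)))           # [0. 0.]
print(matvec_2point(f, x0, f(x0), 1e-8, np.array([1.0, 0.0])))  # ~[2. 1.]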
''' <NAME> set up :2020-1-9 intergrate img and label into one file -- fiducial1024_v1 ''' import argparse import sys, os import pickle import random import collections import json import numpy as np import scipy.io as io import scipy.misc as m import matplotlib.pyplot as plt import glob import math import time import threading import multiprocessing as mp from multiprocessing import Pool import re import cv2 # sys.path.append('/lustre/home/gwxie/hope/project/dewarp/datasets/') # /lustre/home/gwxie/program/project/unwarp/perturbed_imgaes/GAN import utils def getDatasets(dir): return os.listdir(dir) class perturbed(utils.BasePerturbed): def __init__(self, path, bg_path, save_path, save_suffix): self.path = path self.bg_path = bg_path self.save_path = save_path self.save_suffix = save_suffix def save_img(self, m, n, fold_curve='fold', repeat_time=4, fiducial_points = 16, relativeShift_position='relativeShift_v2'): origin_img = cv2.imread(self.path, flags=cv2.IMREAD_COLOR) save_img_shape = [512*2, 480*2] # 320 # reduce_value = np.random.choice([2**4, 2**5, 2**6, 2**7, 2**8], p=[0.01, 0.1, 0.4, 0.39, 0.1]) reduce_value = np.random.choice([2*2, 4*2, 8*2, 16*2, 24*2, 32*2, 40*2, 48*2], p=[0.02, 0.18, 0.2, 0.3, 0.1, 0.1, 0.08, 0.02]) # reduce_value = np.random.choice([8*2, 16*2, 24*2, 32*2, 40*2, 48*2], p=[0.01, 0.02, 0.2, 0.4, 0.19, 0.18]) # reduce_value = np.random.choice([16, 24, 32, 40, 48, 64], p=[0.01, 0.1, 0.2, 0.4, 0.2, 0.09]) base_img_shrink = save_img_shape[0] - reduce_value # enlarge_img_shrink = [1024, 768] # enlarge_img_shrink = [896, 672] # 420 enlarge_img_shrink = [512*4, 480*4] # 420 # enlarge_img_shrink = [896*2, 768*2] # 420 # enlarge_img_shrink = [896, 768] # 420 # enlarge_img_shrink = [768, 576] # 420 # enlarge_img_shrink = [640, 480] # 420 '''''' im_lr = origin_img.shape[0] im_ud = origin_img.shape[1] reduce_value_v2 = np.random.choice([2*2, 4*2, 8*2, 16*2, 24*2, 28*2, 32*2, 48*2], p=[0.02, 0.18, 0.2, 0.2, 0.1, 0.1, 0.1, 0.1]) # reduce_value_v2 = np.random.choice([16, 24, 28, 32, 48, 64], p=[0.01, 0.1, 0.2, 0.3, 0.25, 0.14]) if im_lr > im_ud: im_ud = min(int(im_ud / im_lr * base_img_shrink), save_img_shape[1] - reduce_value_v2) im_lr = save_img_shape[0] - reduce_value else: base_img_shrink = save_img_shape[1] - reduce_value im_lr = min(int(im_lr / im_ud * base_img_shrink), save_img_shape[0] - reduce_value_v2) im_ud = base_img_shrink if round(im_lr / im_ud, 2) < 0.5 or round(im_ud / im_lr, 2) < 0.5: repeat_time = min(repeat_time, 8) edge_padding = 3 im_lr -= im_lr % (fiducial_points-1) - (2*edge_padding) # im_lr % (fiducial_points-1) - 1 im_ud -= im_ud % (fiducial_points-1) - (2*edge_padding) # im_ud % (fiducial_points-1) - 1 im_hight = np.linspace(edge_padding, im_lr - edge_padding, fiducial_points, dtype=np.int64) im_wide = np.linspace(edge_padding, im_ud - edge_padding, fiducial_points, dtype=np.int64) # im_lr -= im_lr % (fiducial_points-1) - (1+2*edge_padding) # im_lr % (fiducial_points-1) - 1 # im_ud -= im_ud % (fiducial_points-1) - (1+2*edge_padding) # im_ud % (fiducial_points-1) - 1 # im_hight = np.linspace(edge_padding, im_lr - (1+edge_padding), fiducial_points, dtype=np.int64) # im_wide = np.linspace(edge_padding, im_ud - (1+edge_padding), fiducial_points, dtype=np.int64) im_x, im_y = np.meshgrid(im_hight, im_wide) segment_x = (im_lr) // (fiducial_points-1) segment_y = (im_ud) // (fiducial_points-1) # plt.plot(im_x, im_y, # color='limegreen', # marker='.', # linestyle='') # plt.grid(True) # plt.show() self.origin_img = cv2.resize(origin_img, (im_ud, im_lr), 
interpolation=cv2.INTER_CUBIC) perturbed_bg_ = getDatasets(self.bg_path) perturbed_bg_img_ = self.bg_path+random.choice(perturbed_bg_) perturbed_bg_img = cv2.imread(perturbed_bg_img_, flags=cv2.IMREAD_COLOR) mesh_shape = self.origin_img.shape[:2] self.synthesis_perturbed_img = np.full((enlarge_img_shrink[0], enlarge_img_shrink[1], 3), 256, dtype=np.float32)#np.zeros_like(perturbed_bg_img) # self.synthesis_perturbed_img = np.full((enlarge_img_shrink[0], enlarge_img_shrink[1], 3), 0, dtype=np.int16)#np.zeros_like(perturbed_bg_img) self.new_shape = self.synthesis_perturbed_img.shape[:2] perturbed_bg_img = cv2.resize(perturbed_bg_img, (save_img_shape[1], save_img_shape[0]), cv2.INPAINT_TELEA) origin_pixel_position = np.argwhere(np.zeros(mesh_shape, dtype=np.uint32) == 0).reshape(mesh_shape[0], mesh_shape[1], 2) pixel_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape(self.new_shape[0], self.new_shape[1], 2) self.perturbed_xy_ = np.zeros((self.new_shape[0], self.new_shape[1], 2)) # self.perturbed_xy_ = pixel_position.copy().astype(np.float32) # fiducial_points_grid = origin_pixel_position[im_x, im_y] self.synthesis_perturbed_label = np.zeros((self.new_shape[0], self.new_shape[1], 2)) x_min, y_min, x_max, y_max = self.adjust_position_v2(0, 0, mesh_shape[0], mesh_shape[1], save_img_shape) origin_pixel_position += [x_min, y_min] x_min, y_min, x_max, y_max = self.adjust_position(0, 0, mesh_shape[0], mesh_shape[1]) x_shift = random.randint(-enlarge_img_shrink[0]//16, enlarge_img_shrink[0]//16) y_shift = random.randint(-enlarge_img_shrink[1]//16, enlarge_img_shrink[1]//16) x_min += x_shift x_max += x_shift y_min += y_shift y_max += y_shift '''im_x,y''' im_x += x_min im_y += y_min self.synthesis_perturbed_img[x_min:x_max, y_min:y_max] = self.origin_img self.synthesis_perturbed_label[x_min:x_max, y_min:y_max] = origin_pixel_position synthesis_perturbed_img_map = self.synthesis_perturbed_img.copy() synthesis_perturbed_label_map = self.synthesis_perturbed_label.copy() foreORbackground_label = np.full((mesh_shape), 1, dtype=np.int16) foreORbackground_label_map = np.full((self.new_shape), 0, dtype=np.int16) foreORbackground_label_map[x_min:x_max, y_min:y_max] = foreORbackground_label # synthesis_perturbed_img_map = self.pad(self.synthesis_perturbed_img.copy(), x_min, y_min, x_max, y_max) # synthesis_perturbed_label_map = self.pad(synthesis_perturbed_label_map, x_min, y_min, x_max, y_max) '''*****************************************************************''' is_normalizationFun_mixture = self.is_perform(0.2, 0.8) # if not is_normalizationFun_mixture: normalizationFun_0_1 = False # normalizationFun_0_1 = self.is_perform(0.5, 0.5) if fold_curve == 'fold': fold_curve_random = True # is_normalizationFun_mixture = False normalizationFun_0_1 = self.is_perform(0.2, 0.8) if is_normalizationFun_mixture: alpha_perturbed = random.randint(80, 120) / 100 else: if normalizationFun_0_1 and repeat_time < 8: alpha_perturbed = random.randint(50, 70) / 100 else: alpha_perturbed = random.randint(70, 130) / 100 else: fold_curve_random = self.is_perform(0.1, 0.9) # False # self.is_perform(0.01, 0.99) alpha_perturbed = random.randint(80, 160) / 100 # is_normalizationFun_mixture = False # self.is_perform(0.01, 0.99) synthesis_perturbed_img = np.full_like(self.synthesis_perturbed_img, 256) # synthesis_perturbed_img = np.full_like(self.synthesis_perturbed_img, 0, dtype=np.int16) synthesis_perturbed_label = np.zeros_like(self.synthesis_perturbed_label) alpha_perturbed_change = self.is_perform(0.5, 0.5) 
p_pp_choice = self.is_perform(0.8, 0.2) if fold_curve == 'fold' else self.is_perform(0.1, 0.9) for repeat_i in range(repeat_time): if alpha_perturbed_change: if fold_curve == 'fold': if is_normalizationFun_mixture: alpha_perturbed = random.randint(80, 120) / 100 else: if normalizationFun_0_1 and repeat_time < 8: alpha_perturbed = random.randint(50, 70) / 100 else: alpha_perturbed = random.randint(70, 130) / 100 else: alpha_perturbed = random.randint(80, 160) / 100 '''''' linspace_x = [0, (self.new_shape[0] - im_lr) // 2 - 1, self.new_shape[0] - (self.new_shape[0] - im_lr) // 2 - 1, self.new_shape[0] - 1] linspace_y = [0, (self.new_shape[1] - im_ud) // 2 - 1, self.new_shape[1] - (self.new_shape[1] - im_ud) // 2 - 1, self.new_shape[1] - 1] linspace_x_seq = [1, 2, 3] linspace_y_seq = [1, 2, 3] r_x = random.choice(linspace_x_seq) r_y = random.choice(linspace_y_seq) perturbed_p = np.array( [random.randint(linspace_x[r_x-1] * 10, linspace_x[r_x] * 10), random.randint(linspace_y[r_y-1] * 10, linspace_y[r_y] * 10)])/10 if ((r_x == 1 or r_x == 3) and (r_y == 1 or r_y == 3)) and p_pp_choice: linspace_x_seq.remove(r_x) linspace_y_seq.remove(r_y) r_x = random.choice(linspace_x_seq) r_y = random.choice(linspace_y_seq) perturbed_pp = np.array( [random.randint(linspace_x[r_x-1] * 10, linspace_x[r_x] * 10), random.randint(linspace_y[r_y-1] * 10, linspace_y[r_y] * 10)])/10 # perturbed_p, perturbed_pp = np.array( # [random.randint(0, self.new_shape[0] * 10) / 10, # random.randint(0, self.new_shape[1] * 10) / 10]) \ # , np.array([random.randint(0, self.new_shape[0] * 10) / 10, # random.randint(0, self.new_shape[1] * 10) / 10]) # perturbed_p, perturbed_pp = np.array( # [random.randint((self.new_shape[0]-im_lr)//2*10, (self.new_shape[0]-(self.new_shape[0]-im_lr)//2) * 10) / 10, # random.randint((self.new_shape[1]-im_ud)//2*10, (self.new_shape[1]-(self.new_shape[1]-im_ud)//2) * 10) / 10]) \ # , np.array([random.randint((self.new_shape[0]-im_lr)//2*10, (self.new_shape[0]-(self.new_shape[0]-im_lr)//2) * 10) / 10, # random.randint((self.new_shape[1]-im_ud)//2*10, (self.new_shape[1]-(self.new_shape[1]-im_ud)//2) * 10) / 10]) '''''' perturbed_vp = perturbed_pp - perturbed_p perturbed_vp_norm = np.linalg.norm(perturbed_vp) perturbed_distance_vertex_and_line = np.dot((perturbed_p - pixel_position), perturbed_vp) / perturbed_vp_norm '''''' # perturbed_v = np.array([random.randint(-3000, 3000) / 100, random.randint(-3000, 3000) / 100]) # perturbed_v = np.array([random.randint(-4000, 4000) / 100, random.randint(-4000, 4000) / 100]) if fold_curve == 'fold' and self.is_perform(0.6, 0.4): # self.is_perform(0.3, 0.7): # perturbed_v = np.array([random.randint(-9000, 9000) / 100, random.randint(-9000, 9000) / 100]) perturbed_v = np.array([random.randint(-10000, 10000) / 100, random.randint(-10000, 10000) / 100]) # perturbed_v = np.array([random.randint(-11000, 11000) / 100, random.randint(-11000, 11000) / 100]) else: # perturbed_v = np.array([random.randint(-9000, 9000) / 100, random.randint(-9000, 9000) / 100]) # perturbed_v = np.array([random.randint(-16000, 16000) / 100, random.randint(-16000, 16000) / 100]) perturbed_v = np.array([random.randint(-8000, 8000) / 100, random.randint(-8000, 8000) / 100]) # perturbed_v = np.array([random.randint(-3500, 3500) / 100, random.randint(-3500, 3500) / 100]) # perturbed_v = np.array([random.randint(-600, 600) / 10, random.randint(-600, 600) / 10]) '''''' if fold_curve == 'fold': if is_normalizationFun_mixture: if self.is_perform(0.5, 0.5): perturbed_d = 
np.abs(self.get_normalize(perturbed_distance_vertex_and_line)) else: perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), random.randint(1, 2)) else: if normalizationFun_0_1: perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), 2) else: perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line)) else: if is_normalizationFun_mixture: if self.is_perform(0.5, 0.5): perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line)) else: perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), random.randint(1, 2)) else: if normalizationFun_0_1: perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), 2) else: perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line)) '''''' if fold_curve_random: # omega_perturbed = (alpha_perturbed+0.2) / (perturbed_d + alpha_perturbed) # omega_perturbed = alpha_perturbed**perturbed_d omega_perturbed = alpha_perturbed / (perturbed_d + alpha_perturbed) else: omega_perturbed = 1 - perturbed_d ** alpha_perturbed '''shadow''' if self.is_perform(0.6, 0.4): synthesis_perturbed_img_map[x_min:x_max, y_min:y_max] = np.minimum(np.maximum(synthesis_perturbed_img_map[x_min:x_max, y_min:y_max] - np.int16(np.round(omega_perturbed[x_min:x_max, y_min:y_max].repeat(3).reshape(x_max-x_min, y_max-y_min, 3) * abs(np.linalg.norm(perturbed_v//2))*np.array([0.4-random.random()*0.1, 0.4-random.random()*0.1, 0.4-random.random()*0.1]))), 0), 255) '''''' if relativeShift_position in ['position', 'relativeShift_v2']: self.perturbed_xy_ += np.array([omega_perturbed * perturbed_v[0], omega_perturbed * perturbed_v[1]]).transpose(1, 2, 0) else: print('relativeShift_position error') exit() ''' flat_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape( self.new_shape[0] * self.new_shape[1], 2) vtx, wts = self.interp_weights(self.perturbed_xy_.reshape(self.new_shape[0] * self.new_shape[1], 2), flat_position) wts_sum = np.abs(wts).sum(-1) # flat_img.reshape(flat_shape[0] * flat_shape[1], 3)[:] = interpolate(pixel, vtx, wts) wts = wts[wts_sum <= 1, :] vtx = vtx[wts_sum <= 1, :] synthesis_perturbed_img.reshape(self.new_shape[0] * self.new_shape[1], 3)[wts_sum <= 1, :] = self.interpolate(synthesis_perturbed_img_map.reshape(self.new_shape[0] * self.new_shape[1], 3), vtx, wts) synthesis_perturbed_label.reshape(self.new_shape[0] * self.new_shape[1], 2)[wts_sum <= 1, :] = self.interpolate(synthesis_perturbed_label_map.reshape(self.new_shape[0] * self.new_shape[1], 2), vtx, wts) foreORbackground_label = np.zeros(self.new_shape) foreORbackground_label.reshape(self.new_shape[0] * self.new_shape[1], 1)[wts_sum <= 1, :] = self.interpolate(foreORbackground_label_map.reshape(self.new_shape[0] * self.new_shape[1], 1), vtx, wts) foreORbackground_label[foreORbackground_label < 0.99] = 0 foreORbackground_label[foreORbackground_label >= 0.99] = 1 # synthesis_perturbed_img = np.around(synthesis_perturbed_img).astype(np.uint8) synthesis_perturbed_label[:, :, 0] *= foreORbackground_label synthesis_perturbed_label[:, :, 1] *= foreORbackground_label synthesis_perturbed_img[:, :, 0] *= foreORbackground_label synthesis_perturbed_img[:, :, 1] *= foreORbackground_label synthesis_perturbed_img[:, :, 2] *= foreORbackground_label self.synthesis_perturbed_img = synthesis_perturbed_img self.synthesis_perturbed_label = synthesis_perturbed_label ''' '''perspective''' perspective_shreshold = random.randint(26, 36)*10 # 280 x_min_per, y_min_per, x_max_per, y_max_per = 
self.adjust_position(perspective_shreshold, perspective_shreshold, self.new_shape[0]-perspective_shreshold, self.new_shape[1]-perspective_shreshold) pts1 = np.float32([[x_min_per, y_min_per], [x_max_per, y_min_per], [x_min_per, y_max_per], [x_max_per, y_max_per]]) e_1_ = x_max_per - x_min_per e_2_ = y_max_per - y_min_per e_3_ = e_2_ e_4_ = e_1_ perspective_shreshold_h = e_1_*0.02 perspective_shreshold_w = e_2_*0.02 a_min_, a_max_ = 70, 110 # if self.is_perform(1, 0): if fold_curve == 'curve' and self.is_perform(0.5, 0.5): if self.is_perform(0.5, 0.5): while True: pts2 = np.around( np.float32([[x_min_per - (random.random()) * perspective_shreshold, y_min_per + (random.random()) * perspective_shreshold], [x_max_per - (random.random()) * perspective_shreshold, y_min_per - (random.random()) * perspective_shreshold], [x_min_per + (random.random()) * perspective_shreshold, y_max_per + (random.random()) * perspective_shreshold], [x_max_per + (random.random()) * perspective_shreshold, y_max_per - (random.random()) * perspective_shreshold]])) # right e_1 = np.linalg.norm(pts2[0]-pts2[1]) e_2 = np.linalg.norm(pts2[0]-pts2[2]) e_3 = np.linalg.norm(pts2[1]-pts2[3]) e_4 = np.linalg.norm(pts2[2]-pts2[3]) if e_1_+perspective_shreshold_h > e_1 and e_2_+perspective_shreshold_w > e_2 and e_3_+perspective_shreshold_w > e_3 and e_4_+perspective_shreshold_h > e_4 and \ e_1_ - perspective_shreshold_h < e_1 and e_2_ - perspective_shreshold_w < e_2 and e_3_ - perspective_shreshold_w < e_3 and e_4_ - perspective_shreshold_h < e_4 and \ abs(e_1-e_4) < perspective_shreshold_h and abs(e_2-e_3) < perspective_shreshold_w: a0_, a1_, a2_, a3_ = self.get_angle_4(pts2) if (a0_ > a_min_ and a0_ < a_max_) or (a1_ > a_min_ and a1_ < a_max_) or (a2_ > a_min_ and a2_ < a_max_) or (a3_ > a_min_ and a3_ < a_max_): break else: while True: pts2 = np.around( np.float32([[x_min_per + (random.random()) * perspective_shreshold, y_min_per - (random.random()) * perspective_shreshold], [x_max_per + (random.random()) * perspective_shreshold, y_min_per + (random.random()) * perspective_shreshold], [x_min_per - (random.random()) * perspective_shreshold, y_max_per - (random.random()) * perspective_shreshold], [x_max_per - (random.random()) * perspective_shreshold, y_max_per + (random.random()) * perspective_shreshold]])) e_1 = np.linalg.norm(pts2[0]-pts2[1]) e_2 = np.linalg.norm(pts2[0]-pts2[2]) e_3 = np.linalg.norm(pts2[1]-pts2[3]) e_4 = np.linalg.norm(pts2[2]-pts2[3]) if e_1_+perspective_shreshold_h > e_1 and e_2_+perspective_shreshold_w > e_2 and e_3_+perspective_shreshold_w > e_3 and e_4_+perspective_shreshold_h > e_4 and \ e_1_ - perspective_shreshold_h < e_1 and e_2_ - perspective_shreshold_w < e_2 and e_3_ - perspective_shreshold_w < e_3 and e_4_ - perspective_shreshold_h < e_4 and \ abs(e_1-e_4) < perspective_shreshold_h and abs(e_2-e_3) < perspective_shreshold_w: a0_, a1_, a2_, a3_ = self.get_angle_4(pts2) if (a0_ > a_min_ and a0_ < a_max_) or (a1_ > a_min_ and a1_ < a_max_) or (a2_ > a_min_ and a2_ < a_max_) or (a3_ > a_min_ and a3_ < a_max_): break else: while True: pts2 = np.around(np.float32([[x_min_per+(random.random()-0.5)*perspective_shreshold, y_min_per+(random.random()-0.5)*perspective_shreshold], [x_max_per+(random.random()-0.5)*perspective_shreshold, y_min_per+(random.random()-0.5)*perspective_shreshold], [x_min_per+(random.random()-0.5)*perspective_shreshold, y_max_per+(random.random()-0.5)*perspective_shreshold], [x_max_per+(random.random()-0.5)*perspective_shreshold, 
y_max_per+(random.random()-0.5)*perspective_shreshold]])) e_1 = np.linalg.norm(pts2[0]-pts2[1]) e_2 = np.linalg.norm(pts2[0]-pts2[2]) e_3 = np.linalg.norm(pts2[1]-pts2[3]) e_4 = np.linalg.norm(pts2[2]-pts2[3]) if e_1_+perspective_shreshold_h > e_1 and e_2_+perspective_shreshold_w > e_2 and e_3_+perspective_shreshold_w > e_3 and e_4_+perspective_shreshold_h > e_4 and \ e_1_ - perspective_shreshold_h < e_1 and e_2_ - perspective_shreshold_w < e_2 and e_3_ - perspective_shreshold_w < e_3 and e_4_ - perspective_shreshold_h < e_4 and \ abs(e_1-e_4) < perspective_shreshold_h and abs(e_2-e_3) < perspective_shreshold_w: a0_, a1_, a2_, a3_ = self.get_angle_4(pts2) if (a0_ > a_min_ and a0_ < a_max_) or (a1_ > a_min_ and a1_ < a_max_) or (a2_ > a_min_ and a2_ < a_max_) or (a3_ > a_min_ and a3_ < a_max_): break M = cv2.getPerspectiveTransform(pts1, pts2) one = np.ones((self.new_shape[0], self.new_shape[1], 1), dtype=np.int16) matr = np.dstack((pixel_position, one)) new = np.dot(M, matr.reshape(-1, 3).T).T.reshape(self.new_shape[0], self.new_shape[1], 3) x = new[:, :, 0]/new[:, :, 2] y = new[:, :, 1]/new[:, :, 2] perturbed_xy_ = np.dstack((x, y)) # perturbed_xy_round_int = np.around(cv2.bilateralFilter(perturbed_xy_round_int, 9, 75, 75)) # perturbed_xy_round_int = np.around(cv2.blur(perturbed_xy_, (17, 17))) # perturbed_xy_round_int = cv2.blur(perturbed_xy_round_int, (17, 17)) # perturbed_xy_round_int = cv2.GaussianBlur(perturbed_xy_round_int, (7, 7), 0) perturbed_xy_ = perturbed_xy_-np.min(perturbed_xy_.T.reshape(2, -1), 1) # perturbed_xy_round_int = np.around(perturbed_xy_round_int-np.min(perturbed_xy_round_int.T.reshape(2, -1), 1)).astype(np.int16) self.perturbed_xy_ += perturbed_xy_ '''perspective end''' '''to img''' flat_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape( self.new_shape[0] * self.new_shape[1], 2) # self.perturbed_xy_ = cv2.blur(self.perturbed_xy_, (7, 7)) self.perturbed_xy_ = cv2.GaussianBlur(self.perturbed_xy_, (7, 7), 0) '''get fiducial points''' fiducial_points_coordinate = self.perturbed_xy_[im_x, im_y] vtx, wts = self.interp_weights(self.perturbed_xy_.reshape(self.new_shape[0] * self.new_shape[1], 2), flat_position) wts_sum = np.abs(wts).sum(-1) # flat_img.reshape(flat_shape[0] * flat_shape[1], 3)[:] = interpolate(pixel, vtx, wts) wts = wts[wts_sum <= 1, :] vtx = vtx[wts_sum <= 1, :] synthesis_perturbed_img.reshape(self.new_shape[0] * self.new_shape[1], 3)[wts_sum <= 1, :] = self.interpolate(synthesis_perturbed_img_map.reshape(self.new_shape[0] * self.new_shape[1], 3), vtx, wts) synthesis_perturbed_label.reshape(self.new_shape[0] * self.new_shape[1], 2)[wts_sum <= 1, :] = self.interpolate(synthesis_perturbed_label_map.reshape(self.new_shape[0] * self.new_shape[1], 2), vtx, wts) foreORbackground_label = np.zeros(self.new_shape) foreORbackground_label.reshape(self.new_shape[0] * self.new_shape[1], 1)[wts_sum <= 1, :] = self.interpolate(foreORbackground_label_map.reshape(self.new_shape[0] * self.new_shape[1], 1), vtx, wts) foreORbackground_label[foreORbackground_label < 0.99] = 0 foreORbackground_label[foreORbackground_label >= 0.99] = 1 self.synthesis_perturbed_img = synthesis_perturbed_img self.synthesis_perturbed_label = synthesis_perturbed_label self.foreORbackground_label = foreORbackground_label '''draw fiducial points stepSize = 0 fiducial_points_synthesis_perturbed_img = self.synthesis_perturbed_img.copy() for l in fiducial_points_coordinate.astype(np.int64).reshape(-1,2): cv2.circle(fiducial_points_synthesis_perturbed_img, (l[1] + 
math.ceil(stepSize / 2), l[0] + math.ceil(stepSize / 2)), 5, (0, 0, 255), -1) cv2.imwrite('/lustre/home/gwxie/program/project/unwarp/unwarp_perturbed/TPS/img/cv_TPS_large.jpg', fiducial_points_synthesis_perturbed_img) ''' '''clip''' perturbed_x_min, perturbed_y_min, perturbed_x_max, perturbed_y_max = -1, -1, self.new_shape[0], self.new_shape[1] for x in range(self.new_shape[0] // 2, perturbed_x_max): if np.sum(self.synthesis_perturbed_img[x, :]) == 768 * self.new_shape[1] and perturbed_x_max - 1 > x: perturbed_x_max = x break for x in range(self.new_shape[0] // 2, perturbed_x_min, -1): if
np.sum(self.synthesis_perturbed_img[x, :])
numpy.sum
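The clipping loop relies on the blank canvas being filled with 256 per channel, so a fully background row of width w sums to 3 * 256 * w = 768 * w; numpy.sum over a row therefore detects rows the warped page never touched. A toy reproduction with an invented 8x6 canvas:

import numpy as np

h, w = 8, 6
img = np.full((h, w, 3), 256, dtype=np.float32)  # blank canvas, as in save_img
img[2:5, 1:4] = 0                                # pretend warped content landed here

# Scan downward from the middle; the first row summing to 768 * w is pure
# background, which bounds the content from below.
for x in range(h // 2, h):
    if np.sum(img[x, :]) == 768 * w:
        print("first blank row below the content:", x)  # -> 5
        break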
"""Test the search module""" from collections.abc import Iterable, Sized from io import StringIO from itertools import chain, product from functools import partial import pickle import sys from types import GeneratorType import re import numpy as np import scipy.sparse as sp import pytest from sklearn.utils.fixes import sp_version from sklearn.utils._testing import assert_raises from sklearn.utils._testing import assert_warns from sklearn.utils._testing import assert_warns_message from sklearn.utils._testing import assert_raise_message from sklearn.utils._testing import assert_array_equal from sklearn.utils._testing import assert_array_almost_equal from sklearn.utils._testing import assert_allclose from sklearn.utils._testing import assert_almost_equal from sklearn.utils._testing import ignore_warnings from sklearn.utils._mocking import CheckingClassifier, MockDataFrame from scipy.stats import bernoulli, expon, uniform from sklearn.base import BaseEstimator, ClassifierMixin from sklearn.base import clone from sklearn.exceptions import NotFittedError from sklearn.datasets import make_classification from sklearn.datasets import make_blobs from sklearn.datasets import make_multilabel_classification from sklearn.model_selection import fit_grid_point from sklearn.model_selection import train_test_split from sklearn.model_selection import KFold from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import StratifiedShuffleSplit from sklearn.model_selection import LeaveOneGroupOut from sklearn.model_selection import LeavePGroupsOut from sklearn.model_selection import GroupKFold from sklearn.model_selection import GroupShuffleSplit from sklearn.model_selection import GridSearchCV from sklearn.model_selection import RandomizedSearchCV from sklearn.model_selection import ParameterGrid from sklearn.model_selection import ParameterSampler from sklearn.model_selection._search import BaseSearchCV from sklearn.model_selection._validation import FitFailedWarning from sklearn.svm import LinearSVC, SVC from sklearn.tree import DecisionTreeRegressor from sklearn.tree import DecisionTreeClassifier from sklearn.cluster import KMeans from sklearn.neighbors import KernelDensity from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import f1_score from sklearn.metrics import recall_score from sklearn.metrics import accuracy_score from sklearn.metrics import make_scorer from sklearn.metrics import roc_auc_score from sklearn.metrics.pairwise import euclidean_distances from sklearn.impute import SimpleImputer from sklearn.pipeline import Pipeline from sklearn.linear_model import Ridge, SGDClassifier, LinearRegression from sklearn.experimental import enable_hist_gradient_boosting # noqa from sklearn.ensemble import HistGradientBoostingClassifier from sklearn.model_selection.tests.common import OneTimeSplitter # Neither of the following two estimators inherit from BaseEstimator, # to test hyperparameter search on user-defined classifiers. class MockClassifier: """Dummy classifier to test the parameter search algorithms""" def __init__(self, foo_param=0): self.foo_param = foo_param def fit(self, X, Y): assert len(X) == len(Y) self.classes_ = np.unique(Y) return self def predict(self, T): return T.shape[0] def transform(self, X): return X + self.foo_param def inverse_transform(self, X): return X - self.foo_param predict_proba = predict predict_log_proba = predict decision_function = predict def score(self, X=None, Y=None): if self.foo_param > 1: score = 1. else: score = 0. 
return score def get_params(self, deep=False): return {'foo_param': self.foo_param} def set_params(self, **params): self.foo_param = params['foo_param'] return self class LinearSVCNoScore(LinearSVC): """An LinearSVC classifier that has no score method.""" @property def score(self): raise AttributeError X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) y = np.array([1, 1, 2, 2]) def assert_grid_iter_equals_getitem(grid): assert list(grid) == [grid[i] for i in range(len(grid))] @pytest.mark.parametrize("klass", [ParameterGrid, partial(ParameterSampler, n_iter=10)]) @pytest.mark.parametrize( "input, error_type, error_message", [(0, TypeError, r'Parameter .* is not a dict or a list \(0\)'), ([{'foo': [0]}, 0], TypeError, r'Parameter .* is not a dict \(0\)'), ({'foo': 0}, TypeError, "Parameter.* value is not iterable .*" r"\(key='foo', value=0\)")] ) def test_validate_parameter_input(klass, input, error_type, error_message): with pytest.raises(error_type, match=error_message): klass(input) def test_parameter_grid(): # Test basic properties of ParameterGrid. params1 = {"foo": [1, 2, 3]} grid1 = ParameterGrid(params1) assert isinstance(grid1, Iterable) assert isinstance(grid1, Sized) assert len(grid1) == 3 assert_grid_iter_equals_getitem(grid1) params2 = {"foo": [4, 2], "bar": ["ham", "spam", "eggs"]} grid2 = ParameterGrid(params2) assert len(grid2) == 6 # loop to assert we can iterate over the grid multiple times for i in range(2): # tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2) points = set(tuple(chain(*(sorted(p.items())))) for p in grid2) assert (points == set(("bar", x, "foo", y) for x, y in product(params2["bar"], params2["foo"]))) assert_grid_iter_equals_getitem(grid2) # Special case: empty grid (useful to get default estimator settings) empty = ParameterGrid({}) assert len(empty) == 1 assert list(empty) == [{}] assert_grid_iter_equals_getitem(empty) assert_raises(IndexError, lambda: empty[1]) has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}]) assert len(has_empty) == 4 assert list(has_empty) == [{'C': 1}, {'C': 10}, {}, {'C': .5}] assert_grid_iter_equals_getitem(has_empty) def test_grid_search(): # Test that the best estimator contains the right value for foo_param clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=3, verbose=3) # make sure it selects the smallest parameter in case of ties old_stdout = sys.stdout sys.stdout = StringIO() grid_search.fit(X, y) sys.stdout = old_stdout assert grid_search.best_estimator_.foo_param == 2 assert_array_equal(grid_search.cv_results_["param_foo_param"].data, [1, 2, 3]) # Smoke test the score etc: grid_search.score(X, y) grid_search.predict_proba(X) grid_search.decision_function(X) grid_search.transform(X) # Test exception handling on scoring grid_search.scoring = 'sklearn' assert_raises(ValueError, grid_search.fit, X, y) def test_grid_search_pipeline_steps(): # check that parameters that are estimators are cloned before fitting pipe = Pipeline([('regressor', LinearRegression())]) param_grid = {'regressor': [LinearRegression(), Ridge()]} grid_search = GridSearchCV(pipe, param_grid, cv=2) grid_search.fit(X, y) regressor_results = grid_search.cv_results_['param_regressor'] assert isinstance(regressor_results[0], LinearRegression) assert isinstance(regressor_results[1], Ridge) assert not hasattr(regressor_results[0], 'coef_') assert not hasattr(regressor_results[1], 'coef_') assert regressor_results[0] is not grid_search.best_estimator_ assert regressor_results[1] is not 
grid_search.best_estimator_ # check that we didn't modify the parameter grid that was passed assert not hasattr(param_grid['regressor'][0], 'coef_') assert not hasattr(param_grid['regressor'][1], 'coef_') @pytest.mark.parametrize("SearchCV", [GridSearchCV, RandomizedSearchCV]) def test_SearchCV_with_fit_params(SearchCV): X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = CheckingClassifier(expected_fit_params=['spam', 'eggs']) searcher = SearchCV( clf, {'foo_param': [1, 2, 3]}, cv=2, error_score="raise" ) # The CheckingClassifier generates an assertion error if # a parameter is missing or has length != len(X). err_msg = r"Expected fit parameter\(s\) \['eggs'\] not seen." with pytest.raises(AssertionError, match=err_msg): searcher.fit(X, y, spam=np.ones(10)) err_msg = "Fit parameter spam has length 1; expected" with pytest.raises(AssertionError, match=err_msg): searcher.fit(X, y, spam=np.ones(1), eggs=np.zeros(10)) searcher.fit(X, y, spam=np.ones(10), eggs=np.zeros(10)) @ignore_warnings def test_grid_search_no_score(): # Test grid-search on classifier that has no score function. clf = LinearSVC(random_state=0) X, y = make_blobs(random_state=0, centers=2) Cs = [.1, 1, 10] clf_no_score = LinearSVCNoScore(random_state=0) grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy') grid_search.fit(X, y) grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs}, scoring='accuracy') # smoketest grid search grid_search_no_score.fit(X, y) # check that best params are equal assert grid_search_no_score.best_params_ == grid_search.best_params_ # check that we can call score and that it gives the correct result assert grid_search.score(X, y) == grid_search_no_score.score(X, y) # giving no scoring function raises an error grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs}) assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit, [[1]]) def test_grid_search_score_method(): X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2, random_state=0) clf = LinearSVC(random_state=0) grid = {'C': [.1]} search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y) search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y) search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid, scoring='roc_auc' ).fit(X, y) search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y) # Check warning only occurs in situation where behavior changed: # estimator requires score method to compete with scoring parameter score_no_scoring = search_no_scoring.score(X, y) score_accuracy = search_accuracy.score(X, y) score_no_score_auc = search_no_score_method_auc.score(X, y) score_auc = search_auc.score(X, y) # ensure the test is sane assert score_auc < 1.0 assert score_accuracy < 1.0 assert score_auc != score_accuracy assert_almost_equal(score_accuracy, score_no_scoring) assert_almost_equal(score_auc, score_no_score_auc) def test_grid_search_groups(): # Check if ValueError (when groups is None) propagates to GridSearchCV # And also check if groups is correctly passed to the cv object rng = np.random.RandomState(0) X, y = make_classification(n_samples=15, n_classes=2, random_state=0) groups = rng.randint(0, 3, 15) clf = LinearSVC(random_state=0) grid = {'C': [1]} group_cvs = [LeaveOneGroupOut(), LeavePGroupsOut(2), GroupKFold(n_splits=3), GroupShuffleSplit()] for cv in group_cvs: gs = GridSearchCV(clf, grid, cv=cv) assert_raise_message(ValueError, "The 'groups' parameter should not be None.", gs.fit, X, y) gs.fit(X, y, groups=groups) non_group_cvs 
= [StratifiedKFold(), StratifiedShuffleSplit()]
    for cv in non_group_cvs:
        gs = GridSearchCV(clf, grid, cv=cv)
        # Should not raise an error
        gs.fit(X, y)


def test_classes__property():
    # Test that classes_ property matches best_estimator_.classes_
    X = np.arange(100).reshape(10, 10)
    y = np.array([0] * 5 + [1] * 5)
    Cs = [.1, 1, 10]

    grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs})
    grid_search.fit(X, y)
    assert_array_equal(grid_search.best_estimator_.classes_,
                       grid_search.classes_)

    # Test that regressors do not have a classes_ attribute
    grid_search = GridSearchCV(Ridge(), {'alpha': [1.0, 2.0]})
    grid_search.fit(X, y)
    assert not hasattr(grid_search, 'classes_')

    # Test that the grid searcher has no classes_ attribute before it's fit
    grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs})
    assert not hasattr(grid_search, 'classes_')

    # Test that the grid searcher has no classes_ attribute without a refit
    grid_search = GridSearchCV(LinearSVC(random_state=0),
                               {'C': Cs}, refit=False)
    grid_search.fit(X, y)
    assert not hasattr(grid_search, 'classes_')


def test_trivial_cv_results_attr():
    # Test search over a "grid" with only one point.
    clf = MockClassifier()
    grid_search = GridSearchCV(clf, {'foo_param': [1]}, cv=3)
    grid_search.fit(X, y)
    assert hasattr(grid_search, "cv_results_")

    random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1, cv=3)
    random_search.fit(X, y)
    # check the randomized searcher just fitted, not the grid searcher above
    assert hasattr(random_search, "cv_results_")


def test_no_refit():
    # Test that GSCV can be used for model selection alone without refitting
    clf = MockClassifier()
    for scoring in [None, ['accuracy', 'precision']]:
        grid_search = GridSearchCV(
            clf, {'foo_param': [1, 2, 3]}, refit=False, cv=3
        )
        grid_search.fit(X, y)
        assert not hasattr(grid_search, "best_estimator_") and \
            hasattr(grid_search, "best_index_") and \
            hasattr(grid_search, "best_params_")

        # Make sure the functions predict/transform etc raise meaningful
        # error messages
        for fn_name in ('predict', 'predict_proba', 'predict_log_proba',
                        'transform', 'inverse_transform'):
            assert_raise_message(NotFittedError, ('refit=False. 
%s is available only after ' 'refitting on the best parameters' % fn_name), getattr(grid_search, fn_name), X) # Test that an invalid refit param raises appropriate error messages for refit in ["", 5, True, 'recall', 'accuracy']: assert_raise_message(ValueError, "For multi-metric scoring, the " "parameter refit must be set to a scorer key", GridSearchCV(clf, {}, refit=refit, scoring={'acc': 'accuracy', 'prec': 'precision'} ).fit, X, y) def test_grid_search_error(): # Test that grid search will capture errors on data with different length X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) assert_raises(ValueError, cv.fit, X_[:180], y_) def test_grid_search_one_grid_point(): X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]} clf = SVC(gamma='auto') cv = GridSearchCV(clf, param_dict) cv.fit(X_, y_) clf = SVC(C=1.0, kernel="rbf", gamma=0.1) clf.fit(X_, y_) assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_) def test_grid_search_when_param_grid_includes_range(): # Test that the best estimator contains the right value for foo_param clf = MockClassifier() grid_search = None grid_search = GridSearchCV(clf, {'foo_param': range(1, 4)}, cv=3) grid_search.fit(X, y) assert grid_search.best_estimator_.foo_param == 2 def test_grid_search_bad_param_grid(): param_dict = {"C": 1} clf = SVC(gamma='auto') assert_raise_message( ValueError, "Parameter grid for parameter (C) needs to" " be a list or numpy array, but got (<class 'int'>)." " Single values need to be wrapped in a list" " with one element.", GridSearchCV, clf, param_dict) param_dict = {"C": []} clf = SVC() assert_raise_message( ValueError, "Parameter values for parameter (C) need to be a non-empty sequence.", GridSearchCV, clf, param_dict) param_dict = {"C": "1,2,3"} clf = SVC(gamma='auto') assert_raise_message( ValueError, "Parameter grid for parameter (C) needs to" " be a list or numpy array, but got (<class 'str'>)." 
" Single values need to be wrapped in a list" " with one element.", GridSearchCV, clf, param_dict) param_dict = {"C": np.ones((3, 2))} clf = SVC() assert_raises(ValueError, GridSearchCV, clf, param_dict) def test_grid_search_sparse(): # Test that grid search works with both dense and sparse matrices X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) cv.fit(X_[:180], y_[:180]) y_pred = cv.predict(X_[180:]) C = cv.best_estimator_.C X_ = sp.csr_matrix(X_) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) cv.fit(X_[:180].tocoo(), y_[:180]) y_pred2 = cv.predict(X_[180:]) C2 = cv.best_estimator_.C assert np.mean(y_pred == y_pred2) >= .9 assert C == C2 def test_grid_search_sparse_scoring(): X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1") cv.fit(X_[:180], y_[:180]) y_pred = cv.predict(X_[180:]) C = cv.best_estimator_.C X_ = sp.csr_matrix(X_) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1") cv.fit(X_[:180], y_[:180]) y_pred2 = cv.predict(X_[180:]) C2 = cv.best_estimator_.C assert_array_equal(y_pred, y_pred2) assert C == C2 # Smoke test the score # np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]), # cv.score(X_[:180], y[:180])) # test loss where greater is worse def f1_loss(y_true_, y_pred_): return -f1_score(y_true_, y_pred_) F1Loss = make_scorer(f1_loss, greater_is_better=False) cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss) cv.fit(X_[:180], y_[:180]) y_pred3 = cv.predict(X_[180:]) C3 = cv.best_estimator_.C assert C == C3 assert_array_equal(y_pred, y_pred3) def test_grid_search_precomputed_kernel(): # Test that grid search works when the input features are given in the # form of a precomputed kernel matrix X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) # compute the training kernel matrix corresponding to the linear kernel K_train = np.dot(X_[:180], X_[:180].T) y_train = y_[:180] clf = SVC(kernel='precomputed') cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) cv.fit(K_train, y_train) assert cv.best_score_ >= 0 # compute the test kernel matrix K_test = np.dot(X_[180:], X_[:180].T) y_test = y_[180:] y_pred = cv.predict(K_test) assert np.mean(y_pred == y_test) >= 0 # test error is raised when the precomputed kernel is not array-like # or sparse assert_raises(ValueError, cv.fit, K_train.tolist(), y_train) def test_grid_search_precomputed_kernel_error_nonsquare(): # Test that grid search returns an error with a non-square precomputed # training kernel matrix K_train = np.zeros((10, 20)) y_train = np.ones((10, )) clf = SVC(kernel='precomputed') cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) assert_raises(ValueError, cv.fit, K_train, y_train) class BrokenClassifier(BaseEstimator): """Broken classifier that cannot be fit twice""" def __init__(self, parameter=None): self.parameter = parameter def fit(self, X, y): assert not hasattr(self, 'has_been_fit_') self.has_been_fit_ = True def predict(self, X): return np.zeros(X.shape[0]) @ignore_warnings def test_refit(): # Regression test for bug in refitting # Simulates re-fitting a broken estimator; this used to break with # sparse SVMs. 
X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}], scoring="precision", refit=True) clf.fit(X, y) def test_refit_callable(): """ Test refit=callable, which adds flexibility in identifying the "best" estimator. """ def refit_callable(cv_results): """ A dummy function tests `refit=callable` interface. Return the index of a model that has the least `mean_test_score`. """ # Fit a dummy clf with `refit=True` to get a list of keys in # clf.cv_results_. X, y = make_classification(n_samples=100, n_features=4, random_state=42) clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.01, 0.1, 1]}, scoring='precision', refit=True) clf.fit(X, y) # Ensure that `best_index_ != 0` for this dummy clf assert clf.best_index_ != 0 # Assert every key matches those in `cv_results` for key in clf.cv_results_.keys(): assert key in cv_results return cv_results['mean_test_score'].argmin() X, y = make_classification(n_samples=100, n_features=4, random_state=42) clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.01, 0.1, 1]}, scoring='precision', refit=refit_callable) clf.fit(X, y) assert clf.best_index_ == 0 # Ensure `best_score_` is disabled when using `refit=callable` assert not hasattr(clf, 'best_score_') def test_refit_callable_invalid_type(): """ Test implementation catches the errors when 'best_index_' returns an invalid result. """ def refit_callable_invalid_type(cv_results): """ A dummy function tests when returned 'best_index_' is not integer. """ return None X, y = make_classification(n_samples=100, n_features=4, random_state=42) clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.1, 1]}, scoring='precision', refit=refit_callable_invalid_type) with pytest.raises(TypeError, match='best_index_ returned is not an integer'): clf.fit(X, y) @pytest.mark.parametrize('out_bound_value', [-1, 2]) @pytest.mark.parametrize('search_cv', [RandomizedSearchCV, GridSearchCV]) def test_refit_callable_out_bound(out_bound_value, search_cv): """ Test implementation catches the errors when 'best_index_' returns an out of bound result. """ def refit_callable_out_bound(cv_results): """ A dummy function tests when returned 'best_index_' is out of bounds. """ return out_bound_value X, y = make_classification(n_samples=100, n_features=4, random_state=42) clf = search_cv(LinearSVC(random_state=42), {'C': [0.1, 1]}, scoring='precision', refit=refit_callable_out_bound) with pytest.raises(IndexError, match='best_index_ index out of range'): clf.fit(X, y) def test_refit_callable_multi_metric(): """ Test refit=callable in multiple metric evaluation setting """ def refit_callable(cv_results): """ A dummy function tests `refit=callable` interface. Return the index of a model that has the least `mean_test_prec`. 
""" assert 'mean_test_prec' in cv_results return cv_results['mean_test_prec'].argmin() X, y = make_classification(n_samples=100, n_features=4, random_state=42) scoring = {'Accuracy': make_scorer(accuracy_score), 'prec': 'precision'} clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.01, 0.1, 1]}, scoring=scoring, refit=refit_callable) clf.fit(X, y) assert clf.best_index_ == 0 # Ensure `best_score_` is disabled when using `refit=callable` assert not hasattr(clf, 'best_score_') def test_gridsearch_nd(): # Pass X as list in GridSearchCV X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2) y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11) check_X = lambda x: x.shape[1:] == (5, 3, 2) check_y = lambda x: x.shape[1:] == (7, 11) clf = CheckingClassifier( check_X=check_X, check_y=check_y, methods_to_check=["fit"], ) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}) grid_search.fit(X_4d, y_3d).score(X, y) assert hasattr(grid_search, "cv_results_") def test_X_as_list(): # Pass X as list in GridSearchCV X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = CheckingClassifier( check_X=lambda x: isinstance(x, list), methods_to_check=["fit"], ) cv = KFold(n_splits=3) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv) grid_search.fit(X.tolist(), y).score(X, y) assert hasattr(grid_search, "cv_results_") def test_y_as_list(): # Pass y as list in GridSearchCV X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = CheckingClassifier( check_y=lambda x: isinstance(x, list), methods_to_check=["fit"], ) cv = KFold(n_splits=3) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv) grid_search.fit(X, y.tolist()).score(X, y) assert hasattr(grid_search, "cv_results_") @ignore_warnings def test_pandas_input(): # check cross_val_score doesn't destroy pandas dataframe types = [(MockDataFrame, MockDataFrame)] try: from pandas import Series, DataFrame types.append((DataFrame, Series)) except ImportError: pass X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) for InputFeatureType, TargetType in types: # X dataframe, y series X_df, y_ser = InputFeatureType(X), TargetType(y) def check_df(x): return isinstance(x, InputFeatureType) def check_series(x): return isinstance(x, TargetType) clf = CheckingClassifier(check_X=check_df, check_y=check_series) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}) grid_search.fit(X_df, y_ser).score(X_df, y_ser) grid_search.predict(X_df) assert hasattr(grid_search, "cv_results_") def test_unsupervised_grid_search(): # test grid-search with unsupervised estimator X, y = make_blobs(n_samples=50, random_state=0) km = KMeans(random_state=0, init="random", n_init=1) # Multi-metric evaluation unsupervised scoring = ['adjusted_rand_score', 'fowlkes_mallows_score'] for refit in ['adjusted_rand_score', 'fowlkes_mallows_score']: grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]), scoring=scoring, refit=refit) grid_search.fit(X, y) # Both ARI and FMS can find the right number :) assert grid_search.best_params_["n_clusters"] == 3 # Single metric evaluation unsupervised grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]), scoring='fowlkes_mallows_score') grid_search.fit(X, y) assert grid_search.best_params_["n_clusters"] == 3 # Now without a score, and without y grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4])) grid_search.fit(X) assert grid_search.best_params_["n_clusters"] == 4 def test_gridsearch_no_predict(): # test grid-search with an estimator 
without predict.
    # slight duplication of a test from KDE
    def custom_scoring(estimator, X):
        return 42 if estimator.bandwidth == .1 else 0
    X, _ = make_blobs(cluster_std=.1, random_state=1,
                      centers=[[0, 1], [1, 0], [0, 0]])
    search = GridSearchCV(KernelDensity(),
                          param_grid=dict(bandwidth=[.01, .1, 1]),
                          scoring=custom_scoring)
    search.fit(X)
    assert search.best_params_['bandwidth'] == .1
    assert search.best_score_ == 42


def test_param_sampler():
    # test basic properties of param sampler
    param_distributions = {"kernel": ["rbf", "linear"],
                           "C": uniform(0, 1)}
    sampler = ParameterSampler(param_distributions=param_distributions,
                               n_iter=10, random_state=0)
    samples = [x for x in sampler]
    assert len(samples) == 10
    for sample in samples:
        assert sample["kernel"] in ["rbf", "linear"]
        assert 0 <= sample["C"] <= 1

    # test that repeated calls yield identical parameters
    param_distributions = {"C": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
    sampler = ParameterSampler(param_distributions=param_distributions,
                               n_iter=3, random_state=0)
    assert [x for x in sampler] == [x for x in sampler]

    if sp_version >= (0, 16):
        param_distributions = {"C": uniform(0, 1)}
        sampler = ParameterSampler(param_distributions=param_distributions,
                                   n_iter=10, random_state=0)
        assert [x for x in sampler] == [x for x in sampler]


def check_cv_results_array_types(search, param_keys, score_keys):
    # Check if the search `cv_results`'s array are of correct types
    cv_results = search.cv_results_
    assert all(isinstance(cv_results[param], np.ma.MaskedArray)
               for param in param_keys)
    assert all(cv_results[key].dtype == object for key in param_keys)
    assert not any(isinstance(cv_results[key], np.ma.MaskedArray)
                   for key in score_keys)
    assert all(cv_results[key].dtype == np.float64
               for key in score_keys if not key.startswith('rank'))

    scorer_keys = search.scorer_.keys() if search.multimetric_ else ['score']

    for key in scorer_keys:
        assert cv_results['rank_test_%s' % key].dtype == np.int32


def check_cv_results_keys(cv_results, param_keys, score_keys, n_cand):
    # Test the search.cv_results_ contains all the required results
    assert_array_equal(sorted(cv_results.keys()),
                       sorted(param_keys + score_keys + ('params',)))
    assert all(cv_results[key].shape == (n_cand,)
               for key in param_keys + score_keys)


def test_grid_search_cv_results():
    X, y = make_classification(n_samples=50, n_features=4, random_state=42)

    n_splits = 3
    n_grid_points = 6
    params = [dict(kernel=['rbf', ], C=[1, 10], gamma=[0.1, 1]),
              dict(kernel=['poly', ], degree=[1, 2])]

    param_keys = ('param_C', 'param_degree', 'param_gamma', 'param_kernel')
    score_keys = ('mean_test_score', 'mean_train_score',
                  'rank_test_score',
                  'split0_test_score', 'split1_test_score',
                  'split2_test_score',
                  'split0_train_score', 'split1_train_score',
                  'split2_train_score',
                  'std_test_score', 'std_train_score',
                  'mean_fit_time', 'std_fit_time',
                  'mean_score_time', 'std_score_time')
    n_candidates = n_grid_points

    search = GridSearchCV(SVC(), cv=n_splits, param_grid=params,
                          return_train_score=True)
    search.fit(X, y)
    cv_results = search.cv_results_
    # Check if score and timing are reasonable
    assert all(cv_results['rank_test_score'] >= 1)
    # keep the conditions inside all(); asserting a bare generator expression
    # is always truthy and could never fail
    assert all(cv_results[k] >= 0 for k in score_keys
               if k != 'rank_test_score')
    assert all(cv_results[k] <= 1 for k in score_keys
               if 'time' not in k and k != 'rank_test_score')
    # Check cv_results structure
    check_cv_results_array_types(search, param_keys, score_keys)
    check_cv_results_keys(cv_results, param_keys, score_keys, n_candidates)
    # Check masking
    cv_results = search.cv_results_
    n_candidates = len(search.cv_results_['params'])
    # the kernels in `params` are 'poly' and 'rbf'; checking for 'linear'
    # would make the first assertion vacuous
    assert all((cv_results['param_C'].mask[i] and
                cv_results['param_gamma'].mask[i] and
                not cv_results['param_degree'].mask[i])
               for i in range(n_candidates)
               if cv_results['param_kernel'][i] == 'poly')
    assert all((not cv_results['param_C'].mask[i] and
                not cv_results['param_gamma'].mask[i] and
                cv_results['param_degree'].mask[i])
               for i in range(n_candidates)
               if cv_results['param_kernel'][i] == 'rbf')


def test_random_search_cv_results():
    X, y = make_classification(n_samples=50, n_features=4, random_state=42)

    n_splits = 3
    n_search_iter = 30

    params = [{'kernel': ['rbf'], 'C': expon(scale=10),
               'gamma': expon(scale=0.1)},
              {'kernel': ['poly'], 'degree': [2, 3]}]
    param_keys = ('param_C', 'param_degree', 'param_gamma', 'param_kernel')
    score_keys = ('mean_test_score', 'mean_train_score',
                  'rank_test_score',
                  'split0_test_score', 'split1_test_score',
                  'split2_test_score',
                  'split0_train_score', 'split1_train_score',
                  'split2_train_score',
                  'std_test_score', 'std_train_score',
                  'mean_fit_time', 'std_fit_time',
                  'mean_score_time', 'std_score_time')
    n_cand = n_search_iter

    search = RandomizedSearchCV(SVC(), n_iter=n_search_iter,
                                cv=n_splits,
                                param_distributions=params,
                                return_train_score=True)
    search.fit(X, y)
    cv_results = search.cv_results_
    # Check results structure
    check_cv_results_array_types(search, param_keys, score_keys)
    check_cv_results_keys(cv_results, param_keys, score_keys, n_cand)
    n_candidates = len(search.cv_results_['params'])
    # again, the sampled kernels are 'poly' and 'rbf', never 'linear'
    assert all((cv_results['param_C'].mask[i] and
                cv_results['param_gamma'].mask[i] and
                not cv_results['param_degree'].mask[i])
               for i in range(n_candidates)
               if cv_results['param_kernel'][i] == 'poly')
    assert all((not cv_results['param_C'].mask[i] and
                not cv_results['param_gamma'].mask[i] and
                cv_results['param_degree'].mask[i])
               for i in range(n_candidates)
               if cv_results['param_kernel'][i] == 'rbf')


@pytest.mark.parametrize(
    "SearchCV, specialized_params",
    [(GridSearchCV, {'param_grid': {'C': [1, 10]}}),
     (RandomizedSearchCV,
      {'param_distributions': {'C': [1, 10]}, 'n_iter': 2})]
)
def test_search_default_iid(SearchCV, specialized_params):
    # Check that scores are simply averaged over the CV splits
    # (the behaviour formerly controlled by the `iid` parameter)
    # noise-free simple 2d-data
    X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
                      cluster_std=0.1, shuffle=False, n_samples=80)
    # split dataset into two folds that are not iid
    # first one contains data of all 4 blobs, second only from two.
    mask =
np.ones(X.shape[0], dtype=bool)
numpy.ones
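A quick aside on the row above: its target call is numpy.ones, and `np.bool` is a deprecated alias for the builtin `bool` (removed in NumPy 1.24), which is why the completion is written with `dtype=bool`. A minimal standalone sketch of the same mask construction (the length 80 matches the `n_samples` above; the split point is illustrative):

import numpy as np

mask = np.ones(80, dtype=bool)  # one boolean flag per sample, all True
mask[40:] = False               # e.g. assign the second half to the other fold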
import torch import torchvision import matplotlib import matplotlib.pyplot as plt from PIL import Image from captum.attr import GuidedGradCam, GuidedBackprop from captum.attr import LayerActivation, LayerConductance, LayerGradCam from data_utils import * from image_utils import * from captum_utils import * import numpy as np from visualizers import GradCam plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' X, y, class_names = load_imagenet_val(num=5) # FOR THIS SECTION ONLY, we need to use gradients. We introduce a new model we will use explicitly for GradCAM for this. gc_model = torchvision.models.squeezenet1_1(pretrained=True) gc = GradCam() X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0).requires_grad_(True) y_tensor = torch.LongTensor(y) # Guided Back-Propagation gbp_result = gc.guided_backprop(X_tensor,y_tensor, gc_model) plt.figure(figsize=(24, 24)) for i in range(gbp_result.shape[0]): plt.subplot(1, 5, i + 1) img = gbp_result[i] img = rescale(img) plt.imshow(img) plt.title(class_names[y[i]]) plt.axis('off') plt.gcf().tight_layout() plt.savefig('visualization/guided_backprop.png') # GradCam # GradCAM. We have given you which module(=layer) that we need to capture gradients from, which you can see in conv_module variable below gc_model = torchvision.models.squeezenet1_1(pretrained=True) for param in gc_model.parameters(): param.requires_grad = True X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0).requires_grad_(True) y_tensor = torch.LongTensor(y) gradcam_result = gc.grad_cam(X_tensor, y_tensor, gc_model) plt.figure(figsize=(24, 24)) for i in range(gradcam_result.shape[0]): gradcam_val = gradcam_result[i] img = X[i] + (matplotlib.cm.jet(gradcam_val)[:,:,:3]*255) img = img /
np.max(img)
numpy.max
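For context on the completion above: dividing by `np.max(img)` rescales the image-plus-heatmap overlay so its largest entry is 1.0, keeping it in the float range `plt.imshow` expects. A minimal standalone sketch with a dummy array (the shape is an illustrative assumption, not taken from the dataset):

import numpy as np

overlay = np.random.rand(224, 224, 3) * 300.0  # stand-in for img + colormapped CAM
overlay = overlay / np.max(overlay)            # normalize into [0, 1] for display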
from data.data_loader_dad import ( NASA_Anomaly, WADI ) from exp.exp_basic import Exp_Basic from models.model import Informer from utils.tools import EarlyStopping, adjust_learning_rate from utils.metrics import metric from sklearn.metrics import classification_report import numpy as np import torch import torch.nn as nn from torch import optim from torch.utils.data import DataLoader import os import time import warnings warnings.filterwarnings('ignore') class Exp_Informer_DAD(Exp_Basic): def __init__(self, args): super(Exp_Informer_DAD, self).__init__(args) def _build_model(self): model_dict = { 'informer':Informer, } if self.args.model=='informer': model = model_dict[self.args.model]( self.args.enc_in, self.args.dec_in, self.args.c_out, self.args.seq_len, self.args.label_len, self.args.pred_len, self.args.factor, self.args.d_model, self.args.n_heads, self.args.e_layers, self.args.d_layers, self.args.d_ff, self.args.dropout, self.args.attn, self.args.embed, self.args.data[:-1], self.args.activation, self.device ) return model.double() def _get_data(self, flag): args = self.args data_dict = { 'SMAP':NASA_Anomaly, 'MSL':NASA_Anomaly, 'WADI':WADI, } Data = data_dict[self.args.data] if flag == 'test': shuffle_flag = False; drop_last = True; batch_size = args.batch_size else: shuffle_flag = True; drop_last = True; batch_size = args.batch_size data_set = Data( root_path=args.root_path, data_path=args.data_path, flag=flag, size=[args.seq_len, args.label_len, args.pred_len], features=args.features, target=args.target ) print(flag, len(data_set)) data_loader = DataLoader( data_set, batch_size=batch_size, shuffle=shuffle_flag, num_workers=args.num_workers, drop_last=drop_last) return data_set, data_loader def _select_optimizer(self): model_optim = optim.Adam(self.model.parameters(), lr=self.args.learning_rate) return model_optim def _select_criterion(self): criterion = nn.MSELoss() return criterion def vali(self, vali_data, vali_loader, criterion): self.model.eval() total_loss = [] for i, (batch_x,batch_y,batch_x_mark,batch_y_mark,batch_label) in enumerate(vali_loader): batch_x = batch_x.double().to(self.device) batch_y = batch_y.double() batch_x_mark = batch_x_mark.double().to(self.device) batch_y_mark = batch_y_mark.double().to(self.device) # decoder input dec_inp = torch.zeros_like(batch_y[:,-self.args.pred_len:,:]).double() dec_inp = torch.cat([batch_y[:,:self.args.label_len,:], dec_inp], dim=1).double().to(self.device) # encoder - decoder outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark) batch_y = batch_y[:,-self.args.pred_len:,:].to(self.device) pred = outputs.detach().cpu() true = batch_y.detach().cpu() loss = criterion(pred, true) total_loss.append(loss) total_loss = np.average(total_loss) self.model.train() return total_loss def train(self, setting): train_data, train_loader = self._get_data(flag = 'train') vali_data, vali_loader = self._get_data(flag = 'val') test_data, test_loader = self._get_data(flag = 'test') path = './checkpoints/'+setting if not os.path.exists(path): os.makedirs(path) time_now = time.time() train_steps = len(train_loader) early_stopping = EarlyStopping(patience=self.args.patience, verbose=True) model_optim = self._select_optimizer() criterion = self._select_criterion() for epoch in range(self.args.train_epochs): iter_count = 0 train_loss = [] self.model.train() for i, (batch_x,batch_y,batch_x_mark,batch_y_mark) in enumerate(train_loader): iter_count += 1 model_optim.zero_grad() batch_x = batch_x.double().to(self.device) batch_y = batch_y.double() 
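                # Editor's note (illustrative, not part of the original row):
                # the decoder input built below follows the standard Informer
                # scheme -- zero out the pred_len horizon and prepend the known
                # label_len window, i.e.
                #     dec_inp = concat(y[:, :label_len], zeros(pred_len))
                # along the time axis, so the decoder never sees future targets.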
batch_x_mark = batch_x_mark.double().to(self.device) batch_y_mark = batch_y_mark.double().to(self.device) # decoder input dec_inp = torch.zeros_like(batch_y[:,-self.args.pred_len:,:]).double() dec_inp = torch.cat([batch_y[:,:self.args.label_len,:], dec_inp], dim=1).double().to(self.device) # encoder - decoder outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark) batch_y = batch_y[:,-self.args.pred_len:,:].to(self.device) loss = criterion(outputs, batch_y) train_loss.append(loss.item()) if (i+1) % 100==0: print("\titers: {0}, epoch: {1} | loss: {2:.7f}".format(i + 1, epoch + 1, loss.item())) speed = (time.time()-time_now)/iter_count left_time = speed*((self.args.train_epochs - epoch)*train_steps - i) print('\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time)) iter_count = 0 time_now = time.time() loss.backward() model_optim.step() train_loss = np.average(train_loss) vali_loss = self.vali(vali_data, vali_loader, criterion) test_loss = self.vali(test_data, test_loader, criterion) print("Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} Test Loss: {4:.7f}".format( epoch + 1, train_steps, train_loss, vali_loss, test_loss)) early_stopping(vali_loss, self.model, path) if early_stopping.early_stop: print("Early stopping") break adjust_learning_rate(model_optim, epoch+1, self.args) best_model_path = path+'/'+'checkpoint.pth' self.model.load_state_dict(torch.load(best_model_path)) return self.model def test(self, setting): test_data, test_loader = self._get_data(flag='test') self.model.eval() preds = [] trues = [] labels = [] with torch.no_grad(): for i, (batch_x,batch_y,batch_x_mark,batch_y_mark,batch_label) in enumerate(test_loader): batch_x = batch_x.double().to(self.device) batch_y = batch_y.double() batch_x_mark = batch_x_mark.double().to(self.device) batch_y_mark = batch_y_mark.double().to(self.device) # decoder input dec_inp = torch.zeros_like(batch_y[:,-self.args.pred_len:,:]).double() dec_inp = torch.cat([batch_y[:,:self.args.label_len,:], dec_inp], dim=1).double().to(self.device) # encoder - decoder outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark) batch_y = batch_y[:,-self.args.pred_len:,:].to(self.device) pred = outputs.detach().cpu().numpy()#.squeeze() true = batch_y.detach().cpu().numpy()#.squeeze() batch_label = batch_label.long().detach().numpy() preds.append(pred) trues.append(true) labels.append(batch_label) preds =
np.array(preds)
numpy.array
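On the completion above: `np.array(preds)` stacks the per-batch prediction arrays into a single ndarray before metrics are computed. A minimal standalone sketch with dummy shapes (batch size, horizon, and feature count are assumptions, not values from the dataset):

import numpy as np

preds = [np.zeros((32, 24, 7)) for _ in range(5)]            # five batches of predictions
preds = np.array(preds)                                      # shape (5, 32, 24, 7)
preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])  # merge the batch dimensions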
# ________ # / # \ / # \ / # \/ import random import textwrap import emd_mean import AdvEMDpy import emd_basis import emd_utils import numpy as np import pandas as pd import cvxpy as cvx import seaborn as sns import matplotlib.pyplot as plt from scipy.integrate import odeint from scipy.ndimage import gaussian_filter from emd_utils import time_extension, Utility from scipy.interpolate import CubicSpline from emd_hilbert import Hilbert, hilbert_spectrum from emd_preprocess import Preprocess from emd_mean import Fluctuation from AdvEMDpy import EMD # alternate packages from PyEMD import EMD as pyemd0215 import emd as emd040 sns.set(style='darkgrid') pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001) pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time) pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series) # plot 0 - addition fig = plt.figure(figsize=(9, 4)) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('First Iteration of Sifting Algorithm') plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1) plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()], c='r', label=r'$M(t_i)$', zorder=2) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4) plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()], c='c', label=r'$m(t_j)$', zorder=3) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5) plt.yticks(ticks=[-2, -1, 0, 1, 2]) plt.xticks(ticks=[0, np.pi, 2 * np.pi], labels=[r'0', r'$\pi$', r'$2\pi$']) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/pseudo_algorithm.png') plt.show() knots = np.arange(12) time = np.linspace(0, 11, 1101) basis = emd_basis.Basis(time=time, time_series=time) b_spline_basis = basis.cubic_b_spline(knots) chsi_basis = basis.chsi_basis(knots) # plot 1 plt.title('Non-Natural Cubic B-Spline Bases at Boundary') plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $') plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $') plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $') plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $') plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $') plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $']) plt.xlim(4.4, 6.6) plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.legend(loc='upper left') plt.savefig('jss_figures/boundary_bases.png') plt.show() # plot 1a - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) knots_uniform = np.linspace(0, 2 * np.pi, 51) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0] fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) 
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Uniform Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Uniform Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Uniform Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
    for j in range(1, len(knots_uniform)):
        axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_uniform.png')
plt.show()

# plot 1b - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series)
imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric',
                                                              optimise_knots=1, verbose=False)
fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Statically Optimised Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Statically Optimised Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Statically Optimised Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
    for j in range(1, len(knots)):
        axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_1.png')
plt.show()

# plot 1c - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series)
imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric',
                                                              optimise_knots=2, verbose=False)
fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Dynamically Optimised Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Dynamically Optimised Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Dynamically Optimised Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
    for j in range(1, len(knots[i])):
        axs[i].plot(knots[i][j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_2.png')
plt.show()

# plot 1d - addition
window = 81
fig, axs = plt.subplots(2, 1)
fig.subplots_adjust(hspace=0.4)
figure_size = plt.gcf().get_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Preprocess Filtering Demonstration')
axs[1].set_title('Zoomed Region')
preprocess_time = pseudo_alg_time.copy()
np.random.seed(1)
random.seed(1)
preprocess_time_series = pseudo_alg_time_series + np.random.normal(0, 0.1, len(preprocess_time))
for i in random.sample(range(1000), 500):
    preprocess_time_series[i] += np.random.normal(0, 1)
preprocess = Preprocess(time=preprocess_time, time_series=preprocess_time_series)
axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12))
axs[0].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12))
axs[0].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13))
axs[0].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize filter', 12))
axs[0].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1],
            label=textwrap.fill('Winsorize interpolation filter', 14))
axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey',
            label=textwrap.fill('Quantile window', 12))
axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey')
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black',
            label=textwrap.fill('Zoomed region', 10))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black')
axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12))
axs[1].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12))
axs[1].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13))
axs[1].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize filter', 12))
axs[1].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1],
            label=textwrap.fill('Winsorize interpolation filter', 14))
axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey',
            label=textwrap.fill('Quantile window', 12))
axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey')
axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi)
axs[1].set_ylim(-3, 3)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[np.pi])
axs[1].set_xticklabels(labels=[r'$\pi$'])
box_0 = axs[0].get_position()
axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15))
box_1 = axs[1].get_position()
axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height])
plt.savefig('jss_figures/preprocess_filter.png')
plt.show()

# plot 1e - addition
fig, axs = plt.subplots(2, 1)
fig.subplots_adjust(hspace=0.4)
figure_size = plt.gcf().get_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Preprocess Smoothing Demonstration')
axs[1].set_title('Zoomed Region')
axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12))
axs[0].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12))
axs[0].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13))
downsampled_and_decimated = preprocess.downsample()
axs[0].plot(downsampled_and_decimated[0], downsampled_and_decimated[1],
            label=textwrap.fill('Downsampled & decimated', 11))
downsampled = preprocess.downsample(decimate=False)
axs[0].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black',
            label=textwrap.fill('Zoomed region', 10))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black')
axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12))
axs[1].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12))
axs[1].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13))
axs[1].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 13)) axs[1].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.06, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.06, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_smooth.png') plt.show() # plot 2 fig, axs = plt.subplots(1, 2, sharey=True) axs[0].set_title('Cubic B-Spline Bases') axs[0].plot(time, b_spline_basis[2, :].T, '--', label='Basis 1') axs[0].plot(time, b_spline_basis[3, :].T, '--', label='Basis 2') axs[0].plot(time, b_spline_basis[4, :].T, '--', label='Basis 3') axs[0].plot(time, b_spline_basis[5, :].T, '--', label='Basis 4') axs[0].legend(loc='upper left') axs[0].plot(5 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].plot(6 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].set_xticks([5, 6]) axs[0].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[0].set_xlim(4.5, 6.5) axs[1].set_title('Cubic Hermite Spline Bases') axs[1].plot(time, chsi_basis[10, :].T, '--') axs[1].plot(time, chsi_basis[11, :].T, '--') axs[1].plot(time, chsi_basis[12, :].T, '--') axs[1].plot(time, chsi_basis[13, :].T, '--') axs[1].plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].set_xticks([5, 6]) axs[1].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[1].set_xlim(4.5, 6.5) plt.savefig('jss_figures/comparing_bases.png') plt.show() # plot 3 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_dash = maxima_y[-1] * np.ones_like(max_dash_time) min_dash_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_dash = minima_y[-1] * np.ones_like(min_dash_time) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) max_discard = maxima_y[-1] max_discard_time = minima_x[-1] - maxima_x[-1] + minima_x[-1] max_discard_dash_time = np.linspace(max_discard_time - width, max_discard_time + width, 101) max_discard_dash = max_discard * np.ones_like(max_discard_dash_time) dash_2_time = np.linspace(minima_x[-1], max_discard_time, 101) dash_2 = np.linspace(minima_y[-1], max_discard, 101) end_point_time = time[-1] end_point = time_series[-1] time_reflect = np.linspace((5 - a) * np.pi, (5 + a) * np.pi, 101) time_series_reflect = np.flip(np.cos(np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101)) + np.cos(5 * np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101))) time_series_anti_reflect = time_series_reflect[0] - time_series_reflect utils = emd_utils.Utility(time=time, time_series=time_series_anti_reflect) anti_max_bool = utils.max_bool_func_1st_order_fd() anti_max_point_time = time_reflect[anti_max_bool] 
anti_max_point = time_series_anti_reflect[anti_max_bool]
utils = emd_utils.Utility(time=time, time_series=time_series_reflect)
no_anchor_max_time = time_reflect[utils.max_bool_func_1st_order_fd()]
no_anchor_max = time_series_reflect[utils.max_bool_func_1st_order_fd()]

point_1 = 5.4
length_distance = np.linspace(maxima_y[-1], minima_y[-1], 101)
length_distance_time = point_1 * np.pi * np.ones_like(length_distance)
length_time = np.linspace(point_1 * np.pi - width, point_1 * np.pi + width, 101)
length_top = maxima_y[-1] * np.ones_like(length_time)
length_bottom = minima_y[-1] * np.ones_like(length_time)

point_2 = 5.2
length_distance_2 = np.linspace(time_series[-1], minima_y[-1], 101)
length_distance_time_2 = point_2 * np.pi * np.ones_like(length_distance_2)
length_time_2 = np.linspace(point_2 * np.pi - width, point_2 * np.pi + width, 101)
length_top_2 = time_series[-1] * np.ones_like(length_time_2)
length_bottom_2 = minima_y[-1] * np.ones_like(length_time_2)

symmetry_axis_1_time = minima_x[-1] * np.ones(101)
symmetry_axis_2_time = time[-1] * np.ones(101)
symmetry_axis = np.linspace(-2, 2, 101)

end_time = np.linspace(time[-1] - width, time[-1] + width, 101)
end_signal = time_series[-1] * np.ones_like(end_time)

anti_symmetric_time = np.linspace(time[-1] - 0.5, time[-1] + 0.5, 101)
anti_symmetric_signal = time_series[-1] * np.ones_like(anti_symmetric_time)

ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.plot(time, time_series, linewidth=2, label='Signal')
plt.title('Symmetry Edge Effects Example')
plt.plot(time_reflect, time_series_reflect, 'g--', linewidth=2, label=textwrap.fill('Symmetric signal', 10))
plt.plot(time_reflect[:51], time_series_anti_reflect[:51], '--', c='purple', linewidth=2,
         label=textwrap.fill('Anti-symmetric signal', 10))
plt.plot(max_dash_time, max_dash, 'k-')
plt.plot(min_dash_time, min_dash, 'k-')
plt.plot(dash_1_time, dash_1, 'k--')
plt.plot(dash_2_time, dash_2, 'k--')
plt.plot(length_distance_time, length_distance, 'k--')
plt.plot(length_distance_time_2, length_distance_2, 'k--')
plt.plot(length_time, length_top, 'k-')
plt.plot(length_time, length_bottom, 'k-')
plt.plot(length_time_2, length_top_2, 'k-')
plt.plot(length_time_2, length_bottom_2, 'k-')
plt.plot(end_time, end_signal, 'k-')
plt.plot(symmetry_axis_1_time, symmetry_axis, 'r--', zorder=1)
plt.plot(anti_symmetric_time, anti_symmetric_signal, 'r--', zorder=1)
plt.plot(symmetry_axis_2_time, symmetry_axis, 'r--', label=textwrap.fill('Axes of symmetry', 10), zorder=1)
plt.text(5.1 * np.pi, -0.7, r'$\beta$L')
plt.text(5.34 * np.pi, -0.05, 'L')
plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima')
plt.scatter(max_discard_time, max_discard, c='purple', zorder=4,
            label=textwrap.fill('Symmetric Discard maxima', 10))
plt.scatter(end_point_time, end_point, c='orange', zorder=4,
            label=textwrap.fill('Symmetric Anchor maxima', 10))
plt.scatter(anti_max_point_time, anti_max_point, c='green', zorder=4,
            label=textwrap.fill('Anti-Symmetric maxima', 10))
plt.scatter(no_anchor_max_time, no_anchor_max, c='gray', zorder=4,
            label=textwrap.fill('Symmetric maxima', 10))
plt.xlim(3.9 * np.pi, 5.5 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_symmetry_anti.png')
plt.show()
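# Editor's note: the max_bool_func_1st_order_fd / min_bool_func_1st_order_fd
# helpers used throughout flag interior extrema from sign changes of the
# first-order finite difference. A minimal sketch of that idea, kept as
# comments so the script still runs (the actual emd_utils implementation may
# differ):
#     d = np.diff(x)
#     interior_max = (d[:-1] > 0) & (d[1:] < 0)   # rising then falling
#     interior_min = (d[:-1] < 0) & (d[1:] > 0)   # falling then rising
#     max_bool = np.concatenate(([False], interior_max, [False]))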
# plot 4 a = 0.21 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_1 = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_dash_2 = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_dash_time_1 = maxima_x[-1] * np.ones_like(max_dash_1) max_dash_time_2 = maxima_x[-2] * np.ones_like(max_dash_1) min_dash_1 = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_dash_2 = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_dash_time_1 = minima_x[-1] * np.ones_like(min_dash_1) min_dash_time_2 = minima_x[-2] * np.ones_like(min_dash_1) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) dash_2_time = np.linspace(maxima_x[-1], minima_x[-2], 101) dash_2 = np.linspace(maxima_y[-1], minima_y[-2], 101) s1 = (minima_y[-2] - maxima_y[-1]) / (minima_x[-2] - maxima_x[-1]) slope_based_maximum_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) slope_based_maximum = minima_y[-1] + (slope_based_maximum_time - minima_x[-1]) * s1 max_dash_time_3 = slope_based_maximum_time * np.ones_like(max_dash_1) max_dash_3 = np.linspace(slope_based_maximum - width, slope_based_maximum + width, 101) dash_3_time = np.linspace(minima_x[-1], slope_based_maximum_time, 101) dash_3 = np.linspace(minima_y[-1], slope_based_maximum, 101) s2 = (minima_y[-1] - maxima_y[-1]) / (minima_x[-1] - maxima_x[-1]) slope_based_minimum_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) slope_based_minimum = slope_based_maximum - (slope_based_maximum_time - slope_based_minimum_time) * s2 min_dash_time_3 = slope_based_minimum_time * np.ones_like(min_dash_1) min_dash_3 = np.linspace(slope_based_minimum - width, slope_based_minimum + width, 101) dash_4_time = np.linspace(slope_based_maximum_time, slope_based_minimum_time) dash_4 = np.linspace(slope_based_maximum, slope_based_minimum) maxima_dash = np.linspace(2.5 - width, 2.5 + width, 101) maxima_dash_time_1 = maxima_x[-2] * np.ones_like(maxima_dash) maxima_dash_time_2 = maxima_x[-1] * np.ones_like(maxima_dash) maxima_dash_time_3 = slope_based_maximum_time * np.ones_like(maxima_dash) maxima_line_dash_time = np.linspace(maxima_x[-2], slope_based_maximum_time, 101) maxima_line_dash = 2.5 * np.ones_like(maxima_line_dash_time) minima_dash = np.linspace(-3.4 - width, -3.4 + width, 101) minima_dash_time_1 = minima_x[-2] * np.ones_like(minima_dash) minima_dash_time_2 = minima_x[-1] * np.ones_like(minima_dash) minima_dash_time_3 = slope_based_minimum_time * np.ones_like(minima_dash) minima_line_dash_time = np.linspace(minima_x[-2], slope_based_minimum_time, 101) minima_line_dash = -3.4 * np.ones_like(minima_line_dash_time) # slightly edit signal to make difference between slope-based method and improved slope-based method more clear time_series[time >= minima_x[-1]] = 1.5 * (time_series[time >= minima_x[-1]] - time_series[time == minima_x[-1]]) + \ time_series[time == minima_x[-1]] improved_slope_based_maximum_time = time[-1] improved_slope_based_maximum = time_series[-1] improved_slope_based_minimum_time = slope_based_minimum_time improved_slope_based_minimum = improved_slope_based_maximum + s2 * (improved_slope_based_minimum_time - improved_slope_based_maximum_time) 
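# Editor's note: the slope-based extension above appends one forecast extremum
# per side by reusing local slopes, e.g. the appended maximum sits at
#     t_M = t_max[-1] + (t_max[-1] - t_max[-2])
#     y_M = y_min[-1] + s1 * (t_M - t_min[-1])
# with s1 the slope from the second-to-last minimum to the last maximum; the
# "improved" variant anchors the final line segment at the signal's end point
# instead. (Comments only -- a summary of the surrounding code, not new logic.)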
min_dash_4 = np.linspace(improved_slope_based_minimum - width, improved_slope_based_minimum + width, 101)
min_dash_time_4 = improved_slope_based_minimum_time * np.ones_like(min_dash_4)
dash_final_time = np.linspace(improved_slope_based_maximum_time, improved_slope_based_minimum_time, 101)
dash_final = np.linspace(improved_slope_based_maximum, improved_slope_based_minimum, 101)

ax = plt.subplot(111)
figure_size = plt.gcf().get_size_inches()
factor = 0.9
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
plt.plot(time, time_series, linewidth=2, label='Signal')
plt.title('Slope-Based Edge Effects Example')
plt.plot(max_dash_time_1, max_dash_1, 'k-')
plt.plot(max_dash_time_2, max_dash_2, 'k-')
plt.plot(max_dash_time_3, max_dash_3, 'k-')
plt.plot(min_dash_time_1, min_dash_1, 'k-')
plt.plot(min_dash_time_2, min_dash_2, 'k-')
plt.plot(min_dash_time_3, min_dash_3, 'k-')
plt.plot(min_dash_time_4, min_dash_4, 'k-')
plt.plot(maxima_dash_time_1, maxima_dash, 'k-')
plt.plot(maxima_dash_time_2, maxima_dash, 'k-')
plt.plot(maxima_dash_time_3, maxima_dash, 'k-')
plt.plot(minima_dash_time_1, minima_dash, 'k-')
plt.plot(minima_dash_time_2, minima_dash, 'k-')
plt.plot(minima_dash_time_3, minima_dash, 'k-')
plt.text(4.34 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$')
plt.text(4.74 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$')
plt.text(4.12 * np.pi, 2, r'$\Delta{t^{max}_{M}}$')
plt.text(4.50 * np.pi, 2, r'$\Delta{t^{max}_{M}}$')
plt.text(4.30 * np.pi, 0.35, r'$s_1$')
plt.text(4.43 * np.pi, -0.20, r'$s_2$')
plt.text(4.30 * np.pi + (minima_x[-1] - minima_x[-2]), 0.35 + (minima_y[-1] - minima_y[-2]), r'$s_1$')
plt.text(4.43 * np.pi + (slope_based_minimum_time - minima_x[-1]), -0.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$')
plt.text(4.50 * np.pi + (slope_based_minimum_time - minima_x[-1]), 1.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$')
plt.plot(minima_line_dash_time, minima_line_dash, 'k--')
plt.plot(maxima_line_dash_time, maxima_line_dash, 'k--')
plt.plot(dash_1_time, dash_1, 'k--')
plt.plot(dash_2_time, dash_2, 'k--')
plt.plot(dash_3_time, dash_3, 'k--')
plt.plot(dash_4_time, dash_4, 'k--')
plt.plot(dash_final_time, dash_final, 'k--')
plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima')
plt.scatter(slope_based_maximum_time, slope_based_maximum, c='orange', zorder=4,
            label=textwrap.fill('Slope-based maximum', 11))
plt.scatter(slope_based_minimum_time, slope_based_minimum, c='purple', zorder=4,
            label=textwrap.fill('Slope-based minimum', 11))
plt.scatter(improved_slope_based_maximum_time, improved_slope_based_maximum, c='deeppink', zorder=4,
            label=textwrap.fill('Improved slope-based maximum', 11))
plt.scatter(improved_slope_based_minimum_time, improved_slope_based_minimum, c='dodgerblue', zorder=4,
            label=textwrap.fill('Improved slope-based minimum', 11))
plt.xlim(3.9 * np.pi, 5.5 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-3, -2, -1, 0, 1, 2), ('-3', '-2', '-1', '0', '1', '2'))
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_slope_based.png')
plt.show()

# plot 5
a = 0.25
width = 0.2

time = np.linspace(0, (5 - a) * np.pi, 1001)
time_series = np.cos(time) + np.cos(5 * time)
utils = emd_utils.Utility(time=time, time_series=time_series)
max_bool = utils.max_bool_func_1st_order_fd()
maxima_x =
time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] A2 = np.abs(maxima_y[-2] - minima_y[-2]) / 2 A1 = np.abs(maxima_y[-1] - minima_y[-1]) / 2 P2 = 2 * np.abs(maxima_x[-2] - minima_x[-2]) P1 = 2 * np.abs(maxima_x[-1] - minima_x[-1]) Huang_time = (P1 / P2) * (time[time >= maxima_x[-2]] - time[time == maxima_x[-2]]) + maxima_x[-1] Huang_wave = (A1 / A2) * (time_series[time >= maxima_x[-2]] - time_series[time == maxima_x[-2]]) + maxima_y[-1] Coughlin_time = Huang_time Coughlin_wave = A1 * np.cos(2 * np.pi * (1 / P1) * (Coughlin_time - Coughlin_time[0])) Average_max_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) Average_max = (maxima_y[-2] + maxima_y[-1]) / 2 Average_min_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) Average_min = (minima_y[-2] + minima_y[-1]) / 2 utils_Huang = emd_utils.Utility(time=time, time_series=Huang_wave) Huang_max_bool = utils_Huang.max_bool_func_1st_order_fd() Huang_min_bool = utils_Huang.min_bool_func_1st_order_fd() utils_Coughlin = emd_utils.Utility(time=time, time_series=Coughlin_wave) Coughlin_max_bool = utils_Coughlin.max_bool_func_1st_order_fd() Coughlin_min_bool = utils_Coughlin.min_bool_func_1st_order_fd() Huang_max_time = Huang_time[Huang_max_bool] Huang_max = Huang_wave[Huang_max_bool] Huang_min_time = Huang_time[Huang_min_bool] Huang_min = Huang_wave[Huang_min_bool] Coughlin_max_time = Coughlin_time[Coughlin_max_bool] Coughlin_max = Coughlin_wave[Coughlin_max_bool] Coughlin_min_time = Coughlin_time[Coughlin_min_bool] Coughlin_min = Coughlin_wave[Coughlin_min_bool] max_2_x_time = np.linspace(maxima_x[-2] - width, maxima_x[-2] + width, 101) max_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) max_2_x = maxima_y[-2] * np.ones_like(max_2_x_time) min_2_x_time = np.linspace(minima_x[-2] - width, minima_x[-2] + width, 101) min_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) min_2_x = minima_y[-2] * np.ones_like(min_2_x_time) dash_max_min_2_x = np.linspace(minima_y[-2], maxima_y[-2], 101) dash_max_min_2_x_time = 5.3 * np.pi * np.ones_like(dash_max_min_2_x) max_2_y = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) max_2_y_time = maxima_x[-2] * np.ones_like(max_2_y) min_2_y = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) min_2_y_time = minima_x[-2] * np.ones_like(min_2_y) dash_max_min_2_y_time = np.linspace(minima_x[-2], maxima_x[-2], 101) dash_max_min_2_y = -1.8 * np.ones_like(dash_max_min_2_y_time) max_1_x_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) max_1_x = maxima_y[-1] * np.ones_like(max_1_x_time) min_1_x_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) min_1_x = minima_y[-1] * np.ones_like(min_1_x_time) dash_max_min_1_x = np.linspace(minima_y[-1], maxima_y[-1], 101) dash_max_min_1_x_time = 5.4 * np.pi * np.ones_like(dash_max_min_1_x) max_1_y = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) max_1_y_time = maxima_x[-1] * np.ones_like(max_1_y) min_1_y = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) min_1_y_time = 
minima_x[-1] * np.ones_like(min_1_y) dash_max_min_1_y_time = np.linspace(minima_x[-1], maxima_x[-1], 101) dash_max_min_1_y = -2.1 * np.ones_like(dash_max_min_1_y_time) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Characteristic Wave Effects Example') plt.plot(time, time_series, linewidth=2, label='Signal') plt.scatter(Huang_max_time, Huang_max, c='magenta', zorder=4, label=textwrap.fill('Huang maximum', 10)) plt.scatter(Huang_min_time, Huang_min, c='lime', zorder=4, label=textwrap.fill('Huang minimum', 10)) plt.scatter(Coughlin_max_time, Coughlin_max, c='darkorange', zorder=4, label=textwrap.fill('Coughlin maximum', 14)) plt.scatter(Coughlin_min_time, Coughlin_min, c='dodgerblue', zorder=4, label=textwrap.fill('Coughlin minimum', 14)) plt.scatter(Average_max_time, Average_max, c='orangered', zorder=4, label=textwrap.fill('Average maximum', 14)) plt.scatter(Average_min_time, Average_min, c='cyan', zorder=4, label=textwrap.fill('Average minimum', 14)) plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.plot(Huang_time, Huang_wave, '--', c='darkviolet', label=textwrap.fill('Huang Characteristic Wave', 14)) plt.plot(Coughlin_time, Coughlin_wave, '--', c='darkgreen', label=textwrap.fill('Coughlin Characteristic Wave', 14)) plt.plot(max_2_x_time, max_2_x, 'k-') plt.plot(max_2_x_time_side, max_2_x, 'k-') plt.plot(min_2_x_time, min_2_x, 'k-') plt.plot(min_2_x_time_side, min_2_x, 'k-') plt.plot(dash_max_min_2_x_time, dash_max_min_2_x, 'k--') plt.text(5.16 * np.pi, 0.85, r'$2a_2$') plt.plot(max_2_y_time, max_2_y, 'k-') plt.plot(max_2_y_time, max_2_y_side, 'k-') plt.plot(min_2_y_time, min_2_y, 'k-') plt.plot(min_2_y_time, min_2_y_side, 'k-') plt.plot(dash_max_min_2_y_time, dash_max_min_2_y, 'k--') plt.text(4.08 * np.pi, -2.2, r'$\frac{p_2}{2}$') plt.plot(max_1_x_time, max_1_x, 'k-') plt.plot(max_1_x_time_side, max_1_x, 'k-') plt.plot(min_1_x_time, min_1_x, 'k-') plt.plot(min_1_x_time_side, min_1_x, 'k-') plt.plot(dash_max_min_1_x_time, dash_max_min_1_x, 'k--') plt.text(5.42 * np.pi, -0.1, r'$2a_1$') plt.plot(max_1_y_time, max_1_y, 'k-') plt.plot(max_1_y_time, max_1_y_side, 'k-') plt.plot(min_1_y_time, min_1_y, 'k-') plt.plot(min_1_y_time, min_1_y_side, 'k-') plt.plot(dash_max_min_1_y_time, dash_max_min_1_y, 'k--') plt.text(4.48 * np.pi, -2.5, r'$\frac{p_1}{2}$') plt.xlim(3.9 * np.pi, 5.6 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_characteristic_wave.png') plt.show() # plot 6 t = np.linspace(5, 95, 100) signal_orig = np.cos(2 * np.pi * t / 50) + 0.6 * np.cos(2 * np.pi * t / 25) + 0.5 * np.sin(2 * np.pi * t / 200) util_nn = emd_utils.Utility(time=t, time_series=signal_orig) maxima = signal_orig[util_nn.max_bool_func_1st_order_fd()] minima = signal_orig[util_nn.min_bool_func_1st_order_fd()] cs_max = CubicSpline(t[util_nn.max_bool_func_1st_order_fd()], maxima) cs_min = CubicSpline(t[util_nn.min_bool_func_1st_order_fd()], minima) time = np.linspace(0, 5 * np.pi, 1001) lsq_signal = np.cos(time) + np.cos(5 * time) knots = np.linspace(0, 5 * np.pi, 101) time_extended = time_extension(time) time_series_extended = np.zeros_like(time_extended) / 0 time_series_extended[int(len(lsq_signal) - 1):int(2 * (len(lsq_signal) - 1) + 1)]
= lsq_signal neural_network_m = 200 neural_network_k = 100 # forward -> P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[(-(neural_network_m + neural_network_k - col)):(-(neural_network_m - col))] P[-1, col] = 1 # for additive constant t = lsq_signal[-neural_network_m:] # test - top seed_weights = np.ones(neural_network_k) / neural_network_k weights = 0 * seed_weights.copy() train_input = P[:-1, :] lr = 0.01 for iterations in range(1000): output = np.matmul(weights, train_input) error = (t - output) gradients = error * (- train_input) # guess average gradients average_gradients = np.mean(gradients, axis=1) # steepest descent max_gradient_vector = average_gradients * (np.abs(average_gradients) == max(np.abs(average_gradients))) adjustment = - lr * average_gradients # adjustment = - lr * max_gradient_vector weights += adjustment # test - bottom weights_right = np.hstack((weights, 0)) max_count_right = 0 min_count_right = 0 i_right = 0 while ((max_count_right < 1) or (min_count_right < 1)) and (i_right < len(lsq_signal) - 1): time_series_extended[int(2 * (len(lsq_signal) - 1) + 1 + i_right)] = \ sum(weights_right * np.hstack((time_series_extended[ int(2 * (len(lsq_signal) - 1) + 1 - neural_network_k + i_right): int(2 * (len(lsq_signal) - 1) + 1 + i_right)], 1))) i_right += 1 if i_right > 1: emd_utils_max = \ emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)], time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)]) if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0: max_count_right += 1 emd_utils_min = \ emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)], time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)]) if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0: min_count_right += 1 # backward <- P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[int(col + 1):int(col + neural_network_k + 1)] P[-1, col] = 1 # for additive constant t = lsq_signal[:neural_network_m] vx = cvx.Variable(int(neural_network_k + 1)) objective = cvx.Minimize(cvx.norm((2 * (vx * P) + 1 - t), 2)) # linear activation function is arbitrary prob = cvx.Problem(objective) result = prob.solve(verbose=True, solver=cvx.ECOS) weights_left = np.array(vx.value) max_count_left = 0 min_count_left = 0 i_left = 0 while ((max_count_left < 1) or (min_count_left < 1)) and (i_left < len(lsq_signal) - 1): time_series_extended[int(len(lsq_signal) - 2 - i_left)] = \ 2 * sum(weights_left * np.hstack((time_series_extended[int(len(lsq_signal) - 1 - i_left): int(len(lsq_signal) - 1 - i_left + neural_network_k)], 1))) + 1 i_left += 1 if i_left > 1: emd_utils_max = \ emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))], time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))]) if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0: max_count_left += 1 emd_utils_min = \ emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))], time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))]) if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0: min_count_left += 1 lsq_utils = emd_utils.Utility(time=time, 
time_series=lsq_signal) utils_extended = emd_utils.Utility(time=time_extended, time_series=time_series_extended) maxima = lsq_signal[lsq_utils.max_bool_func_1st_order_fd()] maxima_time = time[lsq_utils.max_bool_func_1st_order_fd()] maxima_extrapolate = time_series_extended[utils_extended.max_bool_func_1st_order_fd()][-1] maxima_extrapolate_time = time_extended[utils_extended.max_bool_func_1st_order_fd()][-1] minima = lsq_signal[lsq_utils.min_bool_func_1st_order_fd()] minima_time = time[lsq_utils.min_bool_func_1st_order_fd()] minima_extrapolate = time_series_extended[utils_extended.min_bool_func_1st_order_fd()][-2:] minima_extrapolate_time = time_extended[utils_extended.min_bool_func_1st_order_fd()][-2:] ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Single Neuron Neural Network Example') plt.plot(time, lsq_signal, zorder=2, label='Signal') plt.plot(time_extended, time_series_extended, c='g', zorder=1, label=textwrap.fill('Extrapolated signal', 12)) plt.scatter(maxima_time, maxima, c='r', zorder=3, label='Maxima') plt.scatter(minima_time, minima, c='b', zorder=3, label='Minima') plt.scatter(maxima_extrapolate_time, maxima_extrapolate, c='magenta', zorder=3, label=textwrap.fill('Extrapolated maxima', 12)) plt.scatter(minima_extrapolate_time, minima_extrapolate, c='cyan', zorder=4, label=textwrap.fill('Extrapolated minima', 12)) plt.plot(((time[-302] + time[-301]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k', label=textwrap.fill('Neural network inputs', 13)) plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(((time_extended[-1001] + time_extended[-1002]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k') plt.plot(((time[-202] + time[-201]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed', label=textwrap.fill('Neural network targets', 13)) plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(((time_extended[-1001] + time_extended[-1000]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed') plt.xlim(3.4 * np.pi, 5.6 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/neural_network.png') plt.show() # plot 6a np.random.seed(0) time = np.linspace(0, 5 * 
np.pi, 1001) knots_51 = np.linspace(0, 5 * np.pi, 51) time_series = np.cos(2 * time) + np.cos(4 * time) + np.cos(8 * time) noise = np.random.normal(0, 1, len(time_series)) time_series += noise advemdpy = EMD(time=time, time_series=time_series) imfs_51, hts_51, ifs_51 = advemdpy.empirical_mode_decomposition(knots=knots_51, max_imfs=3, edge_effect='symmetric_anchor', verbose=False)[:3] knots_31 = np.linspace(0, 5 * np.pi, 31) imfs_31, hts_31, ifs_31 = advemdpy.empirical_mode_decomposition(knots=knots_31, max_imfs=2, edge_effect='symmetric_anchor', verbose=False)[:3] knots_11 = np.linspace(0, 5 * np.pi, 11) imfs_11, hts_11, ifs_11 = advemdpy.empirical_mode_decomposition(knots=knots_11, max_imfs=1, edge_effect='symmetric_anchor', verbose=False)[:3] fig, axs = plt.subplots(3, 1) plt.suptitle(textwrap.fill('Comparison of Trends Extracted with Different Knot Sequences', 40)) plt.subplots_adjust(hspace=0.1) axs[0].plot(time, time_series, label='Time series') axs[0].plot(time, imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 1, IMF 2, & IMF 3 with 51 knots', 21)) print(f'DFA fluctuation with 51 knots: {np.round(np.var(time_series - (imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :])), 3)}') for knot in knots_51: axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[0].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[0].set_xticklabels(['', '', '', '', '', '']) axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[0].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[0].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].plot(time, time_series, label='Time series') axs[1].plot(time, imfs_31[1, :] + imfs_31[2, :], label=textwrap.fill('Sum of IMF 1 and IMF 2 with 31 knots', 19)) axs[1].plot(time, imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 2 and IMF 3 with 51 knots', 19)) print(f'DFA fluctuation with 31 knots: {np.round(np.var(time_series - (imfs_31[1, :] + imfs_31[2, :])), 3)}') for knot in knots_31: axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[1].plot(knot *
np.ones(101)
numpy.ones
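The forward extrapolation in the prompt above ("plot 6") trains a single linear neuron by steepest descent on a lag matrix before extending the series to the right. The following is a minimal self-contained sketch of that update rule; the array names and sizes are illustrative choices, and the bias row of the original P matrix is omitted for brevity.

import numpy as np

# lag-matrix setup: column j holds the k samples that precede target t[j]
time = np.linspace(0, 5 * np.pi, 1001)
signal = np.cos(time) + np.cos(5 * time)
k, m, lr = 100, 200, 0.01  # lag window, training columns, learning rate

P = np.zeros((k, m))
for j in range(m):
    P[:, j] = signal[-(m + k - j):-(m - j)]
t = signal[-m:]  # each target is the sample immediately after its window

weights = np.zeros(k)
for _ in range(1000):
    error = t - weights @ P                     # residuals of the linear neuron
    weights += lr * np.mean(error * P, axis=1)  # steepest-descent step

next_value = weights @ signal[-k:]  # one-step extrapolation past the signal

In the script itself, the trained weights are then applied recursively until at least one maximum and one minimum have been generated beyond the original support.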
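For the backward extension, the script fits the weights with cvxpy by minimising the 2-norm of 2 (w @ P) + 1 - t, an affine least-squares problem. As an illustrative cross-check (this is not the script's code), the same minimiser can be obtained directly with numpy, since 2 (w @ P) + 1 ~= t is equivalent to P.T @ w ~= (t - 1) / 2:

import numpy as np

time = np.linspace(0, 5 * np.pi, 1001)
signal = np.cos(time) + np.cos(5 * time)
k, m = 100, 200

P = np.zeros((k + 1, m))
for col in range(m):
    P[:-1, col] = signal[col + 1:col + k + 1]  # the k samples after target col
    P[-1, col] = 1                             # bias row (additive constant)
t = signal[:m]

w, *_ = np.linalg.lstsq(P.T, (t - 1) / 2, rcond=None)
backcast = 2 * (w @ np.hstack((signal[:k], 1))) + 1  # one step before signal[0]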
# ________ # / # \ / # \ / # \/ import random import textwrap import emd_mean import AdvEMDpy import emd_basis import emd_utils import numpy as np import pandas as pd import cvxpy as cvx import seaborn as sns import matplotlib.pyplot as plt from scipy.integrate import odeint from scipy.ndimage import gaussian_filter from emd_utils import time_extension, Utility from scipy.interpolate import CubicSpline from emd_hilbert import Hilbert, hilbert_spectrum from emd_preprocess import Preprocess from emd_mean import Fluctuation from AdvEMDpy import EMD # alternate packages from PyEMD import EMD as pyemd0215 import emd as emd040 sns.set(style='darkgrid') pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001) pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time) pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series) # plot 0 - addition fig = plt.figure(figsize=(9, 4)) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('First Iteration of Sifting Algorithm') plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1) plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()], c='r', label=r'$M(t_i)$', zorder=2) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4) plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()], c='c', label=r'$m(t_j)$', zorder=3) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5) plt.yticks(ticks=[-2, -1, 0, 1, 2]) plt.xticks(ticks=[0, np.pi, 2 * np.pi], labels=[r'0', r'$\pi$', r'$2\pi$']) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/pseudo_algorithm.png') plt.show() knots = np.arange(12) time = np.linspace(0, 11, 1101) basis = emd_basis.Basis(time=time, time_series=time) b_spline_basis = basis.cubic_b_spline(knots) chsi_basis = basis.chsi_basis(knots) # plot 1 plt.title('Non-Natural Cubic B-Spline Bases at Boundary') plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $') plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $') plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $') plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $') plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $') plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $']) plt.xlim(4.4, 6.6) plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.legend(loc='upper left') plt.savefig('jss_figures/boundary_bases.png') plt.show() # plot 1a - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) knots_uniform = np.linspace(0, 2 * np.pi, 51) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0] fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) 
plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Uniform Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Uniform Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Uniform Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots_uniform)): axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_uniform.png') plt.show() # plot 1b - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=1, verbose=False) fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Statically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Statically Optimised Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Statically Optimised Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots)): axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_1.png') plt.show() # plot 1c - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=2, verbose=False) fig,
axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Dynamically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Dynamically Optimised Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Dynamically Optimised Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots[i])): axs[i].plot(knots[i][j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_2.png') plt.show() # plot 1d - addition window = 81 fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Filtering Demonstration') axs[1].set_title('Zoomed Region') preprocess_time = pseudo_alg_time.copy() np.random.seed(1) random.seed(1) preprocess_time_series = pseudo_alg_time_series + np.random.normal(0, 0.1, len(preprocess_time)) for i in random.sample(range(1000), 500): preprocess_time_series[i] += np.random.normal(0, 1) preprocess = Preprocess(time=preprocess_time, time_series=preprocess_time_series) axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[0].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[0].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12)) axs[0].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize interpolation filter', 14)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[1].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[1].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12)) axs[1].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize interpolation filter', 14)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_filter.png') plt.show() # plot 1e - addition fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Smoothing Demonstration') axs[1].set_title('Zoomed Region') axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[0].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) downsampled_and_decimated = preprocess.downsample() axs[0].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 11)) downsampled = preprocess.downsample(decimate=False) axs[0].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[1].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) 
axs[1].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 13)) axs[1].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.06, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.06, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_smooth.png') plt.show() # plot 2 fig, axs = plt.subplots(1, 2, sharey=True) axs[0].set_title('Cubic B-Spline Bases') axs[0].plot(time, b_spline_basis[2, :].T, '--', label='Basis 1') axs[0].plot(time, b_spline_basis[3, :].T, '--', label='Basis 2') axs[0].plot(time, b_spline_basis[4, :].T, '--', label='Basis 3') axs[0].plot(time, b_spline_basis[5, :].T, '--', label='Basis 4') axs[0].legend(loc='upper left') axs[0].plot(5 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].plot(6 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].set_xticks([5, 6]) axs[0].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[0].set_xlim(4.5, 6.5) axs[1].set_title('Cubic Hermite Spline Bases') axs[1].plot(time, chsi_basis[10, :].T, '--') axs[1].plot(time, chsi_basis[11, :].T, '--') axs[1].plot(time, chsi_basis[12, :].T, '--') axs[1].plot(time, chsi_basis[13, :].T, '--') axs[1].plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].set_xticks([5, 6]) axs[1].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[1].set_xlim(4.5, 6.5) plt.savefig('jss_figures/comparing_bases.png') plt.show() # plot 3 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_dash = maxima_y[-1] * np.ones_like(max_dash_time) min_dash_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_dash = minima_y[-1] * np.ones_like(min_dash_time) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) max_discard = maxima_y[-1] max_discard_time = minima_x[-1] - maxima_x[-1] + minima_x[-1] max_discard_dash_time = np.linspace(max_discard_time - width, max_discard_time + width, 101) max_discard_dash = max_discard * np.ones_like(max_discard_dash_time) dash_2_time = np.linspace(minima_x[-1], max_discard_time, 101) dash_2 = np.linspace(minima_y[-1], max_discard, 101) end_point_time = time[-1] end_point = time_series[-1] time_reflect = np.linspace((5 - a) * np.pi, (5 + a) * np.pi, 101) time_series_reflect = np.flip(np.cos(np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101)) + np.cos(5 * np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101))) time_series_anti_reflect = time_series_reflect[0] - time_series_reflect utils = emd_utils.Utility(time=time, time_series=time_series_anti_reflect) anti_max_bool = utils.max_bool_func_1st_order_fd() anti_max_point_time = time_reflect[anti_max_bool] 
anti_max_point = time_series_anti_reflect[anti_max_bool] utils = emd_utils.Utility(time=time, time_series=time_series_reflect) no_anchor_max_time = time_reflect[utils.max_bool_func_1st_order_fd()] no_anchor_max = time_series_reflect[utils.max_bool_func_1st_order_fd()] point_1 = 5.4 length_distance = np.linspace(maxima_y[-1], minima_y[-1], 101) length_distance_time = point_1 * np.pi * np.ones_like(length_distance) length_time = np.linspace(point_1 * np.pi - width, point_1 * np.pi + width, 101) length_top = maxima_y[-1] * np.ones_like(length_time) length_bottom = minima_y[-1] * np.ones_like(length_time) point_2 = 5.2 length_distance_2 = np.linspace(time_series[-1], minima_y[-1], 101) length_distance_time_2 = point_2 * np.pi * np.ones_like(length_distance_2) length_time_2 = np.linspace(point_2 * np.pi - width, point_2 * np.pi + width, 101) length_top_2 = time_series[-1] * np.ones_like(length_time_2) length_bottom_2 = minima_y[-1] * np.ones_like(length_time_2) symmetry_axis_1_time = minima_x[-1] * np.ones(101) symmetry_axis_2_time = time[-1] * np.ones(101) symmetry_axis = np.linspace(-2, 2, 101) end_time = np.linspace(time[-1] - width, time[-1] + width, 101) end_signal = time_series[-1] * np.ones_like(end_time) anti_symmetric_time = np.linspace(time[-1] - 0.5, time[-1] + 0.5, 101) anti_symmetric_signal = time_series[-1] * np.ones_like(anti_symmetric_time) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.plot(time, time_series, linewidth=2, label='Signal') plt.title('Symmetry Edge Effects Example') plt.plot(time_reflect, time_series_reflect, 'g--', linewidth=2, label=textwrap.fill('Symmetric signal', 10)) plt.plot(time_reflect[:51], time_series_anti_reflect[:51], '--', c='purple', linewidth=2, label=textwrap.fill('Anti-symmetric signal', 10)) plt.plot(max_dash_time, max_dash, 'k-') plt.plot(min_dash_time, min_dash, 'k-') plt.plot(dash_1_time, dash_1, 'k--') plt.plot(dash_2_time, dash_2, 'k--') plt.plot(length_distance_time, length_distance, 'k--') plt.plot(length_distance_time_2, length_distance_2, 'k--') plt.plot(length_time, length_top, 'k-') plt.plot(length_time, length_bottom, 'k-') plt.plot(length_time_2, length_top_2, 'k-') plt.plot(length_time_2, length_bottom_2, 'k-') plt.plot(end_time, end_signal, 'k-') plt.plot(symmetry_axis_1_time, symmetry_axis, 'r--', zorder=1) plt.plot(anti_symmetric_time, anti_symmetric_signal, 'r--', zorder=1) plt.plot(symmetry_axis_2_time, symmetry_axis, 'r--', label=textwrap.fill('Axes of symmetry', 10), zorder=1) plt.text(5.1 * np.pi, -0.7, r'$\beta$L') plt.text(5.34 * np.pi, -0.05, 'L') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(max_discard_time, max_discard, c='purple', zorder=4, label=textwrap.fill('Symmetric Discard maxima', 10)) plt.scatter(end_point_time, end_point, c='orange', zorder=4, label=textwrap.fill('Symmetric Anchor maxima', 10)) plt.scatter(anti_max_point_time, anti_max_point, c='green', zorder=4, label=textwrap.fill('Anti-Symmetric maxima', 10)) plt.scatter(no_anchor_max_time, no_anchor_max, c='gray', zorder=4, label=textwrap.fill('Symmetric maxima', 10)) plt.xlim(3.9 * np.pi, 5.5 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_symmetry_anti.png') plt.show()
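# Aside (not part of the original script): the figure above is built from two reflections
# of the signal about its right endpoint t_N — a symmetric copy x(t_N - s) and an
# anti-symmetric copy 2*x(t_N) - x(t_N - s). A minimal, illustrative helper for that idea:
def reflect_right_edge(time_series, anti=False):
    # Mirror the samples about the final point; with anti=True, also flip the
    # mirrored copy about the endpoint value (point reflection).
    mirrored = time_series[-2::-1]
    return 2 * time_series[-1] - mirrored if anti else mirrored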
# plot 4 a = 0.21 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_1 = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_dash_2 = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_dash_time_1 = maxima_x[-1] * np.ones_like(max_dash_1) max_dash_time_2 = maxima_x[-2] * np.ones_like(max_dash_1) min_dash_1 = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_dash_2 = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_dash_time_1 = minima_x[-1] * np.ones_like(min_dash_1) min_dash_time_2 = minima_x[-2] * np.ones_like(min_dash_1) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) dash_2_time = np.linspace(maxima_x[-1], minima_x[-2], 101) dash_2 = np.linspace(maxima_y[-1], minima_y[-2], 101) s1 = (minima_y[-2] - maxima_y[-1]) / (minima_x[-2] - maxima_x[-1]) slope_based_maximum_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) slope_based_maximum = minima_y[-1] + (slope_based_maximum_time - minima_x[-1]) * s1 max_dash_time_3 = slope_based_maximum_time * np.ones_like(max_dash_1) max_dash_3 = np.linspace(slope_based_maximum - width, slope_based_maximum + width, 101) dash_3_time = np.linspace(minima_x[-1], slope_based_maximum_time, 101) dash_3 = np.linspace(minima_y[-1], slope_based_maximum, 101) s2 = (minima_y[-1] - maxima_y[-1]) / (minima_x[-1] - maxima_x[-1]) slope_based_minimum_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) slope_based_minimum = slope_based_maximum - (slope_based_maximum_time - slope_based_minimum_time) * s2 min_dash_time_3 = slope_based_minimum_time * np.ones_like(min_dash_1) min_dash_3 = np.linspace(slope_based_minimum - width, slope_based_minimum + width, 101) dash_4_time = np.linspace(slope_based_maximum_time, slope_based_minimum_time) dash_4 = np.linspace(slope_based_maximum, slope_based_minimum) maxima_dash = np.linspace(2.5 - width, 2.5 + width, 101) maxima_dash_time_1 = maxima_x[-2] * np.ones_like(maxima_dash) maxima_dash_time_2 = maxima_x[-1] * np.ones_like(maxima_dash) maxima_dash_time_3 = slope_based_maximum_time * np.ones_like(maxima_dash) maxima_line_dash_time = np.linspace(maxima_x[-2], slope_based_maximum_time, 101) maxima_line_dash = 2.5 * np.ones_like(maxima_line_dash_time) minima_dash = np.linspace(-3.4 - width, -3.4 + width, 101) minima_dash_time_1 = minima_x[-2] * np.ones_like(minima_dash) minima_dash_time_2 = minima_x[-1] * np.ones_like(minima_dash) minima_dash_time_3 = slope_based_minimum_time * np.ones_like(minima_dash) minima_line_dash_time = np.linspace(minima_x[-2], slope_based_minimum_time, 101) minima_line_dash = -3.4 * np.ones_like(minima_line_dash_time) # slightly edit signal to make difference between slope-based method and improved slope-based method more clear time_series[time >= minima_x[-1]] = 1.5 * (time_series[time >= minima_x[-1]] - time_series[time == minima_x[-1]]) + \ time_series[time == minima_x[-1]] improved_slope_based_maximum_time = time[-1] improved_slope_based_maximum = time_series[-1] improved_slope_based_minimum_time = slope_based_minimum_time improved_slope_based_minimum = improved_slope_based_maximum + s2 * (improved_slope_based_minimum_time - improved_slope_based_maximum_time) 
min_dash_4 = np.linspace(improved_slope_based_minimum - width, improved_slope_based_minimum + width, 101) min_dash_time_4 = improved_slope_based_minimum_time * np.ones_like(min_dash_4) dash_final_time = np.linspace(improved_slope_based_maximum_time, improved_slope_based_minimum_time, 101) dash_final = np.linspace(improved_slope_based_maximum, improved_slope_based_minimum, 101) ax = plt.subplot(111) figure_size = plt.gcf().get_size_inches() factor = 0.9 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) plt.plot(time, time_series, LineWidth=2, label='Signal') plt.title('Slope-Based Edge Effects Example') plt.plot(max_dash_time_1, max_dash_1, 'k-') plt.plot(max_dash_time_2, max_dash_2, 'k-') plt.plot(max_dash_time_3, max_dash_3, 'k-') plt.plot(min_dash_time_1, min_dash_1, 'k-') plt.plot(min_dash_time_2, min_dash_2, 'k-') plt.plot(min_dash_time_3, min_dash_3, 'k-') plt.plot(min_dash_time_4, min_dash_4, 'k-') plt.plot(maxima_dash_time_1, maxima_dash, 'k-') plt.plot(maxima_dash_time_2, maxima_dash, 'k-') plt.plot(maxima_dash_time_3, maxima_dash, 'k-') plt.plot(minima_dash_time_1, minima_dash, 'k-') plt.plot(minima_dash_time_2, minima_dash, 'k-') plt.plot(minima_dash_time_3, minima_dash, 'k-') plt.text(4.34 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$') plt.text(4.74 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$') plt.text(4.12 * np.pi, 2, r'$\Delta{t^{max}_{M}}$') plt.text(4.50 * np.pi, 2, r'$\Delta{t^{max}_{M}}$') plt.text(4.30 * np.pi, 0.35, r'$s_1$') plt.text(4.43 * np.pi, -0.20, r'$s_2$') plt.text(4.30 * np.pi + (minima_x[-1] - minima_x[-2]), 0.35 + (minima_y[-1] - minima_y[-2]), r'$s_1$') plt.text(4.43 * np.pi + (slope_based_minimum_time - minima_x[-1]), -0.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$') plt.text(4.50 * np.pi + (slope_based_minimum_time - minima_x[-1]), 1.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$') plt.plot(minima_line_dash_time, minima_line_dash, 'k--') plt.plot(maxima_line_dash_time, maxima_line_dash, 'k--') plt.plot(dash_1_time, dash_1, 'k--') plt.plot(dash_2_time, dash_2, 'k--') plt.plot(dash_3_time, dash_3, 'k--') plt.plot(dash_4_time, dash_4, 'k--') plt.plot(dash_final_time, dash_final, 'k--') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(slope_based_maximum_time, slope_based_maximum, c='orange', zorder=4, label=textwrap.fill('Slope-based maximum', 11)) plt.scatter(slope_based_minimum_time, slope_based_minimum, c='purple', zorder=4, label=textwrap.fill('Slope-based minimum', 11)) plt.scatter(improved_slope_based_maximum_time, improved_slope_based_maximum, c='deeppink', zorder=4, label=textwrap.fill('Improved slope-based maximum', 11)) plt.scatter(improved_slope_based_minimum_time, improved_slope_based_minimum, c='dodgerblue', zorder=4, label=textwrap.fill('Improved slope-based minimum', 11)) plt.xlim(3.9 * np.pi, 5.5 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-3, -2, -1, 0, 1, 2), ('-3', '-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_slope_based.png') plt.show() # plot 5 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = 
time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] A2 = np.abs(maxima_y[-2] - minima_y[-2]) / 2 A1 = np.abs(maxima_y[-1] - minima_y[-1]) / 2 P2 = 2 * np.abs(maxima_x[-2] - minima_x[-2]) P1 = 2 * np.abs(maxima_x[-1] - minima_x[-1]) Huang_time = (P1 / P2) * (time[time >= maxima_x[-2]] - time[time == maxima_x[-2]]) + maxima_x[-1] Huang_wave = (A1 / A2) * (time_series[time >= maxima_x[-2]] - time_series[time == maxima_x[-2]]) + maxima_y[-1] Coughlin_time = Huang_time Coughlin_wave = A1 * np.cos(2 * np.pi * (1 / P1) * (Coughlin_time - Coughlin_time[0])) Average_max_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) Average_max = (maxima_y[-2] + maxima_y[-1]) / 2 Average_min_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) Average_min = (minima_y[-2] + minima_y[-1]) / 2 utils_Huang = emd_utils.Utility(time=time, time_series=Huang_wave) Huang_max_bool = utils_Huang.max_bool_func_1st_order_fd() Huang_min_bool = utils_Huang.min_bool_func_1st_order_fd() utils_Coughlin = emd_utils.Utility(time=time, time_series=Coughlin_wave) Coughlin_max_bool = utils_Coughlin.max_bool_func_1st_order_fd() Coughlin_min_bool = utils_Coughlin.min_bool_func_1st_order_fd() Huang_max_time = Huang_time[Huang_max_bool] Huang_max = Huang_wave[Huang_max_bool] Huang_min_time = Huang_time[Huang_min_bool] Huang_min = Huang_wave[Huang_min_bool] Coughlin_max_time = Coughlin_time[Coughlin_max_bool] Coughlin_max = Coughlin_wave[Coughlin_max_bool] Coughlin_min_time = Coughlin_time[Coughlin_min_bool] Coughlin_min = Coughlin_wave[Coughlin_min_bool] max_2_x_time = np.linspace(maxima_x[-2] - width, maxima_x[-2] + width, 101) max_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) max_2_x = maxima_y[-2] * np.ones_like(max_2_x_time) min_2_x_time = np.linspace(minima_x[-2] - width, minima_x[-2] + width, 101) min_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) min_2_x = minima_y[-2] * np.ones_like(min_2_x_time) dash_max_min_2_x = np.linspace(minima_y[-2], maxima_y[-2], 101) dash_max_min_2_x_time = 5.3 * np.pi * np.ones_like(dash_max_min_2_x) max_2_y = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) max_2_y_time = maxima_x[-2] * np.ones_like(max_2_y) min_2_y = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) min_2_y_time = minima_x[-2] * np.ones_like(min_2_y) dash_max_min_2_y_time = np.linspace(minima_x[-2], maxima_x[-2], 101) dash_max_min_2_y = -1.8 * np.ones_like(dash_max_min_2_y_time) max_1_x_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) max_1_x = maxima_y[-1] * np.ones_like(max_1_x_time) min_1_x_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) min_1_x = minima_y[-1] * np.ones_like(min_1_x_time) dash_max_min_1_x = np.linspace(minima_y[-1], maxima_y[-1], 101) dash_max_min_1_x_time = 5.4 * np.pi * np.ones_like(dash_max_min_1_x) max_1_y = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) max_1_y_time = maxima_x[-1] * np.ones_like(max_1_y) min_1_y = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) min_1_y_time = 
minima_x[-1] * np.ones_like(min_1_y) dash_max_min_1_y_time = np.linspace(minima_x[-1], maxima_x[-1], 101) dash_max_min_1_y = -2.1 * np.ones_like(dash_max_min_1_y_time) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Characteristic Wave Effects Example') plt.plot(time, time_series, LineWidth=2, label='Signal') plt.scatter(Huang_max_time, Huang_max, c='magenta', zorder=4, label=textwrap.fill('Huang maximum', 10)) plt.scatter(Huang_min_time, Huang_min, c='lime', zorder=4, label=textwrap.fill('Huang minimum', 10)) plt.scatter(Coughlin_max_time, Coughlin_max, c='darkorange', zorder=4, label=textwrap.fill('Coughlin maximum', 14)) plt.scatter(Coughlin_min_time, Coughlin_min, c='dodgerblue', zorder=4, label=textwrap.fill('Coughlin minimum', 14)) plt.scatter(Average_max_time, Average_max, c='orangered', zorder=4, label=textwrap.fill('Average maximum', 14)) plt.scatter(Average_min_time, Average_min, c='cyan', zorder=4, label=textwrap.fill('Average minimum', 14)) plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.plot(Huang_time, Huang_wave, '--', c='darkviolet', label=textwrap.fill('Huang Characteristic Wave', 14)) plt.plot(Coughlin_time, Coughlin_wave, '--', c='darkgreen', label=textwrap.fill('Coughlin Characteristic Wave', 14)) plt.plot(max_2_x_time, max_2_x, 'k-') plt.plot(max_2_x_time_side, max_2_x, 'k-') plt.plot(min_2_x_time, min_2_x, 'k-') plt.plot(min_2_x_time_side, min_2_x, 'k-') plt.plot(dash_max_min_2_x_time, dash_max_min_2_x, 'k--') plt.text(5.16 * np.pi, 0.85, r'$2a_2$') plt.plot(max_2_y_time, max_2_y, 'k-') plt.plot(max_2_y_time, max_2_y_side, 'k-') plt.plot(min_2_y_time, min_2_y, 'k-') plt.plot(min_2_y_time, min_2_y_side, 'k-') plt.plot(dash_max_min_2_y_time, dash_max_min_2_y, 'k--') plt.text(4.08 * np.pi, -2.2, r'$\frac{p_2}{2}$') plt.plot(max_1_x_time, max_1_x, 'k-') plt.plot(max_1_x_time_side, max_1_x, 'k-') plt.plot(min_1_x_time, min_1_x, 'k-') plt.plot(min_1_x_time_side, min_1_x, 'k-') plt.plot(dash_max_min_1_x_time, dash_max_min_1_x, 'k--') plt.text(5.42 * np.pi, -0.1, r'$2a_1$') plt.plot(max_1_y_time, max_1_y, 'k-') plt.plot(max_1_y_time, max_1_y_side, 'k-') plt.plot(min_1_y_time, min_1_y, 'k-') plt.plot(min_1_y_time, min_1_y_side, 'k-') plt.plot(dash_max_min_1_y_time, dash_max_min_1_y, 'k--') plt.text(4.48 * np.pi, -2.5, r'$\frac{p_1}{2}$') plt.xlim(3.9 * np.pi, 5.6 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_characteristic_wave.png') plt.show() # plot 6 t = np.linspace(5, 95, 100) signal_orig = np.cos(2 * np.pi * t / 50) + 0.6 * np.cos(2 * np.pi * t / 25) + 0.5 * np.sin(2 * np.pi * t / 200) util_nn = emd_utils.Utility(time=t, time_series=signal_orig) maxima = signal_orig[util_nn.max_bool_func_1st_order_fd()] minima = signal_orig[util_nn.min_bool_func_1st_order_fd()] cs_max = CubicSpline(t[util_nn.max_bool_func_1st_order_fd()], maxima) cs_min = CubicSpline(t[util_nn.min_bool_func_1st_order_fd()], minima) time = np.linspace(0, 5 * np.pi, 1001) lsq_signal = np.cos(time) + np.cos(5 * time) knots = np.linspace(0, 5 * np.pi, 101) time_extended = time_extension(time) time_series_extended = np.zeros_like(time_extended) / 0 time_series_extended[int(len(lsq_signal) - 1):int(2 * (len(lsq_signal) - 1) + 1)] 
= lsq_signal neural_network_m = 200 neural_network_k = 100 # forward -> P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[(-(neural_network_m + neural_network_k - col)):(-(neural_network_m - col))] P[-1, col] = 1 # for additive constant t = lsq_signal[-neural_network_m:] # test - top seed_weights = np.ones(neural_network_k) / neural_network_k weights = 0 * seed_weights.copy() train_input = P[:-1, :] lr = 0.01 for iterations in range(1000): output = np.matmul(weights, train_input) error = (t - output) gradients = error * (- train_input) # guess average gradients average_gradients = np.mean(gradients, axis=1) # steepest descent max_gradient_vector = average_gradients * (np.abs(average_gradients) == max(np.abs(average_gradients))) adjustment = - lr * average_gradients # adjustment = - lr * max_gradient_vector weights += adjustment # test - bottom weights_right = np.hstack((weights, 0)) max_count_right = 0 min_count_right = 0 i_right = 0 while ((max_count_right < 1) or (min_count_right < 1)) and (i_right < len(lsq_signal) - 1): time_series_extended[int(2 * (len(lsq_signal) - 1) + 1 + i_right)] = \ sum(weights_right * np.hstack((time_series_extended[ int(2 * (len(lsq_signal) - 1) + 1 - neural_network_k + i_right): int(2 * (len(lsq_signal) - 1) + 1 + i_right)], 1))) i_right += 1 if i_right > 1: emd_utils_max = \ emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)], time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)]) if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0: max_count_right += 1 emd_utils_min = \ emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)], time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)]) if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0: min_count_right += 1 # backward <- P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[int(col + 1):int(col + neural_network_k + 1)] P[-1, col] = 1 # for additive constant t = lsq_signal[:neural_network_m] vx = cvx.Variable(int(neural_network_k + 1)) objective = cvx.Minimize(cvx.norm((2 * (vx * P) + 1 - t), 2)) # linear activation function is arbitrary prob = cvx.Problem(objective) result = prob.solve(verbose=True, solver=cvx.ECOS) weights_left = np.array(vx.value) max_count_left = 0 min_count_left = 0 i_left = 0 while ((max_count_left < 1) or (min_count_left < 1)) and (i_left < len(lsq_signal) - 1): time_series_extended[int(len(lsq_signal) - 2 - i_left)] = \ 2 * sum(weights_left * np.hstack((time_series_extended[int(len(lsq_signal) - 1 - i_left): int(len(lsq_signal) - 1 - i_left + neural_network_k)], 1))) + 1 i_left += 1 if i_left > 1: emd_utils_max = \ emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))], time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))]) if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0: max_count_left += 1 emd_utils_min = \ emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))], time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))]) if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0: min_count_left += 1 lsq_utils = emd_utils.Utility(time=time, 
time_series=lsq_signal) utils_extended = emd_utils.Utility(time=time_extended, time_series=time_series_extended) maxima = lsq_signal[lsq_utils.max_bool_func_1st_order_fd()] maxima_time = time[lsq_utils.max_bool_func_1st_order_fd()] maxima_extrapolate = time_series_extended[utils_extended.max_bool_func_1st_order_fd()][-1] maxima_extrapolate_time = time_extended[utils_extended.max_bool_func_1st_order_fd()][-1] minima = lsq_signal[lsq_utils.min_bool_func_1st_order_fd()] minima_time = time[lsq_utils.min_bool_func_1st_order_fd()] minima_extrapolate = time_series_extended[utils_extended.min_bool_func_1st_order_fd()][-2:] minima_extrapolate_time = time_extended[utils_extended.min_bool_func_1st_order_fd()][-2:] ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Single Neuron Neural Network Example') plt.plot(time, lsq_signal, zorder=2, label='Signal') plt.plot(time_extended, time_series_extended, c='g', zorder=1, label=textwrap.fill('Extrapolated signal', 12)) plt.scatter(maxima_time, maxima, c='r', zorder=3, label='Maxima') plt.scatter(minima_time, minima, c='b', zorder=3, label='Minima') plt.scatter(maxima_extrapolate_time, maxima_extrapolate, c='magenta', zorder=3, label=textwrap.fill('Extrapolated maxima', 12)) plt.scatter(minima_extrapolate_time, minima_extrapolate, c='cyan', zorder=4, label=textwrap.fill('Extrapolated minima', 12)) plt.plot(((time[-302] + time[-301]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k', label=textwrap.fill('Neural network inputs', 13)) plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(((time_extended[-1001] + time_extended[-1002]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k') plt.plot(((time[-202] + time[-201]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed', label=textwrap.fill('Neural network targets', 13)) plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(((time_extended[-1001] + time_extended[-1000]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed') plt.xlim(3.4 * np.pi, 5.6 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/neural_network.png') plt.show() # plot 6a np.random.seed(0) time = np.linspace(0, 5 * 
np.pi, 1001) knots_51 = np.linspace(0, 5 * np.pi, 51) time_series = np.cos(2 * time) + np.cos(4 * time) + np.cos(8 * time) noise = np.random.normal(0, 1, len(time_series)) time_series += noise advemdpy = EMD(time=time, time_series=time_series) imfs_51, hts_51, ifs_51 = advemdpy.empirical_mode_decomposition(knots=knots_51, max_imfs=3, edge_effect='symmetric_anchor', verbose=False)[:3] knots_31 = np.linspace(0, 5 * np.pi, 31) imfs_31, hts_31, ifs_31 = advemdpy.empirical_mode_decomposition(knots=knots_31, max_imfs=2, edge_effect='symmetric_anchor', verbose=False)[:3] knots_11 = np.linspace(0, 5 * np.pi, 11) imfs_11, hts_11, ifs_11 = advemdpy.empirical_mode_decomposition(knots=knots_11, max_imfs=1, edge_effect='symmetric_anchor', verbose=False)[:3] fig, axs = plt.subplots(3, 1) plt.suptitle(textwrap.fill('Comparison of Trends Extracted with Different Knot Sequences', 40)) plt.subplots_adjust(hspace=0.1) axs[0].plot(time, time_series, label='Time series') axs[0].plot(time, imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 1, IMF 2, & IMF 3 with 51 knots', 21)) print(f'DFA fluctuation with 51 knots: {np.round(np.var(time_series - (imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :])), 3)}') for knot in knots_51: axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[0].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[0].set_xticklabels(['', '', '', '', '', '']) axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[0].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[0].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].plot(time, time_series, label='Time series') axs[1].plot(time, imfs_31[1, :] + imfs_31[2, :], label=textwrap.fill('Sum of IMF 1 and IMF 2 with 31 knots', 19)) axs[1].plot(time, imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 2 and IMF 3 with 51 knots', 19)) print(f'DFA fluctuation with 31 knots: {np.round(np.var(time_series - (imfs_31[1, :] + imfs_31[2, :])), 3)}') for knot in knots_31: axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[1].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[1].set_xticklabels(['', '', '', '', '', '']) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) axs[1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[1].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[1].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[1].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') axs[2].plot(time, time_series, label='Time series') axs[2].plot(time, imfs_11[1, :], label='IMF 1 with 11 knots') axs[2].plot(time, imfs_31[2, :], label='IMF 2 with 31 knots') axs[2].plot(time, imfs_51[3, :], label='IMF 
3 with 51 knots') print(f'DFA fluctuation with 11 knots: {np.round(np.var(time_series - imfs_51[3, :]), 3)}') for knot in knots_11: axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[2].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[2].set_xticklabels(['$0$', r'$\pi$', r'$2\pi$', r'$3\pi$', r'$4\pi$', r'$5\pi$']) box_2 = axs[2].get_position() axs[2].set_position([box_2.x0 - 0.05, box_2.y0, box_2.width * 0.85, box_2.height]) axs[2].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[2].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[2].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[2].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[2].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') plt.savefig('jss_figures/DFA_different_trends.png') plt.show() # plot 6b fig, axs = plt.subplots(3, 1) plt.suptitle(textwrap.fill('Comparison of Trends Extracted with Different Knot Sequences Zoomed Region', 40)) plt.subplots_adjust(hspace=0.1) axs[0].plot(time, time_series, label='Time series') axs[0].plot(time, imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 1, IMF 2, & IMF 3 with 51 knots', 21)) for knot in knots_51: axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[0].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[0].set_xticklabels(['', '', '', '', '', '']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[0].set_ylim(-5.5, 5.5) axs[0].set_xlim(0.95 * np.pi, 1.55 * np.pi) axs[1].plot(time, time_series, label='Time series') axs[1].plot(time, imfs_31[1, :] + imfs_31[2, :], label=textwrap.fill('Sum of IMF 1 and IMF 2 with 31 knots', 19)) axs[1].plot(time, imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 2 and IMF 3 with 51 knots', 19)) for knot in knots_31: axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[1].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[1].set_xticklabels(['', '', '', '', '', '']) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) axs[1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].set_ylim(-5.5, 5.5) axs[1].set_xlim(0.95 * np.pi, 1.55 * np.pi) axs[2].plot(time, time_series, label='Time series') axs[2].plot(time, imfs_11[1, :], label='IMF 1 with 11 knots') axs[2].plot(time, imfs_31[2, :], label='IMF 2 with 31 knots') axs[2].plot(time, imfs_51[3, :], label='IMF 3 with 51 knots') for knot in knots_11: axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[2].set_xticks([np.pi, (3 / 2) * np.pi]) axs[2].set_xticklabels([r'$\pi$', r'$\frac{3}{2}\pi$']) box_2 = axs[2].get_position() axs[2].set_position([box_2.x0 - 0.05, box_2.y0, box_2.width * 0.85, box_2.height]) 
axs[2].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[2].set_ylim(-5.5, 5.5) axs[2].set_xlim(0.95 * np.pi, 1.55 * np.pi) plt.savefig('jss_figures/DFA_different_trends_zoomed.png') plt.show() hs_outputs = hilbert_spectrum(time, imfs_51, hts_51, ifs_51, max_frequency=12, plot=False) # plot 6c ax = plt.subplot(111) figure_size = plt.gcf().get_size_inches() factor = 0.9 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Simple Sinusoidal Time Series with Added Noise', 50)) x_hs, y, z = hs_outputs z_min, z_max = 0, np.abs(z).max() ax.pcolormesh(x_hs, y, np.abs(z), cmap='gist_rainbow', vmin=z_min, vmax=z_max) ax.plot(x_hs[0, :], 8 * np.ones_like(x_hs[0, :]), '--', label=r'$\omega = 8$', linewidth=3) ax.plot(x_hs[0, :], 4 * np.ones_like(x_hs[0, :]), '--', label=r'$\omega = 4$', linewidth=3) ax.plot(x_hs[0, :], 2 * np.ones_like(x_hs[0, :]), '--', label=r'$\omega = 2$', linewidth=3) ax.set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi]) ax.set_xticklabels(['$0$', r'$\pi$', r'$2\pi$', r'$3\pi$', r'$4\pi$']) plt.ylabel(r'Frequency (rad.s$^{-1}$)') plt.xlabel('Time (s)') box_0 = ax.get_position() ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.85, box_0.height * 0.9]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/DFA_hilbert_spectrum.png') plt.show() # plot 6d time = np.linspace(0, 5 * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) knots = np.linspace(0, 5 * np.pi, 51) fluc = Fluctuation(time=time, time_series=time_series) max_unsmoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='maxima', smooth=False) max_smoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='maxima', smooth=True) min_unsmoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='minima', smooth=False) min_smoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='minima', smooth=True) util = Utility(time=time, time_series=time_series) maxima = util.max_bool_func_1st_order_fd() minima = util.min_bool_func_1st_order_fd() ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title(textwrap.fill('Plot Demonstrating Unsmoothed Extrema Envelopes if Schoenberg–Whitney Conditions are Not Satisfied', 50)) plt.plot(time, time_series, label='Time series', zorder=2, linewidth=2) plt.scatter(time[maxima], time_series[maxima], c='r', label='Maxima', zorder=10) plt.scatter(time[minima], time_series[minima], c='b', label='Minima', zorder=10) plt.plot(time, max_unsmoothed[0], label=textwrap.fill('Unsmoothed maxima envelope', 10), c='darkorange') plt.plot(time, max_smoothed[0], label=textwrap.fill('Smoothed maxima envelope', 10), c='red') plt.plot(time, min_unsmoothed[0], label=textwrap.fill('Unsmoothed minima envelope', 10), c='cyan') plt.plot(time, min_smoothed[0], label=textwrap.fill('Smoothed minima envelope', 10), c='blue') for knot in knots[:-1]: plt.plot(knot * np.ones(101), np.linspace(-3.0, -2.0, 101), '--', c='grey', zorder=1) plt.plot(knots[-1] * np.ones(101), np.linspace(-3.0, -2.0, 101), '--', c='grey', label='Knots', zorder=1) plt.xticks((0, 1 * np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi), (r'$0$', r'$\pi$', r'2$\pi$', r'3$\pi$', r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) plt.xlim(-0.25 * np.pi, 5.25 * np.pi) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0,
box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/Schoenberg_Whitney_Conditions.png') plt.show() # plot 7 a = 0.25 width = 0.2 time = np.linspace((0 + a) * np.pi, (5 - a) * np.pi, 1001) knots = np.linspace((0 + a) * np.pi, (5 - a) * np.pi, 11) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] inflection_bool = utils.inflection_point() inflection_x = time[inflection_bool] inflection_y = time_series[inflection_bool] fluctuation = emd_mean.Fluctuation(time=time, time_series=time_series) maxima_envelope = fluctuation.envelope_basis_function_approximation(knots, 'maxima', smooth=False, smoothing_penalty=0.2, edge_effect='none', spline_method='b_spline')[0] maxima_envelope_smooth = fluctuation.envelope_basis_function_approximation(knots, 'maxima', smooth=True, smoothing_penalty=0.2, edge_effect='none', spline_method='b_spline')[0] minima_envelope = fluctuation.envelope_basis_function_approximation(knots, 'minima', smooth=False, smoothing_penalty=0.2, edge_effect='none', spline_method='b_spline')[0] minima_envelope_smooth = fluctuation.envelope_basis_function_approximation(knots, 'minima', smooth=True, smoothing_penalty=0.2, edge_effect='none', spline_method='b_spline')[0] inflection_points_envelope = fluctuation.direct_detrended_fluctuation_estimation(knots, smooth=True, smoothing_penalty=0.2, technique='inflection_points')[0] binomial_points_envelope = fluctuation.direct_detrended_fluctuation_estimation(knots, smooth=True, smoothing_penalty=0.2, technique='binomial_average', order=21, increment=20)[0] derivative_of_lsq = utils.derivative_forward_diff() derivative_time = time[:-1] derivative_knots = np.linspace(knots[0], knots[-1], 31) # change (1) detrended_fluctuation_technique and (2) max_internal_iter and (3) debug (confusing with external debugging) emd = AdvEMDpy.EMD(time=derivative_time, time_series=derivative_of_lsq) imf_1_of_derivative = emd.empirical_mode_decomposition(knots=derivative_knots, knot_time=derivative_time, text=False, verbose=False)[0][1, :] utils = emd_utils.Utility(time=time[:-1], time_series=imf_1_of_derivative) optimal_maxima = np.r_[False, utils.derivative_forward_diff() < 0, False] & \ np.r_[utils.zero_crossing() == 1, False] optimal_minima = np.r_[False, utils.derivative_forward_diff() > 0, False] & \ np.r_[utils.zero_crossing() == 1, False] EEMD_maxima_envelope = fluctuation.envelope_basis_function_approximation_fixed_points(knots, 'maxima', optimal_maxima, optimal_minima, smooth=False, smoothing_penalty=0.2, edge_effect='none')[0] EEMD_minima_envelope = fluctuation.envelope_basis_function_approximation_fixed_points(knots, 'minima', optimal_maxima, optimal_minima, smooth=False, smoothing_penalty=0.2, edge_effect='none')[0] ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Detrended Fluctuation Analysis Examples') plt.plot(time, time_series, LineWidth=2, label='Time series') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(time[optimal_maxima], time_series[optimal_maxima], c='darkred', zorder=4, label=textwrap.fill('Optimal maxima', 10)) plt.scatter(time[optimal_minima], time_series[optimal_minima], c='darkblue', zorder=4, 
label=textwrap.fill('Optimal minima', 10)) plt.scatter(inflection_x, inflection_y, c='magenta', zorder=4, label=textwrap.fill('Inflection points', 10)) plt.plot(time, maxima_envelope, c='darkblue', label=textwrap.fill('EMD envelope', 10)) plt.plot(time, minima_envelope, c='darkblue') plt.plot(time, (maxima_envelope + minima_envelope) / 2, c='darkblue') plt.plot(time, maxima_envelope_smooth, c='darkred', label=textwrap.fill('SEMD envelope', 10)) plt.plot(time, minima_envelope_smooth, c='darkred') plt.plot(time, (maxima_envelope_smooth + minima_envelope_smooth) / 2, c='darkred') plt.plot(time, EEMD_maxima_envelope, c='darkgreen', label=textwrap.fill('EEMD envelope', 10)) plt.plot(time, EEMD_minima_envelope, c='darkgreen') plt.plot(time, (EEMD_maxima_envelope + EEMD_minima_envelope) / 2, c='darkgreen') plt.plot(time, inflection_points_envelope, c='darkorange', label=textwrap.fill('Inflection point envelope', 10)) plt.plot(time, binomial_points_envelope, c='deeppink', label=textwrap.fill('Binomial average envelope', 10)) plt.plot(time, np.cos(time), c='black', label='True mean') plt.xticks((0, 1 * np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi), (r'$0$', r'$\pi$', r'2$\pi$', r'3$\pi$', r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) plt.xlim(-0.25 * np.pi, 5.25 * np.pi) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/detrended_fluctuation_analysis.png') plt.show() # Duffing Equation Example def duffing_equation(xy, ts): gamma = 0.1 epsilon = 1 omega = ((2 * np.pi) / 25) return [xy[1], xy[0] - epsilon * xy[0] ** 3 + gamma * np.cos(omega * ts)] t = np.linspace(0, 150, 1501) XY0 = [1, 1] solution = odeint(duffing_equation, XY0, t) x = solution[:, 0] dxdt = solution[:, 1] x_points = [0, 50, 100, 150] x_names = [0, 50, 100, 150] y_points_1 = [-2, 0, 2] y_points_2 = [-1, 0, 1] fig, axs = plt.subplots(2, 1) plt.subplots_adjust(hspace=0.2) axs[0].plot(t, x) axs[0].set_title('Duffing Equation Displacement') axs[0].set_ylim([-2, 2]) axs[0].set_xlim([0, 150]) axs[1].plot(t, dxdt) axs[1].set_title('Duffing Equation Velocity') axs[1].set_ylim([-1.5, 1.5]) axs[1].set_xlim([0, 150]) axis = 0 for ax in axs.flat: ax.label_outer() if axis == 0: ax.set_ylabel('x(t)') ax.set_yticks(y_points_1) if axis == 1: ax.set_ylabel(r'$ \dfrac{dx(t)}{dt} $') ax.set(xlabel='t') ax.set_yticks(y_points_2) ax.set_xticks(x_points) ax.set_xticklabels(x_names) axis += 1 plt.savefig('jss_figures/Duffing_equation.png') plt.show() # compare other packages Duffing - top pyemd = pyemd0215() py_emd = pyemd(x) IP, IF, IA = emd040.spectra.frequency_transform(py_emd.T, 10, 'hilbert') freq_edges, freq_bins = emd040.spectra.define_hist_bins(0, 0.2, 100) hht = emd040.spectra.hilberthuang(IF, IA, freq_edges) hht = gaussian_filter(hht, sigma=1) ax = plt.subplot(111) figure_size = plt.gcf().get_size_inches() factor = 1.0 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Duffing Equation using PyEMD 0.2.10', 40)) plt.pcolormesh(t, freq_bins, hht, cmap='gist_rainbow', vmin=0, vmax=np.max(np.max(np.abs(hht)))) plt.plot(t[:-1], 0.124 * np.ones_like(t[:-1]), '--', label=textwrap.fill('Hamiltonian frequency approximation', 15)) plt.plot(t[:-1], 0.04 * np.ones_like(t[:-1]), 'g--', label=textwrap.fill('Driving function frequency', 15)) plt.xticks([0, 50, 100, 150]) plt.yticks([0, 0.1, 0.2])
plt.ylabel('Frequency (Hz)') plt.xlabel('Time (s)') box_0 = ax.get_position() ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.75, box_0.height * 0.9]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/Duffing_equation_ht_pyemd.png') plt.show() emd_sift = emd040.sift.sift(x) IP, IF, IA = emd040.spectra.frequency_transform(emd_sift, 10, 'hilbert') freq_edges, freq_bins = emd040.spectra.define_hist_bins(0, 0.2, 100) hht = emd040.spectra.hilberthuang(IF, IA, freq_edges) hht = gaussian_filter(hht, sigma=1) ax = plt.subplot(111) figure_size = plt.gcf().get_size_inches() factor = 1.0 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Duffing Equation using emd 0.3.3', 40)) plt.pcolormesh(t, freq_bins, hht, cmap='gist_rainbow', vmin=0, vmax=np.max(np.max(np.abs(hht)))) plt.plot(t[:-1], 0.124 * np.ones_like(t[:-1]), '--', label=textwrap.fill('Hamiltonian frequency approximation', 15)) plt.plot(t[:-1], 0.04 * np.ones_like(t[:-1]), 'g--', label=textwrap.fill('Driving function frequency', 15)) plt.xticks([0, 50, 100, 150]) plt.yticks([0, 0.1, 0.2]) plt.ylabel('Frequency (Hz)') plt.xlabel('Time (s)') box_0 = ax.get_position() ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.75, box_0.height * 0.9]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/Duffing_equation_ht_emd.png') plt.show() # compare other packages Duffing - bottom emd_duffing = AdvEMDpy.EMD(time=t, time_series=x) emd_duff, emd_ht_duff, emd_if_duff, _, _, _, _ = emd_duffing.empirical_mode_decomposition(verbose=False) fig, axs = plt.subplots(2, 1) plt.subplots_adjust(hspace=0.3) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) axs[0].plot(t, emd_duff[1, :], label='AdvEMDpy') axs[0].plot(t, py_emd[0, :], '--', label='PyEMD 0.2.10') axs[0].plot(t, emd_sift[:, 0], '--', label='emd 0.3.3') axs[0].set_title('IMF 1') axs[0].set_ylim([-2, 2]) axs[0].set_xlim([0, 150]) axs[1].plot(t, emd_duff[2, :], label='AdvEMDpy') print(f'AdvEMDpy driving function error: {np.round(sum(abs(0.1 * np.cos(0.04 * 2 * np.pi * t) - emd_duff[2, :])), 3)}') axs[1].plot(t, py_emd[1, :], '--', label='PyEMD 0.2.10') print(f'PyEMD driving function error: {np.round(sum(abs(0.1 *
np.cos(0.04 * 2 * np.pi * t)
numpy.cos
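Worked example for this row's completion (numpy.cos): a minimal, self-contained sketch of the error computation the truncated print statement above performs. The stand-in IMF below is synthetic noise added to the driving signal, not output from any of the EMD packages above.

import numpy as np

t = np.linspace(0, 150, 1501)
driving = 0.1 * np.cos(0.04 * 2 * np.pi * t)    # gamma * cos(omega * t), omega = 2 * pi / 25
imf = driving + 0.01 * np.random.randn(len(t))  # synthetic stand-in for an extracted IMF
error = np.round(np.sum(np.abs(driving - imf)), 3)
print(f'driving function error: {error}')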
import os import random from typing import Any, Dict, List, Union import numpy as np import torch from colorama import Fore, Style from sklearn.metrics import f1_score from sklearn.metrics import precision_recall_fscore_support as score from sklearn.metrics import precision_score, recall_score def highlight(input_: Any) -> str: return Fore.YELLOW + str(input_) + Style.RESET_ALL def get_intent_labels(args: Any) -> List[str]: with open( os.path.join(args.data_dir, args.intent_label_file), "r", encoding="utf-8" ) as f: return [label.strip() for label in f] def get_slot_labels(args: Any) -> List[str]: with open( os.path.join(args.data_dir, args.slot_label_file), "r", encoding="utf-8" ) as f: return [label.strip() for label in f] def get_pos_labels(args: Any) -> List[str]: with open( os.path.join(args.data_dir, args.pos_label_file), "r", encoding="utf-8" ) as f: return [label.strip() for label in f] def set_torch_seed(seed: Any, no_cuda: bool) -> None: random.seed(seed)
np.random.seed(seed)
numpy.random.seed
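A plausible continuation of the truncated set_torch_seed above, shown only as a sketch (the helper name and everything past numpy.random.seed are assumptions, not the source's actual body): seed Python's random module, NumPy, and torch, including CUDA when it is in use.

import random

import numpy as np
import torch

def set_all_seeds(seed: int, no_cuda: bool) -> None:
    # hypothetical variant of set_torch_seed; seeds every RNG the training code touches
    random.seed(seed)
    np.random.seed(seed)  # the completion for this row
    torch.manual_seed(seed)
    if not no_cuda and torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)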
try: import importlib.resources as pkg_resources except ImportError: # Try backported to PY<37 `importlib_resources`. import importlib_resources as pkg_resources from . import images from gym import Env, spaces from time import time import numpy as np from copy import copy import colorsys import pygame from pygame.transform import scale class MinesweeperEnv(Env): def __init__(self, grid_shape=(10, 15), bombs_density=0.1, n_bombs=None, impact_size=3, max_time=999, chicken=False): self.grid_shape = grid_shape self.grid_size = np.prod(grid_shape) self.n_bombs = max(1, int(bombs_density * self.grid_size)) if n_bombs is None else n_bombs self.n_bombs = min(self.grid_size - 1, self.n_bombs) self.flaged_bombs = 0 self.flaged_empty = 0 self.max_time = max_time if impact_size % 2 == 0: raise ValueError('impact_size must be an odd number!') self.impact_size = impact_size # Define constants self.HIDDEN = 0 self.REVEAL = 1 self.FLAG = 2 self.BOMB = self.impact_size ** 2 # Setting up gym Env conventions nvec_observation = (self.BOMB + 2) * np.ones(self.grid_shape) self.observation_space = spaces.MultiDiscrete(nvec_observation) nvec_action = np.array(self.grid_shape + (2,)) self.action_space = spaces.MultiDiscrete(nvec_action) # Initialize state self.state = np.zeros(self.grid_shape + (2,), dtype=np.uint8) ## Set up bomb positions idx = np.indices(self.grid_shape).reshape(2, -1) bombs_ids = np.random.choice(range(self.grid_size), size=self.n_bombs, replace=False) self.bombs_positions = idx[0][bombs_ids], idx[1][bombs_ids] ## Place numbers self.semi_impact_size = (self.impact_size-1)//2 bomb_impact =
np.ones((self.impact_size, self.impact_size), dtype=np.uint8)
numpy.ones
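Usage sketch for this row's completion (numpy.ones): the kernel is a square block of ones that is added onto the number grid around each bomb. The toy grid size and bomb position below are made up for illustration.

import numpy as np

impact_size = 3
bomb_impact = np.ones((impact_size, impact_size), dtype=np.uint8)
grid = np.zeros((5, 5), dtype=np.uint8)
grid[1:1 + impact_size, 1:1 + impact_size] += bomb_impact  # stamp one bomb's impact region
print(grid)  # a 3x3 patch of ones inside the 5x5 grid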
import os import string from collections import Counter from datetime import datetime from functools import partial from pathlib import Path from typing import Optional import numpy as np import pandas as pd from scipy.stats.stats import chisquare from tangled_up_in_unicode import block, block_abbr, category, category_long, script from pandas_profiling.config import Settings from pandas_profiling.model.summary_helpers_image import ( extract_exif, hash_image, is_image_truncated, open_image, ) def mad(arr: np.ndarray) -> np.ndarray: """Median Absolute Deviation: a "Robust" version of standard deviation. Indicates the variability of the sample. https://en.wikipedia.org/wiki/Median_absolute_deviation """ return np.median(np.abs(arr - np.median(arr))) def named_aggregate_summary(series: pd.Series, key: str) -> dict: summary = { f"max_{key}": np.max(series), f"mean_{key}": np.mean(series), f"median_{key}":
np.median(series)
numpy.median
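Usage sketch for mad() and this row's completion (numpy.median): the nested median makes MAD robust to outliers in a way the standard deviation is not. The sample array is illustrative only.

import numpy as np

arr = np.array([1.0, 2.0, 2.0, 3.0, 100.0])  # one large outlier
mad_value = np.median(np.abs(arr - np.median(arr)))
print(mad_value)  # 1.0, barely affected by the outlier
print(arr.std())  # ~39.2, dominated by the outlier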
try: import importlib.resources as pkg_resources except ImportError: # Try backported to PY<37 `importlib_resources`. import importlib_resources as pkg_resources from . import images from gym import Env, spaces from time import time import numpy as np from copy import copy import colorsys import pygame from pygame.transform import scale class MinesweeperEnv(Env): def __init__(self, grid_shape=(10, 15), bombs_density=0.1, n_bombs=None, impact_size=3, max_time=999, chicken=False): self.grid_shape = grid_shape self.grid_size = np.prod(grid_shape) self.n_bombs = max(1, int(bombs_density * self.grid_size)) if n_bombs is None else n_bombs self.n_bombs = min(self.grid_size - 1, self.n_bombs) self.flaged_bombs = 0 self.flaged_empty = 0 self.max_time = max_time if impact_size % 2 == 0: raise ValueError('impact_size must be an odd number!') self.impact_size = impact_size # Define constants self.HIDDEN = 0 self.REVEAL = 1 self.FLAG = 2 self.BOMB = self.impact_size ** 2 # Setting up gym Env conventions nvec_observation = (self.BOMB + 2) * np.ones(self.grid_shape) self.observation_space = spaces.MultiDiscrete(nvec_observation) nvec_action = np.array(self.grid_shape + (2,)) self.action_space = spaces.MultiDiscrete(nvec_action) # Initialize state self.state = np.zeros(self.grid_shape + (2,), dtype=np.uint8) ## Set up bomb positions idx = np.indices(self.grid_shape).reshape(2, -1) bombs_ids = np.random.choice(range(self.grid_size), size=self.n_bombs, replace=False) self.bombs_positions = idx[0][bombs_ids], idx[1][bombs_ids] ## Place numbers self.semi_impact_size = (self.impact_size-1)//2 bomb_impact = np.ones((self.impact_size, self.impact_size), dtype=np.uint8) for bombs_id in bombs_ids: bomb_x, bomb_y = idx[0][bombs_id], idx[1][bombs_id] x_min, x_max, dx_min, dx_max = self.clip_index(bomb_x, 0) y_min, y_max, dy_min, dy_max = self.clip_index(bomb_y, 1) bomb_region = self.state[x_min:x_max, y_min:y_max, 0] bomb_region += bomb_impact[dx_min:dx_max, dy_min:dy_max] ## Place bombs self.state[self.bombs_positions + (0,)] = self.BOMB self.start_time = time() self.time_left = int(time() - self.start_time) # Set up rendering self.pygame_is_init = False self.chicken = chicken self.done = False self.score = 0 def get_observation(self): observation = copy(self.state[:, :, 1]) revealed = observation == 1 flaged = observation == 2 observation += self.impact_size ** 2 + 1 observation[revealed] = copy(self.state[:, :, 0][revealed]) observation[flaged] -= 1 return observation def reveal_around(self, coords, reward, done, without_loss=False): if not done: x_min, x_max, _, _ = self.clip_index(coords[0], 0) y_min, y_max, _, _ = self.clip_index(coords[1], 1) region = self.state[x_min:x_max, y_min:y_max, :] unseen_around =
np.sum(region[..., 1] == 0)
numpy.sum
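Counting sketch for this row's completion (numpy.sum): summing a boolean mask counts the matching cells, here the still-hidden cells (state channel 1 == 0) in a small hand-made region.

import numpy as np

region = np.zeros((3, 3, 2), dtype=np.uint8)
region[..., 1] = np.array([[0, 1, 2], [0, 0, 1], [2, 0, 0]])
unseen_around = np.sum(region[..., 1] == 0)  # each True counts as 1
print(unseen_around)  # 5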
from __future__ import absolute_import from __future__ import division from __future__ import print_function import cntk as C import numpy as np from .common import floatx, epsilon, image_dim_ordering, image_data_format from collections import defaultdict from contextlib import contextmanager import warnings C.set_global_option('align_axis', 1) b_any = any dev = C.device.use_default_device() if dev.type() == 0: warnings.warn( 'CNTK backend warning: GPU is not detected. ' 'CNTK\'s CPU version is not fully optimized, ' 'please run with GPU to get better performance.') # A learning phase is a bool tensor used to run Keras models in # either train mode (learning_phase == 1) or test mode (learning_phase == 0). # _LEARNING_PHASE_PLACEHOLDER is the placeholder for the dynamic learning phase _LEARNING_PHASE_PLACEHOLDER = C.constant(shape=(), dtype=np.float32, value=1.0, name='_keras_learning_phase') # Static learning phase flag; if it is not 0 or 1, we go with the dynamic learning phase tensor. _LEARNING_PHASE = -1 _UID_PREFIXES = defaultdict(int) # CNTK doesn't support gradients as symbolic ops. To hook up with a Keras model # we create each gradient as a constant placeholder, and use this global # map to keep the mapping from gradient placeholder to parameter. grad_parameter_dict = {} NAME_SCOPE_STACK = [] @contextmanager def name_scope(name): global NAME_SCOPE_STACK NAME_SCOPE_STACK.append(name) yield NAME_SCOPE_STACK.pop() def get_uid(prefix=''): _UID_PREFIXES[prefix] += 1 return _UID_PREFIXES[prefix] def learning_phase(): # If _LEARNING_PHASE is not 0 or 1, return the dynamic learning phase tensor return _LEARNING_PHASE if _LEARNING_PHASE in {0, 1} else _LEARNING_PHASE_PLACEHOLDER def set_learning_phase(value): global _LEARNING_PHASE if value not in {0, 1}: raise ValueError('CNTK Backend: Set learning phase ' 'with value %s is not supported, ' 'expected 0 or 1.' % value) _LEARNING_PHASE = value def clear_session(): """Reset learning phase flag for cntk backend. """ global _LEARNING_PHASE global _LEARNING_PHASE_PLACEHOLDER _LEARNING_PHASE = -1 _LEARNING_PHASE_PLACEHOLDER.value = np.asarray(1.0) def in_train_phase(x, alt, training=None): global _LEARNING_PHASE if training is None: training = learning_phase() uses_learning_phase = True else: uses_learning_phase = False # CNTK currently doesn't support a cond op, so here we use the # element_select approach as a workaround. It may have a # performance cost; this will be resolved later with the CNTK cond op. if callable(x) and isinstance(x, C.cntk_py.Function) is False: x = x() if callable(alt) and isinstance(alt, C.cntk_py.Function) is False: alt = alt() if training is True: x._uses_learning_phase = uses_learning_phase return x else: # if _LEARNING_PHASE is static if isinstance(training, int) or isinstance(training, bool): result = x if training == 1 or training is True else alt else: result = C.element_select(training, x, alt) result._uses_learning_phase = uses_learning_phase return result def in_test_phase(x, alt, training=None): return in_train_phase(alt, x, training=training) def _convert_string_dtype(dtype): # cntk only supports float32 and float64 if dtype == 'float32': return np.float32 elif dtype == 'float64': return np.float64 else: # cntk only runs with floats, # so try casting to float to run the model return np.float32 def _convert_dtype_string(dtype): if dtype == np.float32: return 'float32' elif dtype == np.float64: return 'float64' else: raise ValueError('CNTK Backend: Unsupported dtype: %s. ' 'CNTK only supports float32 and ' 'float64.'
% dtype) def variable(value, dtype=None, name=None, constraint=None): """Instantiates a variable and returns it. # Arguments value: Numpy array, initial value of the tensor. dtype: Tensor type. name: Optional name string for the tensor. constraint: Optional projection function to be applied to the variable after an optimizer update. # Returns A variable instance (with Keras metadata included). """ if dtype is None: dtype = floatx() if name is None: name = '' if isinstance( value, C.variables.Constant) or isinstance( value, C.variables.Parameter): value = value.value # we don't support init parameter with symbolic op, so eval it first as # workaround if isinstance(value, C.cntk_py.Function): value = eval(value) shape = value.shape if hasattr(value, 'shape') else () if hasattr(value, 'dtype') and value.dtype != dtype and len(shape) > 0: value = value.astype(dtype) # TODO: remove the conversion when cntk supports int32, int64 # https://docs.microsoft.com/en-us/python/api/cntk.variables.parameter dtype = 'float32' if 'int' in str(dtype) else dtype v = C.parameter(shape=shape, init=value, dtype=dtype, name=_prepare_name(name, 'variable')) v._keras_shape = v.shape v._uses_learning_phase = False v.constraint = constraint return v def bias_add(x, bias, data_format=None): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) dims = len(x.shape) if dims > 0 and x.shape[0] == C.InferredDimension: dims -= 1 bias_dims = len(bias.shape) if bias_dims != 1 and bias_dims != dims: raise ValueError('Unexpected bias dimensions %d, ' 'expected 1 or %d dimensions' % (bias_dims, dims)) if dims == 4: if data_format == 'channels_first': if bias_dims == 1: shape = (bias.shape[0], 1, 1, 1) else: shape = (bias.shape[3],) + bias.shape[:3] elif data_format == 'channels_last': if bias_dims == 1: shape = (1, 1, 1, bias.shape[0]) else: shape = bias.shape elif dims == 3: if data_format == 'channels_first': if bias_dims == 1: shape = (bias.shape[0], 1, 1) else: shape = (bias.shape[2],) + bias.shape[:2] elif data_format == 'channels_last': if bias_dims == 1: shape = (1, 1, bias.shape[0]) else: shape = bias.shape elif dims == 2: if data_format == 'channels_first': if bias_dims == 1: shape = (bias.shape[0], 1) else: shape = (bias.shape[1],) + bias.shape[:1] elif data_format == 'channels_last': if bias_dims == 1: shape = (1, bias.shape[0]) else: shape = bias.shape else: shape = bias.shape return x + reshape(bias, shape) def eval(x): if isinstance(x, C.cntk_py.Function): return x.eval() elif isinstance(x, C.variables.Constant) or isinstance(x, C.variables.Parameter): return x.value else: raise ValueError('CNTK Backend: `eval` method on ' '`%s` type is not supported. ' 'CNTK only supports `eval` with ' '`Function`, `Constant` or ' '`Parameter`.' % type(x)) def placeholder( shape=None, ndim=None, dtype=None, sparse=False, name=None, dynamic_axis_num=1): if dtype is None: dtype = floatx() if not shape: if ndim: shape = tuple([None for _ in range(ndim)]) dynamic_dimension = C.FreeDimension if _get_cntk_version() >= 2.2 else C.InferredDimension cntk_shape = [dynamic_dimension if s is None else s for s in shape] cntk_shape = tuple(cntk_shape) if dynamic_axis_num > len(cntk_shape): raise ValueError('CNTK backend: creating placeholder with ' '%d dimension is not supported, at least ' '%d dimensions are needed.' 
% (len(cntk_shape), dynamic_axis_num)) if name is None: name = '' cntk_shape = cntk_shape[dynamic_axis_num:] x = C.input( shape=cntk_shape, dtype=_convert_string_dtype(dtype), is_sparse=sparse, name=name) x._keras_shape = shape x._uses_learning_phase = False x._cntk_placeholder = True return x def is_placeholder(x): """Returns whether `x` is a placeholder. # Arguments x: A candidate placeholder. # Returns Boolean. """ return hasattr(x, '_cntk_placeholder') and x._cntk_placeholder def is_keras_tensor(x): if not is_tensor(x): raise ValueError('Unexpectedly found an instance of type `' + str(type(x)) + '`. ' 'Expected a symbolic tensor instance.') return hasattr(x, '_keras_history') def is_tensor(x): return isinstance(x, (C.variables.Constant, C.variables.Variable, C.variables.Parameter, C.ops.functions.Function)) def shape(x): shape = list(int_shape(x)) num_dynamic = _get_dynamic_axis_num(x) non_dyn_shape = [] for i in range(len(x.shape)): if shape[i + num_dynamic] is None: non_dyn_shape.append(x.shape[i]) else: non_dyn_shape.append(shape[i + num_dynamic]) return shape[:num_dynamic] + non_dyn_shape def is_sparse(tensor): return tensor.is_sparse def int_shape(x): if hasattr(x, '_keras_shape'): return x._keras_shape shape = x.shape if hasattr(x, 'dynamic_axes'): dynamic_shape = [None for a in x.dynamic_axes] shape = tuple(dynamic_shape) + shape return shape def ndim(x): shape = int_shape(x) return len(shape) def _prepare_name(name, default): prefix = '_'.join(NAME_SCOPE_STACK) if name is None or name == '': return prefix + '/' + default return prefix + '/' + name def constant(value, dtype=None, shape=None, name=None): if dtype is None: dtype = floatx() if shape is None: shape = () np_value = value * np.ones(shape) const = C.constant(np_value, dtype=dtype, name=_prepare_name(name, 'constant')) const._keras_shape = const.shape const._uses_learning_phase = False return const def random_binomial(shape, p=0.0, dtype=None, seed=None): # use a numpy workaround for now if seed is None: # ensure that randomness is conditioned by the Numpy RNG seed = np.random.randint(10e7) np.random.seed(seed) if dtype is None: dtype = np.float32 else: dtype = _convert_string_dtype(dtype) size = 1 for _ in shape: if _ is None: raise ValueError('CNTK Backend: randomness op with ' 'dynamic shape is not supported now. ' 'Please provide fixed dimension ' 'instead of `None`.') size *= _ binomial = np.random.binomial(1, p, size).astype(dtype).reshape(shape) return variable(value=binomial, dtype=dtype) def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None): for _ in shape: if _ is None: raise ValueError('CNTK Backend: randomness op with ' 'dynamic shape is not supported now. 
' 'Please provide fixed dimension ' 'instead of `None`.') return random_uniform_variable(shape, minval, maxval, dtype, seed) def random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None): if dtype is None: dtype = floatx() if seed is None: # ensure that randomness is conditioned by the Numpy RNG seed = np.random.randint(10e3) if dtype is None: dtype = np.float32 else: dtype = _convert_string_dtype(dtype) if name is None: name = '' scale = (high - low) / 2 p = C.parameter( shape, init=C.initializer.uniform( scale, seed=seed), dtype=dtype, name=name) return variable(value=p.value + low + scale) def random_normal_variable( shape, mean, scale, dtype=None, name=None, seed=None): if dtype is None: dtype = floatx() if seed is None: # ensure that randomness is conditioned by the Numpy RNG seed = np.random.randint(10e7) if dtype is None: dtype = np.float32 else: dtype = _convert_string_dtype(dtype) if name is None: name = '' return C.parameter( shape=shape, init=C.initializer.normal( scale=scale, seed=seed), dtype=dtype, name=name) def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): if dtype is None: dtype = floatx() for _ in shape: if _ is None: raise ValueError('CNTK Backend: randomness op with ' 'dynamic shape is not supported now. ' 'Please provide fixed dimension ' 'instead of `None`.') # how to apply mean and stddev return random_normal_variable(shape=shape, mean=mean, scale=1.0, seed=seed) def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): if seed is None: seed = np.random.randint(1, 10e6) if dtype is None: dtype = np.float32 else: dtype = _convert_string_dtype(dtype) return C.parameter( shape, init=C.initializer.truncated_normal( stddev, seed=seed), dtype=dtype) def dtype(x): return _convert_dtype_string(x.dtype) def zeros(shape, dtype=None, name=None): if dtype is None: dtype = floatx() ctype = _convert_string_dtype(dtype) return variable(value=np.zeros(shape, ctype), dtype=dtype, name=name) def ones(shape, dtype=None, name=None): if dtype is None: dtype = floatx() ctype = _convert_string_dtype(dtype) return variable(value=np.ones(shape, ctype), dtype=dtype, name=name) def eye(size, dtype=None, name=None): if dtype is None: dtype = floatx() return variable(np.eye(size), dtype, name) def zeros_like(x, dtype=None, name=None): return x * 0 def ones_like(x, dtype=None, name=None): return zeros_like(x) + 1 def count_params(x): for _ in x.shape: if _ == C.InferredDimension or _ == C.FreeDimension: raise ValueError('CNTK backend: `count_params` with dynamic ' 'shape is not supported. Please provide ' 'fixed dimension instead of `None`.') return np.prod(int_shape(x)) def cast(x, dtype): # cntk calculate everything in float, so don't need case from bool / int return x def dot(x, y): if len(x.shape) > 2 or len(y.shape) > 2: y_shape = int_shape(y) if len(y_shape) > 2: permutation = [len(y_shape) - 2] permutation += list(range(len(y_shape) - 2)) permutation += [len(y_shape) - 1] y = C.transpose(y, perm=permutation) return C.times(x, y, len(y_shape) - 1) else: return C.times(x, y) def batch_dot(x, y, axes=None): x_shape = int_shape(x) y_shape = int_shape(y) if isinstance(axes, int): axes = (axes, axes) if axes is None: # behaves like tf.batch_matmul as default axes = [len(x_shape) - 1, len(y_shape) - 2] if b_any([isinstance(a, (list, tuple)) for a in axes]): raise ValueError('Multiple target dimensions are not supported. 
' + 'Expected: None, int, (int, int), ' + 'Provided: ' + str(axes)) if len(x_shape) == 2 and len(y_shape) == 2: if axes[0] == axes[1]: result = sum(x * y, axis=axes[0], keepdims=True) return result if axes[0] == 1 else transpose(result) else: return sum(x * transpose(y), axis=axes[0], keepdims=True) else: if len(y_shape) == 2: y = expand_dims(y) normalized_axis = [] normalized_axis.append(_normalize_axis(axes[0], x)[0]) normalized_axis.append(_normalize_axis(axes[1], y)[0]) # transpose i = normalized_axis[0] while i < len(x.shape) - 1: x = C.swapaxes(x, i, i + 1) i += 1 i = normalized_axis[1] while i > 0: y = C.swapaxes(y, i, i - 1) i -= 1 result = C.times(x, y, output_rank=(len(y.shape) - 1) if len(y.shape) > 1 else 1) if len(y_shape) == 2: result = squeeze(result, -1) return result def transpose(x): return C.swapaxes(x, 0, 1) def gather(reference, indices): # There is a bug in the cntk gather op which may cause a crash. # We have made a fix, but it was not included in the CNTK 2.1 release. # Will update to the native gather op in the next release. if _get_cntk_version() >= 2.2: return C.ops.gather(reference, indices) else: num_classes = reference.shape[0] one_hot_matrix = C.ops.one_hot(indices, num_classes) return C.times(one_hot_matrix, reference, output_rank=len(reference.shape) - 1) def _remove_dims(x, axis, keepdims=False): if keepdims is False and isinstance(axis, list): # the sequence axis is removed by default, so no reshape is needed on it reduce_axes = [] for a in axis: if isinstance(a, C.Axis) is False: reduce_axes.append(a) return _reshape_dummy_dim(x, reduce_axes) else: if isinstance(axis, list): has_seq = False for a in axis: if isinstance(a, C.Axis): has_seq = True break if has_seq: nones = _get_dynamic_axis_num(x) x = expand_dims(x, nones) return x def max(x, axis=None, keepdims=False): axis = _normalize_axis(axis, x) output = _reduce_on_axis(x, axis, 'reduce_max') return _remove_dims(output, axis, keepdims) def min(x, axis=None, keepdims=False): axis = _normalize_axis(axis, x) output = _reduce_on_axis(x, axis, 'reduce_min') return _remove_dims(output, axis, keepdims) def sum(x, axis=None, keepdims=False): axis = _normalize_axis(axis, x) output = _reduce_on_axis(x, axis, 'reduce_sum') return _remove_dims(output, axis, keepdims) def prod(x, axis=None, keepdims=False): axis = _normalize_axis(axis, x) output = _reduce_on_axis(x, axis, 'reduce_prod') return _remove_dims(output, axis, keepdims) def logsumexp(x, axis=None, keepdims=False): return log(sum(exp(x), axis=axis, keepdims=keepdims)) def var(x, axis=None, keepdims=False): m = mean(x, axis, keepdims=True) devs_squared = C.square(x - m) return mean(devs_squared, axis=axis, keepdims=keepdims) def std(x, axis=None, keepdims=False): return C.sqrt(var(x, axis=axis, keepdims=keepdims)) def expand_dims(x, axis=-1): shape = list(int_shape(x)) nones = _get_dynamic_axis_num(x) index = axis if axis >= 0 else len(shape) + 1 shape.insert(index, 1) new_shape = shape[nones:] new_shape = tuple( [C.InferredDimension if _ is None else _ for _ in new_shape]) result = C.reshape(x, new_shape) if index < nones: result._keras_shape = shape return result def squeeze(x, axis): if isinstance(axis, tuple): axis = list(axis) if not isinstance(axis, list): axis = [axis] shape = list(int_shape(x)) _axis = [] for _ in axis: if isinstance(_, int): _axis.append(_ if _ >= 0 else _ + len(shape)) if len(_axis) == 0: return x nones = _get_dynamic_axis_num(x) for _ in sorted(_axis, reverse=True): del shape[_] new_shape = shape[nones:] new_shape = tuple([C.InferredDimension if _ == C.FreeDimension 
else _ for _ in new_shape]) return C.reshape(x, new_shape) def tile(x, n): if isinstance(n, int): n = (n,) elif isinstance(n, list): n = tuple(n) shape = int_shape(x) num_dynamic_axis = _get_dynamic_axis_num(x) # Padding the axis if len(n) < len(shape): n = tuple([1 for _ in range(len(shape) - len(n))]) + n if len(n) != len(shape): raise NotImplementedError i = num_dynamic_axis for i, rep in enumerate(n): if i >= num_dynamic_axis and shape[i] is not None: tmp = [x] * rep x = C.splice(*tmp, axis=i - num_dynamic_axis) i += 1 return x def _normalize_axis(axis, x): shape = int_shape(x) ndim = len(shape) nones = _get_dynamic_axis_num(x) if nones > ndim: raise ValueError('CNTK Backend: tensor with keras shape: `%s` has ' '%d cntk dynamic axis, this is not expected, please ' 'double check the keras shape history.' % (str(shape), nones)) # Current cntk does not support shape like (1, batch). so using the workaround # here to mapping the correct axis. Will remove this tricky after we add support # in native cntk op cntk_axis = [] dynamic_axis_index = 0 for i in range(ndim): if shape[i] is None and dynamic_axis_index < nones: cntk_axis.append(x.dynamic_axes[dynamic_axis_index]) dynamic_axis_index += 1 else: cntk_axis.append(i - dynamic_axis_index) if dynamic_axis_index < nones: i = 0 while dynamic_axis_index < nones: cntk_axis[i] = x.dynamic_axes[dynamic_axis_index] i += 1 dynamic_axis_index += 1 while i < len(cntk_axis): cntk_axis[i] -= nones i += 1 if isinstance(axis, tuple): _axis = list(axis) elif isinstance(axis, int): _axis = [axis] elif isinstance(axis, list): _axis = list(axis) else: _axis = axis if isinstance(_axis, list): for i, a in enumerate(_axis): if a is not None and a < 0: _axis[i] = (a % ndim) if _axis[i] is not None: _axis[i] = cntk_axis[_axis[i]] else: if _axis is None: _axis = C.Axis.all_axes() return _axis def _reshape_dummy_dim(x, axis): shape = list(x.shape) _axis = [_ + len(shape) if _ < 0 else _ for _ in axis] if shape.count(C.InferredDimension) > 1 or shape.count(C.FreeDimension) > 1: result = x for index in sorted(_axis, reverse=True): result = C.reshape(result, shape=(), begin_axis=index, end_axis=index + 1) return result else: for index in sorted(_axis, reverse=True): del shape[index] shape = [C.InferredDimension if _ == C.FreeDimension else _ for _ in shape] return C.reshape(x, shape) def mean(x, axis=None, keepdims=False): axis = _normalize_axis(axis, x) output = _reduce_on_axis(x, axis, 'reduce_mean') return _remove_dims(output, axis, keepdims) def any(x, axis=None, keepdims=False): reduce_result = sum(x, axis, keepdims=keepdims) any_matrix = C.element_select( reduce_result, ones_like(reduce_result), zeros_like(reduce_result)) if len(reduce_result.shape) == 0 and _get_dynamic_axis_num(x) == 0: return C.reduce_sum(any_matrix) else: return any_matrix def all(x, axis=None, keepdims=False): reduce_result = prod(x, axis, keepdims=keepdims) all_matrix = C.element_select( reduce_result, ones_like(reduce_result), zeros_like(reduce_result)) if len(reduce_result.shape) == 0 and _get_dynamic_axis_num(x) == 0: return C.reduce_sum(all_matrix) else: return all_matrix def classification_error(target, output, axis=-1): return C.ops.reduce_mean( C.equal( argmax( output, axis=-1), argmax( target, axis=-1)), axis=C.Axis.all_axes()) def argmax(x, axis=-1): axis = [axis] axis = _normalize_axis(axis, x) output = C.ops.argmax(x, axis=axis[0]) return _reshape_dummy_dim(output, axis) def argmin(x, axis=-1): axis = [axis] axis = _normalize_axis(axis, x) output = C.ops.argmin(x, axis=axis[0]) 
return _reshape_dummy_dim(output, axis) def square(x): return C.square(x) def abs(x): return C.abs(x) def sqrt(x): return C.sqrt(x) def exp(x): return C.exp(x) def log(x): return C.log(x) def round(x): return C.round(x) def sigmoid(x): return C.sigmoid(x) def sign(x): return x / C.abs(x) def pow(x, a): return C.pow(x, a) def clip(x, min_value, max_value): if max_value is not None and max_value < min_value: max_value = min_value if max_value is None: max_value = np.inf if min_value is None: min_value = -np.inf return C.clip(x, min_value, max_value) def binary_crossentropy(target, output, from_logits=False): if from_logits: output = C.sigmoid(output) output = C.clip(output, epsilon(), 1.0 - epsilon()) output = -target * C.log(output) - (1.0 - target) * C.log(1.0 - output) return output def get_variable_shape(x): return int_shape(x) def update(x, new_x): return C.assign(x, new_x) def moving_average_update(variable, value, momentum): return C.assign(variable, variable * momentum + value * (1. - momentum)) def update_add(x, increment): result = x + increment return C.assign(x, result) def gradients(loss, variables): # cntk does not support gradients as symbolic op, # to hook up with keras model # we will return a constant as place holder, the cntk learner will apply # the gradient during training. global grad_parameter_dict if isinstance(variables, list) is False: variables = [variables] grads = [] for v in variables: g = C.constant(0, shape=v.shape, name='keras_grad_placeholder') grads.append(g) grad_parameter_dict[g] = v return grads def equal(x, y): return C.equal(x, y) def not_equal(x, y): return C.not_equal(x, y) def greater(x, y): return C.greater(x, y) def greater_equal(x, y): return C.greater_equal(x, y) def less(x, y): return C.less(x, y) def less_equal(x, y): return C.less_equal(x, y) def maximum(x, y): return C.element_max(x, y) def minimum(x, y): return C.element_min(x, y) def sin(x): return C.sin(x) def cos(x): return C.cos(x) def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3): if gamma is None: if beta is None: gamma = ones_like(x) else: gamma = ones_like(beta) if beta is None: if gamma is None: beta = zeros_like(x) else: beta = zeros_like(gamma) mean, variant = _moments(x, _normalize_axis(reduction_axes, x)) if sorted(reduction_axes) == list(range(ndim(x)))[:-1]: normalized = batch_normalization( x, mean, variant, beta, gamma, epsilon) else: # need broadcasting target_shape = [] x_shape = int_shape(x) # skip the batch axis for axis in range(1, ndim(x)): if axis in reduction_axes: target_shape.append(1) if ndim(gamma) > axis: gamma = C.reduce_mean(gamma, axis - 1) beta = C.reduce_mean(beta, axis - 1) else: target_shape.append(x_shape[axis]) broadcast_mean = C.reshape(mean, target_shape) broadcast_var = C.reshape(variant, target_shape) broadcast_gamma = C.reshape(gamma, target_shape) broadcast_beta = C.reshape(beta, target_shape) normalized = batch_normalization( x, broadcast_mean, broadcast_var, broadcast_beta, broadcast_gamma, epsilon) return normalized, mean, variant def _moments(x, axes=None, shift=None, keep_dims=False): _axes = tuple(axes) if shift is None: shift = x # Compute true mean while keeping the dims for proper broadcasting. 
for axis in _axes: shift = C.reduce_mean(shift, axis=axis) shift = C.stop_gradient(shift) shifted_mean = C.minus(x, shift) for axis in _axes: shifted_mean = C.reduce_mean(shifted_mean, axis=axis) variance_mean = C.square(C.minus(x, shift)) for axis in _axes: variance_mean = C.reduce_mean(variance_mean, axis=axis) variance = C.minus(variance_mean, C.square(shifted_mean)) mean = C.plus(shifted_mean, shift) if not keep_dims: mean = squeeze(mean, _axes) variance = squeeze(variance, _axes) return mean, variance def batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3): # The mean / var / beta / gamma may be processed by broadcast # so it may have an extra batch axis with 1, it is not needed # in cntk, need to remove those dummy axis. if ndim(mean) == ndim(x) and shape(mean)[0] == 1: mean = _reshape_dummy_dim(mean, [0]) if ndim(var) == ndim(x) and shape(var)[0] == 1: var = _reshape_dummy_dim(var, [0]) if gamma is None: gamma = ones_like(var) elif ndim(gamma) == ndim(x) and shape(gamma)[0] == 1: gamma = _reshape_dummy_dim(gamma, [0]) if beta is None: beta = zeros_like(mean) elif ndim(beta) == ndim(x) and shape(beta)[0] == 1: beta = _reshape_dummy_dim(beta, [0]) return (x - mean) / (C.sqrt(var) + epsilon) * gamma + beta def concatenate(tensors, axis=-1): if len(tensors) == 0: return None axis = [axis] axis = _normalize_axis(axis, tensors[0]) return C.splice(*tensors, axis=axis[0]) def flatten(x): return reshape(x, (-1,)) def reshape(x, shape): shape = tuple([C.InferredDimension if _ == C.FreeDimension else _ for _ in shape]) if isinstance(x, C.variables.Parameter): return C.reshape(x, shape) else: num_dynamic_axis = _get_dynamic_axis_num(x) if num_dynamic_axis == 1 and len(shape) > 0 and shape[0] == -1: # collapse axis with batch axis if b_any(_ == C.InferredDimension for _ in x.shape) or b_any( _ == C.FreeDimension for _ in x.shape): warnings.warn( 'Warning: CNTK backend does not support ' 'collapse of batch axis with inferred dimension. ' 'The reshape did not take place.') return x return _reshape_batch(x, shape) else: # no collapse, then first need to padding the shape if num_dynamic_axis >= len(shape): i = 0 while i < len(shape): if shape[i] is None or shape[i] == -1: i += 1 else: break shape = tuple([-1 for _ in range(num_dynamic_axis - i)]) + shape new_shape = list(shape) new_shape = new_shape[num_dynamic_axis:] new_shape = [C.InferredDimension if _ is None else _ for _ in new_shape] return C.reshape(x, new_shape) def permute_dimensions(x, pattern): dims = len(int_shape(x)) num_dynamic_axis = _get_dynamic_axis_num(x) if isinstance(pattern, list): current_layout = [i for i in range(dims)] else: current_layout = tuple([i for i in range(dims)]) if num_dynamic_axis > 0 and pattern[:num_dynamic_axis] != current_layout[:num_dynamic_axis]: raise ValueError('CNTK backend: the permute pattern %s ' 'requested permute on dynamic axis, ' 'which is not supported. Please do permute ' 'on static axis.' 
% pattern) axis = list(pattern) axis = axis[num_dynamic_axis:] axis = _normalize_axis(axis, x) return C.transpose(x, axis) def resize_images(x, height_factor, width_factor, data_format): if data_format == 'channels_first': output = repeat_elements(x, height_factor, axis=2) output = repeat_elements(output, width_factor, axis=3) return output elif data_format == 'channels_last': output = repeat_elements(x, height_factor, axis=1) output = repeat_elements(output, width_factor, axis=2) return output else: raise ValueError('CNTK Backend: Invalid data_format:', data_format) def resize_volumes(x, depth_factor, height_factor, width_factor, data_format): if data_format == 'channels_first': output = repeat_elements(x, depth_factor, axis=2) output = repeat_elements(output, height_factor, axis=3) output = repeat_elements(output, width_factor, axis=4) return output elif data_format == 'channels_last': output = repeat_elements(x, depth_factor, axis=1) output = repeat_elements(output, height_factor, axis=2) output = repeat_elements(output, width_factor, axis=3) return output else: raise ValueError('CNTK Backend: Invalid data_format:', data_format) def repeat_elements(x, rep, axis): axis = _normalize_axis(axis, x) axis = axis[0] slices = [] shape = x.shape i = 0 while i < shape[axis]: tmp = C.ops.slice(x, axis, i, i + 1) for _ in range(rep): slices.append(tmp) i += 1 return C.splice(*slices, axis=axis) def repeat(x, n): # this is a workaround for recurrent layer # if n is inferred dimension, # we can't figure out how to repeat it in cntk now # return the same x to take cntk broadcast feature # to make the recurrent layer work. # need to be fixed in GA. if n is C.InferredDimension or n is C.FreeDimension: return x index = 1 - _get_dynamic_axis_num(x) if index < 0 or index > 1: raise NotImplementedError new_shape = list(x.shape) new_shape.insert(index, 1) new_shape = tuple(new_shape) x = C.reshape(x, new_shape) temp = [x] * n return C.splice(*temp, axis=index) def tanh(x): return C.tanh(x) def _static_rnn(step_function, inputs, initial_states, go_backwards=False, mask=None, constants=None, unroll=False, input_length=None): shape = int_shape(inputs) dims = len(shape) uses_learning_phase = False if dims < 3: raise ValueError('Input should be at least 3D.') # if the second axis is static axis, CNTK will do unroll by default if shape[1] is None: raise ValueError('CNTK Backend: the input of static rnn ' 'has shape `%s`, the second axis ' 'is not static. If you want to run ' 'rnn with non-static axis, please try ' 'dynamic rnn with sequence axis.' 
% shape) if constants is None: constants = [] if mask is not None: mask_shape = int_shape(mask) if len(mask_shape) == dims - 1: mask = expand_dims(mask) nones = _get_dynamic_axis_num(inputs) states = tuple(initial_states) outputs = [] time_axis = 1 - nones if nones > 0 else 1 if go_backwards: i = shape[1] - 1 while i >= 0: current = C.ops.slice(inputs, time_axis, i, i + 1) # remove dummy dimension current = squeeze(current, time_axis) output, new_states = step_function( current, tuple(states) + tuple(constants)) if getattr(output, '_uses_learning_phase', False): uses_learning_phase = True if mask is not None: mask_slice = C.ops.slice(mask, time_axis, i, i + 1) mask_slice = squeeze(mask_slice, time_axis) if len(outputs) == 0: prev_output = zeros_like(output) else: prev_output = outputs[-1] output = C.ops.element_select(mask_slice, output, prev_output) return_states = [] for s, n_s in zip(states, new_states): return_states.append( C.ops.element_select( mask_slice, n_s, s)) new_states = return_states outputs.append(output) states = new_states i -= 1 else: i = 0 while i < shape[1]: current = C.ops.slice(inputs, time_axis, i, i + 1) # remove dummy dimension current = squeeze(current, 1) output, new_states = step_function( current, tuple(states) + tuple(constants)) if getattr(output, '_uses_learning_phase', False): uses_learning_phase = True if mask is not None: mask_slice = C.ops.slice(mask, time_axis, i, i + 1) mask_slice = squeeze(mask_slice, 1) if len(outputs) == 0: prev_output = zeros_like(output) else: prev_output = outputs[-1] output = C.ops.element_select(mask_slice, output, prev_output) return_states = [] for s, n_s in zip(states, new_states): return_states.append( C.ops.element_select( mask_slice, n_s, s)) new_states = return_states outputs.append(output) states = new_states[:len(states)] i += 1 i = 1 # add the time_step axis back final_output = expand_dims(outputs[0], 1) last_output = outputs[0] while i < len(outputs): # add the time_step axis back output_slice = expand_dims(outputs[i], 1) final_output = C.splice(final_output, output_slice, axis=time_axis) last_output = outputs[i] i += 1 last_output._uses_learning_phase = uses_learning_phase return last_output, final_output, states def rnn(step_function, inputs, initial_states, go_backwards=False, mask=None, constants=None, unroll=False, input_length=None): shape = int_shape(inputs) dims = len(shape) global uses_learning_phase uses_learning_phase = False if dims < 3: raise ValueError('CNTK Backend: the input of rnn has only rank %d ' 'Need at least rank 3 to run RNN.' % dims) if _get_dynamic_axis_num(inputs) == 0 or unroll: return _static_rnn( step_function, inputs, initial_states, go_backwards, mask, constants, unroll, input_length) if constants is None: constants = [] num_time_step = shape[1] if num_time_step is None and not has_seq_axis(inputs): num_time_step = inputs.shape[0] initial = [] for s in initial_states: if _get_dynamic_axis_num(s) == 0: if hasattr(C, 'to_batch'): initial.append(C.to_batch(s)) else: initial.append(C.user_function(ConvertToBatch(s))) else: initial.append(s) need_convert = not has_seq_axis(inputs) if go_backwards and need_convert is False: raise NotImplementedError('CNTK Backend: `go_backwards` is not supported with ' 'variable-length sequences. 
Please specify a ' 'static length for your sequences.') rnn_inputs = inputs if need_convert: if go_backwards: rnn_inputs = reverse(rnn_inputs, 1) rnn_inputs = C.to_sequence(rnn_inputs) rnn_constants = [] for constant in constants: if isinstance(constant, list): new_c = [] for c in constant: if _get_dynamic_axis_num(c) == 1: new_c.append(C.sequence.broadcast_as(c, rnn_inputs)) else: new_c.append(c) rnn_constants.append(new_c) else: if _get_dynamic_axis_num(constant) == 1: rnn_constants.append(C.sequence.broadcast_as(constant, rnn_inputs)) else: rnn_constants.append(constant) else: rnn_constants = constants if mask is not None and not has_seq_axis(mask): if go_backwards: mask = reverse(mask, 1) if len(int_shape(mask)) == 2: mask = expand_dims(mask) mask = C.to_sequence_like(mask, rnn_inputs) states = tuple(initial) with C.default_options(axis_offset=1): def _recurrence(x, states, m): # create place holder place_holders = [C.placeholder(dynamic_axes=x.dynamic_axes) for _ in states] past_values = [] for s, p in zip(states, place_holders): past_values.append(C.sequence.past_value(p, s)) new_output, new_states = step_function( x, tuple(past_values) + tuple(rnn_constants)) if getattr(new_output, '_uses_learning_phase', False): global uses_learning_phase uses_learning_phase = True if m is not None: new_states = [C.element_select(m, n, s) for n, s in zip(new_states, past_values)] n_s = [] for o, p in zip(new_states, place_holders): n_s.append(o.replace_placeholders({p: o.output})) if len(n_s) > 0: new_output = n_s[0] return new_output, n_s final_output, final_states = _recurrence(rnn_inputs, states, mask) last_output = C.sequence.last(final_output) last_states = [C.sequence.last(s) for s in final_states] if need_convert: final_output = C.sequence.unpack(final_output, 0, no_mask_output=True) if num_time_step is not None and num_time_step is not C.FreeDimension: final_output = _reshape_sequence(final_output, num_time_step) f_stats = [] for l_s, i_s in zip(last_states, initial_states): if _get_dynamic_axis_num(i_s) == 0 and _get_dynamic_axis_num(l_s) == 1: if hasattr(C, 'unpack_batch'): f_stats.append(C.unpack_batch(l_s)) else: f_stats.append(C.user_function(ConvertToStatic(l_s, batch_size=i_s.shape[0]))) else: f_stats.append(l_s) last_output._uses_learning_phase = uses_learning_phase return last_output, final_output, f_stats def has_seq_axis(x): return hasattr(x, 'dynamic_axes') and len(x.dynamic_axes) > 1 def l2_normalize(x, axis=None): axis = [axis] axis = _normalize_axis(axis, x) norm = C.sqrt(C.reduce_sum(C.square(x), axis=axis[0])) return x / norm def hard_sigmoid(x): x = (0.2 * x) + 0.5 x = C.clip(x, 0.0, 1.0) return x def conv1d(x, kernel, strides=1, padding='valid', data_format=None, dilation_rate=1): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) if padding == 'causal': # causal (dilated) convolution: left_pad = dilation_rate * (kernel.shape[0] - 1) x = temporal_padding(x, (left_pad, 0)) padding = 'valid' if data_format == 'channels_last': x = C.swapaxes(x, 0, 1) kernel = C.swapaxes(kernel, 0, 2) padding = _preprocess_border_mode(padding) strides = [strides] x = C.convolution( kernel, x, strides=tuple(strides), auto_padding=[ False, padding]) if data_format == 'channels_last': x = C.swapaxes(x, 0, 1) return x def conv2d(x, kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)): if data_format is None: data_format = image_data_format() if 
data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) x = _preprocess_conv2d_input(x, data_format) kernel = _preprocess_conv2d_kernel(kernel, data_format) padding = _preprocess_border_mode(padding) if dilation_rate == (1, 1): strides = (1,) + strides x = C.convolution( kernel, x, strides, auto_padding=[ False, padding, padding]) else: assert dilation_rate[0] == dilation_rate[1] assert strides == (1, 1), 'Invalid strides for dilated convolution' x = C.convolution( kernel, x, strides=dilation_rate[0], auto_padding=[ False, padding, padding]) return _postprocess_conv2d_output(x, data_format) def separable_conv1d(x, depthwise_kernel, pointwise_kernel, strides=1, padding='valid', data_format=None, dilation_rate=1): raise NotImplementedError def separable_conv2d(x, depthwise_kernel, pointwise_kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) x = _preprocess_conv2d_input(x, data_format) depthwise_kernel = _preprocess_conv2d_kernel(depthwise_kernel, data_format) depthwise_kernel = C.reshape(C.transpose(depthwise_kernel, (1, 0, 2, 3)), (-1, 1) + depthwise_kernel.shape[2:]) pointwise_kernel = _preprocess_conv2d_kernel(pointwise_kernel, data_format) padding = _preprocess_border_mode(padding) if dilation_rate == (1, 1): strides = (1,) + strides x = C.convolution(depthwise_kernel, x, strides=strides, auto_padding=[False, padding, padding], groups=x.shape[0]) x = C.convolution(pointwise_kernel, x, strides=(1, 1, 1), auto_padding=[False]) else: if dilation_rate[0] != dilation_rate[1]: raise ValueError('CNTK Backend: non-square dilation_rate is ' 'not supported.') if strides != (1, 1): raise ValueError('Invalid strides for dilated convolution') x = C.convolution(depthwise_kernel, x, strides=dilation_rate[0], auto_padding=[False, padding, padding]) x = C.convolution(pointwise_kernel, x, strides=(1, 1, 1), auto_padding=[False]) return _postprocess_conv2d_output(x, data_format) def depthwise_conv2d(x, depthwise_kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) x = _preprocess_conv2d_input(x, data_format) depthwise_kernel = _preprocess_conv2d_kernel(depthwise_kernel, data_format) depthwise_kernel = C.reshape(C.transpose(depthwise_kernel, (1, 0, 2, 3)), (-1, 1) + depthwise_kernel.shape[2:]) padding = _preprocess_border_mode(padding) if dilation_rate == (1, 1): strides = (1,) + strides x = C.convolution(depthwise_kernel, x, strides=strides, auto_padding=[False, padding, padding], groups=x.shape[0]) else: if dilation_rate[0] != dilation_rate[1]: raise ValueError('CNTK Backend: non-square dilation_rate is ' 'not supported.') if strides != (1, 1): raise ValueError('Invalid strides for dilated convolution') x = C.convolution(depthwise_kernel, x, strides=dilation_rate[0], auto_padding=[False, padding, padding], groups=x.shape[0]) return _postprocess_conv2d_output(x, data_format) def conv3d(x, kernel, strides=(1, 1, 1), padding='valid', data_format=None, dilation_rate=(1, 1, 1)): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + 
str(data_format)) x = _preprocess_conv3d_input(x, data_format) kernel = _preprocess_conv3d_kernel(kernel, data_format) padding = _preprocess_border_mode(padding) strides = strides + (strides[0],) x = C.convolution( kernel, x, strides, auto_padding=[ False, padding, padding, padding]) return _postprocess_conv3d_output(x, data_format) def conv3d_transpose(x, kernel, output_shape, strides=(1, 1, 1), padding='valid', data_format=None): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) x = _preprocess_conv3d_input(x, data_format) kernel = _preprocess_conv3d_kernel(kernel, data_format) padding = _preprocess_border_mode(padding) strides = (1,) + strides # cntk output_shape does not include batch axis output_shape = output_shape[1:] # in keras2, need handle output shape in different format if data_format == 'channels_last': shape = list(output_shape) shape[0] = output_shape[3] shape[1] = output_shape[0] shape[2] = output_shape[1] shape[3] = output_shape[2] output_shape = tuple(shape) x = C.convolution_transpose( kernel, x, strides, auto_padding=[ False, padding, padding, padding], output_shape=output_shape) return _postprocess_conv3d_output(x, data_format) def pool2d(x, pool_size, strides=(1, 1), padding='valid', data_format=None, pool_mode='max'): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) padding = _preprocess_border_mode(padding) strides = strides pool_size = pool_size x = _preprocess_conv2d_input(x, data_format) if pool_mode == 'max': x = C.pooling( x, C.MAX_POOLING, pool_size, strides, auto_padding=[padding]) elif pool_mode == 'avg': x = C.pooling( x, C.AVG_POOLING, pool_size, strides, auto_padding=[padding]) else: raise ValueError('Invalid pooling mode: ' + str(pool_mode)) return _postprocess_conv2d_output(x, data_format) def pool3d(x, pool_size, strides=(1, 1, 1), padding='valid', data_format=None, pool_mode='max'): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) padding = _preprocess_border_mode(padding) x = _preprocess_conv3d_input(x, data_format) if pool_mode == 'max': x = C.pooling( x, C.MAX_POOLING, pool_size, strides, auto_padding=[padding]) elif pool_mode == 'avg': x = C.pooling( x, C.AVG_POOLING, pool_size, strides, auto_padding=[padding]) else: raise ValueError('Invalid pooling mode: ' + str(pool_mode)) return _postprocess_conv3d_output(x, data_format) def relu(x, alpha=0., max_value=None): if alpha != 0.: negative_part = C.relu(-x) x = C.relu(x) if max_value is not None: x = C.clip(x, 0.0, max_value) if alpha != 0.: x -= alpha * negative_part return x def dropout(x, level, noise_shape=None, seed=None): if level < 0. or level >= 1: raise ValueError('CNTK Backend: Invalid dropout level %s, ' 'must be in interval [0, 1].' 
% level) return C.dropout(x, level) def batch_flatten(x): # cntk's batch axis is not in shape, # so just flatten all the dim in x.shape dim = np.prod(x.shape) x = C.reshape(x, (-1,)) x._keras_shape = (None, dim) return x def softmax(x, axis=-1): return C.softmax(x, axis=axis) def softplus(x): return C.softplus(x) def softsign(x): return x / (1 + C.abs(x)) def categorical_crossentropy(target, output, from_logits=False): if from_logits: result = C.cross_entropy_with_softmax(output, target) # cntk's result shape is (batch, 1), while keras expect (batch, ) return C.reshape(result, ()) else: # scale preds so that the class probas of each sample sum to 1 output /= C.reduce_sum(output, axis=-1) # avoid numerical instability with epsilon clipping output = C.clip(output, epsilon(), 1.0 - epsilon()) return -sum(target * C.log(output), axis=-1) def sparse_categorical_crossentropy(target, output, from_logits=False): target = C.one_hot(target, output.shape[-1]) target = C.reshape(target, output.shape) return categorical_crossentropy(target, output, from_logits) class Function(object): def __init__(self, inputs, outputs, updates=[], **kwargs): self.placeholders = inputs self.trainer = None self.unrelated_updates = None self.updates = updates if len(updates) > 0: assert len(outputs) > 0 self.loss = outputs[0] # need group update by gradient place holder u_ops = [] unrelated_updates = [] for update in updates: if isinstance(update, tuple): if len(update) != 2: raise NotImplementedError else: u = C.assign(update[0], update[1]) else: u = update if len(u.arguments) == 0: u_ops.append(u) else: unrelated_updates.append(u) update_func = C.combine([u.output for u in u_ops]) grads = update_func.find_all_with_name('keras_grad_placeholder') u_list = [] p_list = [] for g in grads: if g in grad_parameter_dict: p_list.append(grad_parameter_dict[g]) u_list.append(g) else: raise ValueError( 'CNTK backend: when constructing trainer, ' 'found gradient node `%s` which is not ' 'related to any parameters in the model. ' 'Please double check how the gradient node ' 'is constructed.' 
% g) if len(u_list) > 0: learner = C.cntk_py.universal_learner(p_list, u_list, update_func) criterion = ( outputs[0], outputs[1]) if len(outputs) > 1 else ( outputs[0], ) self.trainer = C.trainer.Trainer( outputs[0], criterion, [learner]) self.trainer_output = tuple([f.output for f in criterion]) elif len(u_ops) > 0: unrelated_updates.extend(u_ops) if len(unrelated_updates) > 0: self.unrelated_updates = C.combine([_.output for _ in unrelated_updates]) if self.trainer is None: self.metrics_outputs = [f.output for f in outputs] self.metrics_func = C.combine(self.metrics_outputs) # cntk can only handle the loss and 1 metric in the trainer; for more # than 2 outputs the rest need a manual eval elif len(outputs) > 2: self.metrics_outputs = [f.output for f in outputs[2:]] self.metrics_func = C.combine(self.metrics_outputs) else: self.metrics_func = None @staticmethod def _is_input_shape_compatible(input, placeholder): if hasattr(input, 'shape') and hasattr(placeholder, 'shape'): num_dynamic = get_num_dynamic_axis(placeholder) input_shape = input.shape[num_dynamic:] placeholder_shape = placeholder.shape for i, p in zip(input_shape, placeholder_shape): if i != p and p != C.InferredDimension and p != C.FreeDimension: return False return True def __call__(self, inputs): global _LEARNING_PHASE_PLACEHOLDER global _LEARNING_PHASE assert isinstance(inputs, (list, tuple)) feed_dict = {} for tensor, value in zip(self.placeholders, inputs): # cntk only supports computing on floats, so auto-cast here if (hasattr(value, 'dtype') and value.dtype != np.float32 and value.dtype != np.float64): value = value.astype(np.float32) if tensor == _LEARNING_PHASE_PLACEHOLDER: _LEARNING_PHASE_PLACEHOLDER.value = np.asarray(value) else: # The current version of cntk can't support inputs with variable # length. This will be supported in the next release. if not self._is_input_shape_compatible(value, tensor): raise ValueError('CNTK backend: The placeholder has been resolved ' 'to shape `%s`, but input shape is `%s`. Currently ' 'CNTK cannot take variable length inputs. Please ' 'pass inputs that have a static shape.' % (str(tensor.shape), str(value.shape))) feed_dict[tensor] = value updated = [] if self.trainer is not None: input_dict = {} for argument in self.loss.arguments: if argument in feed_dict: input_dict[argument] = feed_dict[argument] else: raise ValueError( 'CNTK backend: argument %s is not found in inputs. ' 'Please double check the model and inputs in ' '`train_function`.' % argument.name) result = self.trainer.train_minibatch( input_dict, self.trainer_output) assert(len(result) == 2) outputs = result[1] for o in self.trainer_output: updated.append(outputs[o]) if self.metrics_func is not None: input_dict = {} for argument in self.metrics_func.arguments: if argument in feed_dict: input_dict[argument] = feed_dict[argument] else: raise ValueError('CNTK backend: metrics argument %s ' 'is not found in inputs. Please double ' 'check the model and inputs.' % argument.name) # Some ops (like dropout) won't be applied during "eval" in cntk. # They are only evaluated in the training phase. To make it work, call # the "forward" method to let cntk know we want to evaluate them. # But the assign ops won't be executed under this mode, that's why # we need this check. 
if (self.unrelated_updates is None and (_LEARNING_PHASE_PLACEHOLDER.value == 1.0 or _LEARNING_PHASE == 1)): _, output_values = self.metrics_func.forward( input_dict, self.metrics_func.outputs, (self.metrics_func.outputs[0],), as_numpy=False) else: output_values = self.metrics_func.eval(input_dict, as_numpy=False) if isinstance(output_values, dict): for o in self.metrics_outputs: value = output_values[o] v = value.asarray() updated.append(v) else: v = output_values.asarray() for o in self.metrics_outputs: updated.append(v) if self.unrelated_updates is not None: input_dict = {} for argument in self.unrelated_updates.arguments: if argument in feed_dict: input_dict[argument] = feed_dict[argument] else: raise ValueError( 'CNTK backend: assign ops argument %s ' 'is not found in inputs. Please double ' 'check the model and inputs.' % argument.name) self.unrelated_updates.eval(input_dict, as_numpy=False) return updated def function(inputs, outputs, updates=[], **kwargs): return Function(inputs, outputs, updates=updates, **kwargs) def temporal_padding(x, padding=(1, 1)): assert len(padding) == 2 num_dynamic_axis = _get_dynamic_axis_num(x) base_shape = x.shape if num_dynamic_axis > 0: assert len(base_shape) == 2 if hasattr(C, 'pad'): x = C.pad(x, pattern=[padding, (0, 0)]) else: x = _padding(x, padding, 0) else: assert len(base_shape) == 3 if hasattr(C, 'pad'): x = C.pad(x, pattern=[(0, 0), padding, (0, 0)]) else: x = _padding(x, padding, 1) return x def _padding(x, pattern, axis): base_shape = x.shape if b_any([dim < 0 for dim in base_shape]): raise ValueError('CNTK Backend: padding input tensor with ' 'shape `%s` contains non-specified dimension, ' 'which is not supported. Please give fixed ' 'dimension to enable padding.' % base_shape) if pattern[0] > 0: prefix_shape = list(base_shape) prefix_shape[axis] = pattern[0] prefix_shape = tuple(prefix_shape) x = C.splice(C.constant(value=0, shape=prefix_shape), x, axis=axis) base_shape = x.shape if pattern[1] > 0: postfix_shape = list(base_shape) postfix_shape[axis] = pattern[1] postfix_shape = tuple(postfix_shape) x = C.splice(x, C.constant(value=0, shape=postfix_shape), axis=axis) return x def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None): assert len(padding) == 2 assert len(padding[0]) == 2 assert len(padding[1]) == 2 if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) num_dynamic_axis = _get_dynamic_axis_num(x) base_shape = x.shape if data_format == 'channels_first': if num_dynamic_axis > 0: assert len(base_shape) == 3 if hasattr(C, 'pad'): x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1])]) else: x = _padding(x, padding[0], 1) x = _padding(x, padding[1], 2) else: assert len(base_shape) == 4 if hasattr(C, 'pad'): x = C.pad(x, pattern=[[0, 0], [0, 0], list(padding[0]), list(padding[1])]) else: x = _padding(x, padding[0], 2) x = _padding(x, padding[1], 3) else: if num_dynamic_axis > 0: assert len(base_shape) == 3 if hasattr(C, 'pad'): x = C.pad(x, pattern=[list(padding[0]), list(padding[1]), [0, 0]]) else: x = _padding(x, padding[0], 0) x = _padding(x, padding[1], 1) else: assert len(base_shape) == 4 if hasattr(C, 'pad'): x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1]), [0, 0]]) else: x = _padding(x, padding[0], 1) x = _padding(x, padding[1], 2) return x def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None): assert len(padding) == 3 assert 
len(padding[0]) == 2 assert len(padding[1]) == 2 assert len(padding[2]) == 2 if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) num_dynamic_axis = _get_dynamic_axis_num(x) base_shape = x.shape if data_format == 'channels_first': if num_dynamic_axis > 0: assert len(base_shape) == 4 if hasattr(C, 'pad'): x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1]), list(padding[2])]) else: x = _padding(x, padding[0], 1) x = _padding(x, padding[1], 2) x = _padding(x, padding[2], 3) else: assert len(base_shape) == 5 if hasattr(C, 'pad'): x = C.pad(x, pattern=[[0, 0], [0, 0], list(padding[0]), list(padding[1]), list(padding[2])]) else: x = _padding(x, padding[0], 2) x = _padding(x, padding[1], 3) x = _padding(x, padding[2], 4) else: if num_dynamic_axis > 0: assert len(base_shape) == 4 if hasattr(C, 'pad'): x = C.pad(x, pattern=[list(padding[0]), list(padding[1]), list(padding[2]), [0, 0]]) else: x = _padding(x, padding[0], 0) x = _padding(x, padding[1], 1) x = _padding(x, padding[2], 2) else: assert len(base_shape) == 5 if hasattr(C, 'pad'): x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1]), list(padding[2]), [0, 0]]) else: x = _padding(x, padding[0], 1) x = _padding(x, padding[1], 2) x = _padding(x, padding[2], 3) return x def one_hot(indices, num_classes): return C.one_hot(indices, num_classes) def get_value(x): if isinstance( x, C.variables.Parameter) or isinstance( x, C.variables.Constant): return x.value else: return eval(x) def batch_get_value(xs): result = [] for x in xs: if (isinstance(x, C.variables.Parameter) or isinstance(x, C.variables.Constant)): result.append(x.value) else: result.append(eval(x)) return result def set_value(x, value): if (isinstance(x, C.variables.Parameter) or isinstance(x, C.variables.Constant)): if isinstance(value, (float, int)): value = np.full(x.shape, value, dtype=floatx()) x.value = value else: raise NotImplementedError def print_tensor(x, message=''): return C.user_function( LambdaFunc(x, when=lambda x: True, execute=lambda x: print(message))) def batch_set_value(tuples): for t in tuples: x = t[0] value = t[1] if isinstance(value, np.ndarray) is False: value =
np.asarray(value)
numpy.asarray
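Illustrative sketch of numpy.asarray, the API this row completes; the values below are made up and only show the no-copy and dtype-cast behaviours the backend code relies on:

import numpy as np

batch = [[0.0, 1.0], [2.0, 3.0]]            # plain nested list
arr = np.asarray(batch)                     # -> ndarray, dtype float64 inferred
same = np.asarray(arr)                      # already an ndarray: returned as-is, no copy
cast = np.asarray(arr, dtype=np.float32)    # dtype change forces a new float32 array,
                                            # like the auto-cast done before feeding CNTK
print(arr.shape, same is arr, cast.dtype)   # (2, 2) True float32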
''' <NAME> set up :2020-1-9 intergrate img and label into one file -- fiducial1024_v1 ''' import argparse import sys, os import pickle import random import collections import json import numpy as np import scipy.io as io import scipy.misc as m import matplotlib.pyplot as plt import glob import math import time import threading import multiprocessing as mp from multiprocessing import Pool import re import cv2 # sys.path.append('/lustre/home/gwxie/hope/project/dewarp/datasets/') # /lustre/home/gwxie/program/project/unwarp/perturbed_imgaes/GAN import utils def getDatasets(dir): return os.listdir(dir) class perturbed(utils.BasePerturbed): def __init__(self, path, bg_path, save_path, save_suffix): self.path = path self.bg_path = bg_path self.save_path = save_path self.save_suffix = save_suffix def save_img(self, m, n, fold_curve='fold', repeat_time=4, fiducial_points = 16, relativeShift_position='relativeShift_v2'): origin_img = cv2.imread(self.path, flags=cv2.IMREAD_COLOR) save_img_shape = [512*2, 480*2] # 320 # reduce_value = np.random.choice([2**4, 2**5, 2**6, 2**7, 2**8], p=[0.01, 0.1, 0.4, 0.39, 0.1]) reduce_value = np.random.choice([2*2, 4*2, 8*2, 16*2, 24*2, 32*2, 40*2, 48*2], p=[0.02, 0.18, 0.2, 0.3, 0.1, 0.1, 0.08, 0.02]) # reduce_value = np.random.choice([8*2, 16*2, 24*2, 32*2, 40*2, 48*2], p=[0.01, 0.02, 0.2, 0.4, 0.19, 0.18]) # reduce_value = np.random.choice([16, 24, 32, 40, 48, 64], p=[0.01, 0.1, 0.2, 0.4, 0.2, 0.09]) base_img_shrink = save_img_shape[0] - reduce_value # enlarge_img_shrink = [1024, 768] # enlarge_img_shrink = [896, 672] # 420 enlarge_img_shrink = [512*4, 480*4] # 420 # enlarge_img_shrink = [896*2, 768*2] # 420 # enlarge_img_shrink = [896, 768] # 420 # enlarge_img_shrink = [768, 576] # 420 # enlarge_img_shrink = [640, 480] # 420 '''''' im_lr = origin_img.shape[0] im_ud = origin_img.shape[1] reduce_value_v2 = np.random.choice([2*2, 4*2, 8*2, 16*2, 24*2, 28*2, 32*2, 48*2], p=[0.02, 0.18, 0.2, 0.2, 0.1, 0.1, 0.1, 0.1]) # reduce_value_v2 = np.random.choice([16, 24, 28, 32, 48, 64], p=[0.01, 0.1, 0.2, 0.3, 0.25, 0.14]) if im_lr > im_ud: im_ud = min(int(im_ud / im_lr * base_img_shrink), save_img_shape[1] - reduce_value_v2) im_lr = save_img_shape[0] - reduce_value else: base_img_shrink = save_img_shape[1] - reduce_value im_lr = min(int(im_lr / im_ud * base_img_shrink), save_img_shape[0] - reduce_value_v2) im_ud = base_img_shrink if round(im_lr / im_ud, 2) < 0.5 or round(im_ud / im_lr, 2) < 0.5: repeat_time = min(repeat_time, 8) edge_padding = 3 im_lr -= im_lr % (fiducial_points-1) - (2*edge_padding) # im_lr % (fiducial_points-1) - 1 im_ud -= im_ud % (fiducial_points-1) - (2*edge_padding) # im_ud % (fiducial_points-1) - 1 im_hight = np.linspace(edge_padding, im_lr - edge_padding, fiducial_points, dtype=np.int64) im_wide = np.linspace(edge_padding, im_ud - edge_padding, fiducial_points, dtype=np.int64) # im_lr -= im_lr % (fiducial_points-1) - (1+2*edge_padding) # im_lr % (fiducial_points-1) - 1 # im_ud -= im_ud % (fiducial_points-1) - (1+2*edge_padding) # im_ud % (fiducial_points-1) - 1 # im_hight = np.linspace(edge_padding, im_lr - (1+edge_padding), fiducial_points, dtype=np.int64) # im_wide = np.linspace(edge_padding, im_ud - (1+edge_padding), fiducial_points, dtype=np.int64) im_x, im_y = np.meshgrid(im_hight, im_wide) segment_x = (im_lr) // (fiducial_points-1) segment_y = (im_ud) // (fiducial_points-1) # plt.plot(im_x, im_y, # color='limegreen', # marker='.', # linestyle='') # plt.grid(True) # plt.show() self.origin_img = cv2.resize(origin_img, (im_ud, im_lr), 
interpolation=cv2.INTER_CUBIC) perturbed_bg_ = getDatasets(self.bg_path) perturbed_bg_img_ = self.bg_path+random.choice(perturbed_bg_) perturbed_bg_img = cv2.imread(perturbed_bg_img_, flags=cv2.IMREAD_COLOR) mesh_shape = self.origin_img.shape[:2] self.synthesis_perturbed_img =
np.full((enlarge_img_shrink[0], enlarge_img_shrink[1], 3), 256, dtype=np.float32)
numpy.full
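A minimal sketch of numpy.full as completed above: pre-filling an image-shaped canvas with a constant value (the shape here is illustrative, not the real enlarge_img_shrink size):

import numpy as np

canvas = np.full((4, 3, 3), 256, dtype=np.float32)   # H x W x 3 constant "background"
print(canvas.shape, canvas.dtype, canvas[0, 0])      # (4, 3, 3) float32 [256. 256. 256.]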
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 28 12:10:11 2019

@author: Omer
"""
## File handler
## This file was initially intended purely to generate the matrices for the near earth code found in: https://public.ccsds.org/Pubs/131x1o2e2s.pdf
## The values from the above pdf were copied manually to a txt file, and it is the purpose of this file to parse it.
## The emphasis here is on correctness, I currently do not see a reason to generalise this file, since matrices will be saved in either json or some matrix friendly format.
import numpy as np
from scipy.linalg import circulant
#import matplotlib.pyplot as plt
import scipy.io
import common
import hashlib
import os

projectDir = os.environ.get('LDPC')
if projectDir == None:
    import pathlib
    projectDir = pathlib.Path(__file__).parent.absolute()
## <NAME>: added on 01/12/2020, need to make sure this doesn't break anything.
import sys
sys.path.insert(1, projectDir)

FILE_HANDLER_INT_DATA_TYPE = np.int32
GENERAL_CODE_MATRIX_DATA_TYPE = np.int32
NIBBLE_CONVERTER = np.array([8, 4, 2, 1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)


def nibbleToHex(inputArray):
    n = NIBBLE_CONVERTER.dot(inputArray)
    if n == 10:
        h = 'A'
    elif n == 11:
        h = 'B'
    elif n == 12:
        h = 'C'
    elif n == 13:
        h = 'D'
    elif n == 14:
        h = 'E'
    elif n == 15:
        h = 'F'
    else:
        h = str(n)
    return h


def binaryArraytoHex(inputArray):
    d1 = len(inputArray)
    assert (d1 % 4 == 0)
    outputArray = np.zeros(d1//4, dtype = str)
    outputString = ''
    for j in range(d1//4):
        nibble = inputArray[4 * j : 4 * j + 4]
        h = nibbleToHex(nibble)
        outputArray[j] = h
        outputString = outputString + h
    return outputArray, outputString


def hexStringToBinaryArray(hexString):
    outputBinary = np.array([], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
    for i in hexString:
        if i == '0':
            nibble = np.array([0,0,0,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
        elif i == '1':
            nibble = np.array([0,0,0,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
        elif i == '2':
            nibble = np.array([0,0,1,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
        elif i == '3':
            nibble = np.array([0,0,1,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
        elif i == '4':
            nibble =
np.array([0,1,0,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
numpy.array
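Sketch of numpy.array with an explicit dtype, mirroring the nibble table above; INT_T stands in for the file's GENERAL_CODE_MATRIX_DATA_TYPE (np.int32 there):

import numpy as np

INT_T = np.int32
nibble = np.array([0, 1, 0, 0], dtype=INT_T)      # the completion above: hex digit '4'
converter = np.array([8, 4, 2, 1], dtype=INT_T)   # same weights as NIBBLE_CONVERTER
print(converter.dot(nibble))                      # 4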
import numpy as np
from sklearn.linear_model import LogisticRegression
from .models import User
from .twitter import vectorize_tweet


def predict_user(user1_name, user2_name, tweet_text):
    """
    Determine and return which user is more likely to say a given Tweet.
    Example: predict_user('ausen', 'elonmusk', 'Lambda School Rocks!')
    Returns 1 corresponding to 1st user passed in, or 0 for second.
    """
    user1 = User.query.filter(User.name == user1_name).one()
    user2 = User.query.filter(User.name == user2_name).one()
    user1_vect = np.array([tweet.vect for tweet in user1.tweets])
    user2_vect =
np.array([tweet.vect for tweet in user2.tweets])
numpy.array
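Sketch of the completed pattern: numpy.array over a list comprehension stacks equal-length vectors into a 2-D matrix. The Tweet class below is a hypothetical stand-in for the ORM model, with toy vectors in place of tweet embeddings:

import numpy as np

class Tweet:                                       # hypothetical stand-in for the ORM model
    def __init__(self, vect):
        self.vect = vect

tweets = [Tweet([0.1, 0.2, 0.3]), Tweet([0.4, 0.5, 0.6])]
user_vect = np.array([t.vect for t in tweets])     # stacks the vectors row-wise
print(user_vect.shape)                             # (2, 3): one row per tweet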
from __future__ import print_function import numpy as np import matplotlib.pyplot as plt class TwoLayerNet(object): """ A two-layer fully-connected neural network. The net has an input dimension of N, a hidden layer dimension of H, and performs classification over C classes. We train the network with a softmax loss function and L2 regularization on the weight matrices. The network uses a ReLU nonlinearity after the first fully connected layer. In other words, the network has the following architecture: input - fully connected layer - ReLU - fully connected layer - softmax The outputs of the second fully-connected layer are the scores for each class. """ def __init__(self, input_size, hidden_size, output_size, std=1e-4): """ Initialize the model. Weights are initialized to small random values and biases are initialized to zero. Weights and biases are stored in the variable self.params, which is a dictionary with the following keys W1: First layer weights; has shape (D, H) b1: First layer biases; has shape (H,) W2: Second layer weights; has shape (H, C) b2: Second layer biases; has shape (C,) Inputs: - input_size: The dimension D of the input data. - hidden_size: The number of neurons H in the hidden layer. - output_size: The number of classes C. """ self.params = {} self.params['W1'] = std * np.random.randn(input_size, hidden_size) self.params['b1'] = np.zeros(hidden_size) self.params['W2'] = std * np.random.randn(hidden_size, output_size) self.params['b2'] = np.zeros(output_size) def loss(self, X, y=None, reg=0.0): """ Compute the loss and gradients for a two layer fully connected neural network. Inputs: - X: Input data of shape (N, D). Each X[i] is a training sample. - y: Vector of training labels. y[i] is the label for X[i], and each y[i] is an integer in the range 0 <= y[i] < C. This parameter is optional; if it is not passed then we only return scores, and if it is passed then we instead return the loss and gradients. - reg: Regularization strength. Returns: If y is None, return a matrix scores of shape (N, C) where scores[i, c] is the score for class c on input X[i]. If y is not None, instead return a tuple of: - loss: Loss (data loss and regularization loss) for this batch of training samples. - grads: Dictionary mapping parameter names to gradients of those parameters with respect to the loss function; has the same keys as self.params. """ # Unpack variables from the params dictionary W1, b1 = self.params['W1'], self.params['b1'] W2, b2 = self.params['W2'], self.params['b2'] N, D = X.shape # Compute the forward pass scores = None ####################################################################### # TODO: Perform the forward pass, computing the class scores for the # # input. Store the result in the scores variable, which should be an # # array of shape (N, C). # ####################################################################### scores1 = X.dot(W1) + b1 # FC1 X2 = np.maximum(0, scores1) # ReLU FC1 scores = X2.dot(W2) + b2 # FC2 ####################################################################### # END OF YOUR CODE # ####################################################################### # If the targets are not given then jump out, we're done if y is None: return scores scores -= np.max(scores) # Fix Number instability scores_exp = np.exp(scores) probs = scores_exp / np.sum(scores_exp, axis=1, keepdims=True) # Compute the loss loss = None ####################################################################### # TODO: Finish the forward pass, and compute the loss. 
This should # # include both the data loss and L2 regularization for W1 and W2. # # Store the result in the variable loss, which should be a scalar. Use# # the Softmax classifier loss. # ####################################################################### correct_probs = -np.log(probs[np.arange(N), y]) # L_i = -log(e^correct_score/sum(e^scores))) = -log(correct_probs) loss = np.sum(correct_probs) loss /= N # L2 regularization WRT W1 and W2 loss += reg * (np.sum(W1 * W1) + np.sum(W2 * W2)) ####################################################################### # END OF YOUR CODE # ####################################################################### # Backward pass: compute gradients grads = {} ############################################################################# # TODO: Compute the backward pass, computing the derivatives of the weights # # and biases. Store the results in the grads dictionary. For example, # # grads['W1'] should store the gradient on W1, and be a matrix of same size # ############################################################################# # gradient of loss_i WRT scores_k # dL_i/ds_k = probs_k-1(y_i == k) # this means the gradient is the score for "other" classes and score-1 # for the target class d_scores = probs.copy() d_scores[
np.arange(N)
numpy.arange
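Sketch of the indexing idiom numpy.arange(N) enables here: paired with a label vector it selects one entry per row, which is the usual softmax-gradient step the surrounding code builds toward (numbers are illustrative):

import numpy as np

probs = np.array([[0.7, 0.2, 0.1],
                  [0.1, 0.3, 0.6]])
y = np.array([0, 2])                   # true class per sample
N = probs.shape[0]
print(probs[np.arange(N), y])          # [0.7 0.6] -- one picked entry per row
d_scores = probs.copy()
d_scores[np.arange(N), y] -= 1         # subtract 1 at each sample's true class
print(d_scores)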
#!/usr/bin/env python3 import tensorflow as tf physical_devices = tf.config.list_physical_devices('GPU') try: tf.config.experimental.set_memory_growth(physical_devices[0], True) except: # Invalid device or cannot modify virtual devices once initialized. pass import numpy as np import os, time, csv import tqdm import umap import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import datetime import signal import net from matplotlib import rcParams rcParams['font.family'] = 'sans-serif' rcParams['font.sans-serif'] = ['Hiragino Maru Gothic Pro', 'Yu Gothic', 'Meirio', 'Takao', 'IPAexGothic', 'IPAPGothic', 'Noto Sans CJK JP'] import net class SimpleEncodeDecoder: def __init__(self): self.save_dir = './result/step1/' self.result_dir = './result/plot/' os.makedirs(self.result_dir, exist_ok=True) checkpoint_dir = self.save_dir self.max_epoch = 300 self.steps_per_epoch = 1000 self.batch_size = 64 lr = tf.keras.optimizers.schedules.ExponentialDecay(1e-3, 1e5, 0.5) self.optimizer = tf.keras.optimizers.Adam(lr) self.encoder = net.FeatureBlock() self.encoder.summary() self.decoder = net.SimpleDecoderBlock() self.decoder.summary() inputs = { 'image': tf.keras.Input(shape=(128,128,3)), } feature_out = self.encoder(inputs) outputs = self.decoder(feature_out) self.model = tf.keras.Model(inputs, outputs, name='SimpleEncodeDecoder') checkpoint = tf.train.Checkpoint(optimizer=self.optimizer, model=self.model) last = tf.train.latest_checkpoint(checkpoint_dir) checkpoint.restore(last) self.manager = tf.train.CheckpointManager( checkpoint, directory=checkpoint_dir, max_to_keep=2) if not last is None: self.init_epoch = int(os.path.basename(last).split('-')[1]) print('loaded %d epoch'%self.init_epoch) else: self.init_epoch = 0 self.model.summary() def eval(self): self.data = net.FontData() print("Plot: ", self.init_epoch + 1) acc = self.make_plot(self.data.test_data(self.batch_size), (self.init_epoch + 1)) print('acc', acc) @tf.function def eval_substep(self, inputs): input_data = { 'image': inputs['input'], } feature = self.encoder(input_data) outputs = self.decoder(feature) target_id = inputs['index'] target_id1 = inputs['idx1'] target_id2 = inputs['idx2'] pred_id1 = tf.nn.softmax(outputs['id1'], -1) pred_id2 = tf.nn.softmax(outputs['id2'], -1) return { 'feature': feature, 'pred_id1': pred_id1, 'pred_id2': pred_id2, 'target_id': target_id, 'target_id1': target_id1, 'target_id2': target_id2, } def make_plot(self, test_ds, epoch): result = [] labels = [] with open(os.path.join(self.result_dir,'test_result-%d.txt'%epoch),'w') as txt: correct_count = 0 failed_count = 0 with tqdm.tqdm(total=len(self.data.test_keys)) as pbar: for inputs in test_ds: pred = self.eval_substep(inputs) result += [pred['feature']] labels += [pred['target_id']] for i in range(pred['target_id1'].shape[0]): txt.write('---\n') target = pred['target_id'][i].numpy() txt.write('target: id %d = %s\n'%(target, self.data.glyphs[target-1])) predid1 =
np.argmax(pred['pred_id1'][i])
numpy.argmax
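Sketch of numpy.argmax as completed above: the index of the largest score, i.e. the predicted class id (scores are made up):

import numpy as np

pred = np.array([0.05, 0.80, 0.15])    # toy softmax output
print(np.argmax(pred))                 # 1 -- index of the largest probability
batch = np.array([[0.1, 0.9],
                  [0.8, 0.2]])
print(np.argmax(batch, axis=-1))       # [1 0] -- per-row winner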
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 28 12:10:11 2019

@author: Omer
"""
## File handler
## This file was initially intended purely to generate the matrices for the near earth code found in: https://public.ccsds.org/Pubs/131x1o2e2s.pdf
## The values from the above pdf were copied manually to a txt file, and it is the purpose of this file to parse it.
## The emphasis here is on correctness, I currently do not see a reason to generalise this file, since matrices will be saved in either json or some matrix friendly format.
import numpy as np
from scipy.linalg import circulant
#import matplotlib.pyplot as plt
import scipy.io
import common
import hashlib
import os

projectDir = os.environ.get('LDPC')
if projectDir == None:
    import pathlib
    projectDir = pathlib.Path(__file__).parent.absolute()
## <NAME>: added on 01/12/2020, need to make sure this doesn't break anything.
import sys
sys.path.insert(1, projectDir)

FILE_HANDLER_INT_DATA_TYPE = np.int32
GENERAL_CODE_MATRIX_DATA_TYPE = np.int32
NIBBLE_CONVERTER = np.array([8, 4, 2, 1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)


def nibbleToHex(inputArray):
    n = NIBBLE_CONVERTER.dot(inputArray)
    if n == 10:
        h = 'A'
    elif n == 11:
        h = 'B'
    elif n == 12:
        h = 'C'
    elif n == 13:
        h = 'D'
    elif n == 14:
        h = 'E'
    elif n == 15:
        h = 'F'
    else:
        h = str(n)
    return h


def binaryArraytoHex(inputArray):
    d1 = len(inputArray)
    assert (d1 % 4 == 0)
    outputArray = np.zeros(d1//4, dtype = str)
    outputString = ''
    for j in range(d1//4):
        nibble = inputArray[4 * j : 4 * j + 4]
        h = nibbleToHex(nibble)
        outputArray[j] = h
        outputString = outputString + h
    return outputArray, outputString


def hexStringToBinaryArray(hexString):
    outputBinary = np.array([], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
    for i in hexString:
        if i == '0':
            nibble = np.array([0,0,0,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
        elif i == '1':
            nibble = np.array([0,0,0,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
        elif i == '2':
            nibble = np.array([0,0,1,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
        elif i == '3':
            nibble = np.array([0,0,1,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
        elif i == '4':
            nibble = np.array([0,1,0,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
        elif i == '5':
            nibble = np.array([0,1,0,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
        elif i == '6':
            nibble = np.array([0,1,1,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
        elif i == '7':
            nibble = np.array([0,1,1,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
        elif i == '8':
            nibble = np.array([1,0,0,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
        elif i == '9':
            nibble = np.array([1,0,0,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
        elif i == 'A':
            nibble = np.array([1,0,1,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
        elif i == 'B':
            nibble = np.array([1,0,1,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
        elif i == 'C':
            nibble = np.array([1,1,0,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
        elif i == 'D':
            nibble = np.array([1,1,0,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
        elif i == 'E':
            nibble =
np.array([1,1,1,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
numpy.array
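Sketch checking the completed row against the file's own decoding: the 'E' nibble dotted with the [8, 4, 2, 1] weights gives 14, which nibbleToHex maps back to 'E':

import numpy as np

nibble_E = np.array([1, 1, 1, 0], dtype=np.int32)   # the completion above, hex digit 'E'
print(np.array([8, 4, 2, 1]).dot(nibble_E))         # 14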
# ________
# /
# \ /
# \ /
# \/
import random
import textwrap
import emd_mean
import AdvEMDpy
import emd_basis
import emd_utils
import numpy as np
import pandas as pd
import cvxpy as cvx
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from scipy.ndimage import gaussian_filter
from emd_utils import time_extension, Utility
from scipy.interpolate import CubicSpline
from emd_hilbert import Hilbert, hilbert_spectrum
from emd_preprocess import Preprocess
from emd_mean import Fluctuation
from AdvEMDpy import EMD

# alternate packages
from PyEMD import EMD as pyemd0215
import emd as emd040

sns.set(style='darkgrid')

pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001)
pseudo_alg_time_series =
np.sin(pseudo_alg_time)
numpy.sin
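Sketch of the completed call: numpy.sin applied element-wise to a linspace grid, a coarse stand-in for the 1001-point signal above:

import numpy as np

t = np.linspace(0, 2 * np.pi, 5)       # [0, pi/2, pi, 3*pi/2, 2*pi]
print(np.sin(t).round(3))              # approximately [ 0.  1.  0. -1. -0.]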
import argparse import json import numpy as np import pandas as pd import os from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report,f1_score from keras.models import Sequential from keras.layers import Dense, Dropout from keras import backend as K from keras.utils.vis_utils import plot_model from sklearn.externals import joblib import time def f1(y_true, y_pred): def recall(y_true, y_pred): """Recall metric. Only computes a batch-wise average of recall. Computes the recall, a metric for multi-label classification of how many relevant items are selected. """ true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) possible_positives = K.sum(K.round(K.clip(y_true, 0, 1))) recall = true_positives / (possible_positives + K.epsilon()) return recall def precision(y_true, y_pred): """Precision metric. Only computes a batch-wise average of precision. Computes the precision, a metric for multi-label classification of how many selected items are relevant. """ true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1))) precision = true_positives / (predicted_positives + K.epsilon()) return precision precision = precision(y_true, y_pred) recall = recall(y_true, y_pred) return 2*((precision*recall)/(precision+recall+K.epsilon())) def get_embeddings(sentences_list,layer_json): ''' :param sentences_list: the path o the sentences.txt :param layer_json: the path of the json file that contains the embeddings of the sentences :return: Dictionary with key each sentence of the sentences_list and as value the embedding ''' sentences = dict()#dict with key the index of each line of the sentences_list.txt and as value the sentence embeddings = dict()##dict with key the index of each sentence and as value the its embedding sentence_emb = dict()#key:sentence,value:its embedding with open(sentences_list,'r') as file: for index,line in enumerate(file): sentences[index] = line.strip() with open(layer_json, 'r',encoding='utf-8') as f: for line in f: embeddings[json.loads(line)['linex_index']] = np.asarray(json.loads(line)['features']) for key,value in sentences.items(): sentence_emb[value] = embeddings[key] return sentence_emb def train_classifier(sentences_list,layer_json,dataset_csv,filename): ''' :param sentences_list: the path o the sentences.txt :param layer_json: the path of the json file that contains the embeddings of the sentences :param dataset_csv: the path of the dataset :param filename: The path of the pickle file that the model will be stored :return: ''' dataset = pd.read_csv(dataset_csv) bert_dict = get_embeddings(sentences_list,layer_json) length = list() sentence_emb = list() previous_emb = list() next_list = list() section_list = list() label = list() errors = 0 for row in dataset.iterrows(): sentence = row[1][0].strip() previous = row[1][1].strip() nexts = row[1][2].strip() section = row[1][3].strip() if sentence in bert_dict: sentence_emb.append(bert_dict[sentence]) else: sentence_emb.append(np.zeros(768)) print(sentence) errors += 1 if previous in bert_dict: previous_emb.append(bert_dict[previous]) else: previous_emb.append(np.zeros(768)) if nexts in bert_dict: next_list.append(bert_dict[nexts]) else: next_list.append(np.zeros(768)) if section in bert_dict: section_list.append(bert_dict[section]) else: section_list.append(np.zeros(768)) length.append(row[1][4]) label.append(row[1][5]) sentence_emb = np.asarray(sentence_emb) 
print(sentence_emb.shape) next_emb = np.asarray(next_list) print(next_emb.shape) previous_emb = np.asarray(previous_emb) print(previous_emb.shape) section_emb = np.asarray(section_list) print(sentence_emb.shape) length = np.asarray(length) print(length.shape) label = np.asarray(label) print(errors) features = np.concatenate([sentence_emb, previous_emb, next_emb,section_emb], axis=1) features = np.column_stack([features, length]) # np.append(features,length,axis=1) print(features.shape) X_train, X_val, y_train, y_val = train_test_split(features, label, test_size=0.33, random_state=42) log = LogisticRegression(random_state=0, solver='newton-cg', max_iter=1000, C=0.1) log.fit(X_train, y_train) #save the model _ = joblib.dump(log, filename, compress=9) predictions = log.predict(X_val) print("###########################################") print("Results using embeddings from the",layer_json,"file") print(classification_report(y_val, predictions)) print("F1 score using Logistic Regression:",f1_score(y_val, predictions)) print("###########################################") #train a DNN f1_results = list() for i in range(3): model = Sequential() model.add(Dense(64, activation='relu', trainable=True)) model.add(Dense(128, activation='relu', trainable=True)) model.add(Dropout(0.30)) model.add(Dense(64, activation='relu', trainable=True)) model.add(Dropout(0.25)) model.add(Dense(64, activation='relu', trainable=True)) model.add(Dropout(0.35)) model.add(Dense(1, activation='sigmoid')) # compile network model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=[f1]) # fit network model.fit(X_train, y_train, epochs=100, batch_size=64) loss, f_1 = model.evaluate(X_val, y_val, verbose=1) print('\nTest F1: %f' % (f_1 * 100)) f1_results.append(f_1) model = None print("###########################################") print("Results using embeddings from the", layer_json, "file") # evaluate print(np.mean(f1_results)) print("###########################################") def parameter_tuning_LR(sentences_list,layer_json,dataset_csv): ''' :param sentences_list: the path o the sentences.txt :param layer_json: the path of the json file that contains the embeddings of the sentences :param dataset_csv: the path of the dataset :return: ''' dataset = pd.read_csv(dataset_csv) bert_dict = get_embeddings(sentences_list,layer_json) length = list() sentence_emb = list() previous_emb = list() next_list = list() section_list = list() label = list() errors = 0 for row in dataset.iterrows(): sentence = row[1][0].strip() previous = row[1][1].strip() nexts = row[1][2].strip() section = row[1][3].strip() if sentence in bert_dict: sentence_emb.append(bert_dict[sentence]) else: sentence_emb.append(np.zeros(768)) print(sentence) errors += 1 if previous in bert_dict: previous_emb.append(bert_dict[previous]) else: previous_emb.append(np.zeros(768)) if nexts in bert_dict: next_list.append(bert_dict[nexts]) else: next_list.append(np.zeros(768)) if section in bert_dict: section_list.append(bert_dict[section]) else: section_list.append(np.zeros(768)) length.append(row[1][4]) label.append(row[1][5]) sentence_emb = np.asarray(sentence_emb) print(sentence_emb.shape) next_emb = np.asarray(next_list) print(next_emb.shape) previous_emb = np.asarray(previous_emb) print(previous_emb.shape) section_emb = np.asarray(section_list) print(sentence_emb.shape) length = np.asarray(length) print(length.shape) label = np.asarray(label) print(errors) features = np.concatenate([sentence_emb, previous_emb, next_emb,section_emb], axis=1) features = 
np.column_stack([features, length]) print(features.shape) X_train, X_val, y_train, y_val = train_test_split(features, label, test_size=0.33, random_state=42) C = [0.1,1,2,5,10] solver = ['newton-cg','saga','sag'] best_params = dict() best_score = 0.0 for c in C: for s in solver: start = time.time() log = LogisticRegression(random_state=0, solver=s, max_iter=1000, C=c) log.fit(X_train, y_train) predictions = log.predict(X_val) print("###########################################") print("LR with C =",c,'and solver = ',s) print("Results using embeddings from the", layer_json, "file") print(classification_report(y_val, predictions)) f1 = f1_score(y_val, predictions) if f1 > best_score: best_score = f1 best_params['c'] = c best_params['solver'] = s print("F1 score using Logistic Regression:",f1) print("###########################################") end = time.time() running_time = end - start print("Running time:"+str(running_time)) def visualize_DNN(file_to_save): ''' Save the DNN architecture to a png file. Better use the Visulize_DNN.ipynd :param file_to_save: the png file that the architecture of the DNN will be saved. :return: None ''' model = Sequential() model.add(Dense(64, activation='relu', trainable=True)) model.add(Dense(128, activation='relu', trainable=True)) model.add(Dropout(0.30)) model.add(Dense(64, activation='relu', trainable=True)) model.add(Dropout(0.25)) model.add(Dense(64, activation='relu', trainable=True)) model.add(Dropout(0.35)) model.add(Dense(1, activation='sigmoid')) plot_model(model, to_file=file_to_save, show_shapes=True) def save_model(sentences_list,layer_json,dataset_csv,pkl): dataset = pd.read_csv(dataset_csv) bert_dict = get_embeddings(sentences_list, layer_json) length = list() sentence_emb = list() previous_emb = list() next_list = list() section_list = list() label = list() errors = 0 for row in dataset.iterrows(): sentence = row[1][0].strip() previous = row[1][1].strip() nexts = row[1][2].strip() section = row[1][3].strip() if sentence in bert_dict: sentence_emb.append(bert_dict[sentence]) else: sentence_emb.append(np.zeros(768)) print(sentence) errors += 1 if previous in bert_dict: previous_emb.append(bert_dict[previous]) else: previous_emb.append(np.zeros(768)) if nexts in bert_dict: next_list.append(bert_dict[nexts]) else: next_list.append(np.zeros(768)) if section in bert_dict: section_list.append(bert_dict[section]) else: section_list.append(np.zeros(768)) length.append(row[1][4]) label.append(row[1][5]) sentence_emb = np.asarray(sentence_emb) print(sentence_emb.shape) next_emb = np.asarray(next_list) print(next_emb.shape) previous_emb = np.asarray(previous_emb) print(previous_emb.shape) section_emb = np.asarray(section_list) print(sentence_emb.shape) length =
np.asarray(length)
numpy.asarray
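Sketch of the feature-assembly pattern around the completion: lists converted with numpy.asarray, joined with concatenate, then a scalar column appended via column_stack (toy 2-d vectors stand in for the 768-d BERT embeddings used above):

import numpy as np

sentence_emb = np.asarray([[1.0, 2.0], [3.0, 4.0]])
next_emb = np.asarray([[5.0, 6.0], [7.0, 8.0]])
length = np.asarray([10, 20])                                 # one scalar feature per sample

features = np.concatenate([sentence_emb, next_emb], axis=1)   # (2, 4)
features = np.column_stack([features, length])                # (2, 5): scalar appended as a column
print(features.shape)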
from __future__ import division
from math import sqrt as sqrt
from itertools import product as product
import torch
import numpy as np
import cv2
from lib.utils.visualize_utils import TBWriter


def vis(func):
    """tensorboard visualization if has writer as input"""
    def wrapper(*args, **kw):
        return func(*args, **kw) if kw['tb_writer'] is not None else None
    return wrapper


class PriorBoxBase(object):
    """Compute priorbox coordinates in center-offset form for each source
    feature map.
    """

    def __init__(self, cfg):
        super(PriorBoxBase, self).__init__()
        self.image_size = cfg.MODEL.IMAGE_SIZE
        self._steps = cfg.MODEL.STEPS
        self._cfg_list = []
        self._prior_cfg = {}
        self._clip = cfg.MODEL.CLIP
        self._variance = cfg.MODEL.VARIANCE
        for v in self._variance:
            if v <= 0:
                raise ValueError('Variances must be greater than 0')

    def _setup(self, cfg):
        num_feat = len(self._steps)
        for item in self._cfg_list:
            if item not in cfg.MODEL:
                raise Exception("wrong anchor config!")
            if len(cfg.MODEL[item]) != num_feat and len(cfg.MODEL[item]) != 0:
                raise Exception("config {} length does not match step length!".format(item))
            self._prior_cfg[item] = cfg.MODEL[item]

    @property
    def num_priors(self):
        """allow prior num calculation before knowing feature map size"""
        assert self._prior_cfg is not {}
        return [int(len(self._create_prior(0, 0, k)) / 4) for k in range(len(self._steps))]

    def _create_prior(self, cx, cy, k):
        raise NotImplementedError

    @vis
    def _image_proc(self, image=None, tb_writer=None):
        # TODO test with image
        if isinstance(image, type(None)):
            image =
np.ones((self.image_size[1], self.image_size[0], 3))
numpy.ones
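Sketch of numpy.ones as completed above: an all-ones dummy image in H x W x C layout (sizes here are illustrative, not the real image_size):

import numpy as np

image = np.ones((4, 6, 3))                     # float64 by default
print(image.shape, image.dtype, image.sum())   # (4, 6, 3) float64 72.0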
# ________ # / # \ / # \ / # \/ import random import textwrap import emd_mean import AdvEMDpy import emd_basis import emd_utils import numpy as np import pandas as pd import cvxpy as cvx import seaborn as sns import matplotlib.pyplot as plt from scipy.integrate import odeint from scipy.ndimage import gaussian_filter from emd_utils import time_extension, Utility from scipy.interpolate import CubicSpline from emd_hilbert import Hilbert, hilbert_spectrum from emd_preprocess import Preprocess from emd_mean import Fluctuation from AdvEMDpy import EMD # alternate packages from PyEMD import EMD as pyemd0215 import emd as emd040 sns.set(style='darkgrid') pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001) pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time) pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series) # plot 0 - addition fig = plt.figure(figsize=(9, 4)) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('First Iteration of Sifting Algorithm') plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1) plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()], c='r', label=r'$M(t_i)$', zorder=2) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4) plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()], c='c', label=r'$m(t_j)$', zorder=3) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5) plt.yticks(ticks=[-2, -1, 0, 1, 2]) plt.xticks(ticks=[0, np.pi, 2 * np.pi], labels=[r'0', r'$\pi$', r'$2\pi$']) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/pseudo_algorithm.png') plt.show() knots = np.arange(12) time = np.linspace(0, 11, 1101) basis = emd_basis.Basis(time=time, time_series=time) b_spline_basis = basis.cubic_b_spline(knots) chsi_basis = basis.chsi_basis(knots) # plot 1 plt.title('Non-Natural Cubic B-Spline Bases at Boundary') plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $') plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $') plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $') plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $') plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $') plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $']) plt.xlim(4.4, 6.6) plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.legend(loc='upper left') plt.savefig('jss_figures/boundary_bases.png') plt.show() # plot 1a - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) knots_uniform = np.linspace(0, 2 * np.pi, 51) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0] fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) 
plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Uniform Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, Linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Uniform Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Uniform Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots_uniform)): axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_uniform.png') plt.show() # plot 1b - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=1, verbose=False) fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Statically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, Linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Statically Optimised Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Statically Optimised Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots)): axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_1.png') plt.show() # plot 1c - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=2, verbose=False) fig, 
axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Dynamically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, Linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Dynamically Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Dynamically Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots[i])): axs[i].plot(knots[i][j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_2.png') plt.show() # plot 1d - addition window = 81 fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Filtering Demonstration') axs[1].set_title('Zoomed Region') preprocess_time = pseudo_alg_time.copy() np.random.seed(1) random.seed(1) preprocess_time_series = pseudo_alg_time_series + np.random.normal(0, 0.1, len(preprocess_time)) for i in random.sample(range(1000), 500): preprocess_time_series[i] += np.random.normal(0, 1) preprocess = Preprocess(time=preprocess_time, time_series=preprocess_time_series) axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[0].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[0].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12)) axs[0].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize interpolation filter', 14)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) 
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[1].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[1].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12)) axs[1].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize interpolation filter', 14)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_filter.png') plt.show() # plot 1e - addition fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Smoothing Demonstration') axs[1].set_title('Zoomed Region') axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[0].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) downsampled_and_decimated = preprocess.downsample() axs[0].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 11)) downsampled = preprocess.downsample(decimate=False) axs[0].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[1].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) 
axs[1].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 13)) axs[1].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.06, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.06, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_smooth.png') plt.show() # plot 2 fig, axs = plt.subplots(1, 2, sharey=True) axs[0].set_title('Cubic B-Spline Bases') axs[0].plot(time, b_spline_basis[2, :].T, '--', label='Basis 1') axs[0].plot(time, b_spline_basis[3, :].T, '--', label='Basis 2') axs[0].plot(time, b_spline_basis[4, :].T, '--', label='Basis 3') axs[0].plot(time, b_spline_basis[5, :].T, '--', label='Basis 4') axs[0].legend(loc='upper left') axs[0].plot(5 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].plot(6 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].set_xticks([5, 6]) axs[0].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[0].set_xlim(4.5, 6.5) axs[1].set_title('Cubic Hermite Spline Bases') axs[1].plot(time, chsi_basis[10, :].T, '--') axs[1].plot(time, chsi_basis[11, :].T, '--') axs[1].plot(time, chsi_basis[12, :].T, '--') axs[1].plot(time, chsi_basis[13, :].T, '--') axs[1].plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].set_xticks([5, 6]) axs[1].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[1].set_xlim(4.5, 6.5) plt.savefig('jss_figures/comparing_bases.png') plt.show() # plot 3 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_dash = maxima_y[-1] * np.ones_like(max_dash_time) min_dash_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_dash = minima_y[-1] * np.ones_like(min_dash_time) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) max_discard = maxima_y[-1] max_discard_time = minima_x[-1] - maxima_x[-1] + minima_x[-1] max_discard_dash_time = np.linspace(max_discard_time - width, max_discard_time + width, 101) max_discard_dash = max_discard * np.ones_like(max_discard_dash_time) dash_2_time = np.linspace(minima_x[-1], max_discard_time, 101) dash_2 = np.linspace(minima_y[-1], max_discard, 101) end_point_time = time[-1] end_point = time_series[-1] time_reflect = np.linspace((5 - a) * np.pi, (5 + a) * np.pi, 101) time_series_reflect = np.flip(np.cos(np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101)) + np.cos(5 * np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101))) time_series_anti_reflect = time_series_reflect[0] - time_series_reflect utils = emd_utils.Utility(time=time, time_series=time_series_anti_reflect) anti_max_bool = utils.max_bool_func_1st_order_fd() anti_max_point_time = time_reflect[anti_max_bool] 
anti_max_point = time_series_anti_reflect[anti_max_bool] utils = emd_utils.Utility(time=time, time_series=time_series_reflect) no_anchor_max_time = time_reflect[utils.max_bool_func_1st_order_fd()] no_anchor_max = time_series_reflect[utils.max_bool_func_1st_order_fd()] point_1 = 5.4 length_distance = np.linspace(maxima_y[-1], minima_y[-1], 101) length_distance_time = point_1 * np.pi * np.ones_like(length_distance) length_time = np.linspace(point_1 * np.pi - width, point_1 * np.pi + width, 101) length_top = maxima_y[-1] * np.ones_like(length_time) length_bottom = minima_y[-1] * np.ones_like(length_time) point_2 = 5.2 length_distance_2 = np.linspace(time_series[-1], minima_y[-1], 101) length_distance_time_2 = point_2 * np.pi * np.ones_like(length_distance_2) length_time_2 = np.linspace(point_2 * np.pi - width, point_2 * np.pi + width, 101) length_top_2 = time_series[-1] * np.ones_like(length_time_2) length_bottom_2 = minima_y[-1] * np.ones_like(length_time_2) symmetry_axis_1_time = minima_x[-1] * np.ones(101) symmetry_axis_2_time = time[-1] * np.ones(101) symmetry_axis = np.linspace(-2, 2, 101) end_time = np.linspace(time[-1] - width, time[-1] + width, 101) end_signal = time_series[-1] * np.ones_like(end_time) anti_symmetric_time = np.linspace(time[-1] - 0.5, time[-1] + 0.5, 101) anti_symmetric_signal = time_series[-1] * np.ones_like(anti_symmetric_time) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.plot(time, time_series, LineWidth=2, label='Signal') plt.title('Symmetry Edge Effects Example') plt.plot(time_reflect, time_series_reflect, 'g--', LineWidth=2, label=textwrap.fill('Symmetric signal', 10)) plt.plot(time_reflect[:51], time_series_anti_reflect[:51], '--', c='purple', LineWidth=2, label=textwrap.fill('Anti-symmetric signal', 10)) plt.plot(max_dash_time, max_dash, 'k-') plt.plot(min_dash_time, min_dash, 'k-') plt.plot(dash_1_time, dash_1, 'k--') plt.plot(dash_2_time, dash_2, 'k--') plt.plot(length_distance_time, length_distance, 'k--') plt.plot(length_distance_time_2, length_distance_2, 'k--') plt.plot(length_time, length_top, 'k-') plt.plot(length_time, length_bottom, 'k-') plt.plot(length_time_2, length_top_2, 'k-') plt.plot(length_time_2, length_bottom_2, 'k-') plt.plot(end_time, end_signal, 'k-') plt.plot(symmetry_axis_1_time, symmetry_axis, 'r--', zorder=1) plt.plot(anti_symmetric_time, anti_symmetric_signal, 'r--', zorder=1) plt.plot(symmetry_axis_2_time, symmetry_axis, 'r--', label=textwrap.fill('Axes of symmetry', 10), zorder=1) plt.text(5.1 * np.pi, -0.7, r'$\beta$L') plt.text(5.34 * np.pi, -0.05, 'L') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(max_discard_time, max_discard, c='purple', zorder=4, label=textwrap.fill('Symmetric Discard maxima', 10)) plt.scatter(end_point_time, end_point, c='orange', zorder=4, label=textwrap.fill('Symmetric Anchor maxima', 10)) plt.scatter(anti_max_point_time, anti_max_point, c='green', zorder=4, label=textwrap.fill('Anti-Symmetric maxima', 10)) plt.scatter(no_anchor_max_time, no_anchor_max, c='gray', zorder=4, label=textwrap.fill('Symmetric maxima', 10)) plt.xlim(3.9 * np.pi, 5.5 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_symmetry_anti.png') plt.show() 
# plot 4 a = 0.21 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_1 = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_dash_2 = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_dash_time_1 = maxima_x[-1] * np.ones_like(max_dash_1) max_dash_time_2 = maxima_x[-2] * np.ones_like(max_dash_1) min_dash_1 = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_dash_2 = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_dash_time_1 = minima_x[-1] * np.ones_like(min_dash_1) min_dash_time_2 = minima_x[-2] * np.ones_like(min_dash_1) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) dash_2_time = np.linspace(maxima_x[-1], minima_x[-2], 101) dash_2 = np.linspace(maxima_y[-1], minima_y[-2], 101) s1 = (minima_y[-2] - maxima_y[-1]) / (minima_x[-2] - maxima_x[-1]) slope_based_maximum_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) slope_based_maximum = minima_y[-1] + (slope_based_maximum_time - minima_x[-1]) * s1 max_dash_time_3 = slope_based_maximum_time * np.ones_like(max_dash_1) max_dash_3 = np.linspace(slope_based_maximum - width, slope_based_maximum + width, 101) dash_3_time = np.linspace(minima_x[-1], slope_based_maximum_time, 101) dash_3 = np.linspace(minima_y[-1], slope_based_maximum, 101) s2 = (minima_y[-1] - maxima_y[-1]) / (minima_x[-1] - maxima_x[-1]) slope_based_minimum_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) slope_based_minimum = slope_based_maximum - (slope_based_maximum_time - slope_based_minimum_time) * s2 min_dash_time_3 = slope_based_minimum_time * np.ones_like(min_dash_1) min_dash_3 = np.linspace(slope_based_minimum - width, slope_based_minimum + width, 101) dash_4_time = np.linspace(slope_based_maximum_time, slope_based_minimum_time) dash_4 = np.linspace(slope_based_maximum, slope_based_minimum) maxima_dash = np.linspace(2.5 - width, 2.5 + width, 101) maxima_dash_time_1 = maxima_x[-2] * np.ones_like(maxima_dash) maxima_dash_time_2 = maxima_x[-1] * np.ones_like(maxima_dash) maxima_dash_time_3 = slope_based_maximum_time * np.ones_like(maxima_dash) maxima_line_dash_time = np.linspace(maxima_x[-2], slope_based_maximum_time, 101) maxima_line_dash = 2.5 * np.ones_like(maxima_line_dash_time) minima_dash = np.linspace(-3.4 - width, -3.4 + width, 101) minima_dash_time_1 = minima_x[-2] * np.ones_like(minima_dash) minima_dash_time_2 = minima_x[-1] * np.ones_like(minima_dash) minima_dash_time_3 = slope_based_minimum_time * np.ones_like(minima_dash) minima_line_dash_time = np.linspace(minima_x[-2], slope_based_minimum_time, 101) minima_line_dash = -3.4 * np.ones_like(minima_line_dash_time) # slightly edit signal to make difference between slope-based method and improved slope-based method more clear time_series[time >= minima_x[-1]] = 1.5 * (time_series[time >= minima_x[-1]] - time_series[time == minima_x[-1]]) + \ time_series[time == minima_x[-1]] improved_slope_based_maximum_time = time[-1] improved_slope_based_maximum = time_series[-1] improved_slope_based_minimum_time = slope_based_minimum_time improved_slope_based_minimum = improved_slope_based_maximum + s2 * (improved_slope_based_minimum_time - improved_slope_based_maximum_time) 
min_dash_4 = np.linspace(improved_slope_based_minimum - width, improved_slope_based_minimum + width, 101)
min_dash_time_4 = improved_slope_based_minimum_time * np.ones_like(min_dash_4)
dash_final_time = np.linspace(improved_slope_based_maximum_time, improved_slope_based_minimum_time, 101)
dash_final = np.linspace(improved_slope_based_maximum, improved_slope_based_minimum, 101)

ax = plt.subplot(111)
figure_size = plt.gcf().get_size_inches()
factor = 0.9
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
plt.plot(time, time_series, linewidth=2, label='Signal')
plt.title('Slope-Based Edge Effects Example')
plt.plot(max_dash_time_1, max_dash_1, 'k-')
plt.plot(max_dash_time_2, max_dash_2, 'k-')
plt.plot(max_dash_time_3, max_dash_3, 'k-')
plt.plot(min_dash_time_1, min_dash_1, 'k-')
plt.plot(min_dash_time_2, min_dash_2, 'k-')
plt.plot(min_dash_time_3, min_dash_3, 'k-')
plt.plot(min_dash_time_4, min_dash_4, 'k-')
plt.plot(maxima_dash_time_1, maxima_dash, 'k-')
plt.plot(maxima_dash_time_2, maxima_dash, 'k-')
plt.plot(maxima_dash_time_3, maxima_dash, 'k-')
plt.plot(minima_dash_time_1, minima_dash, 'k-')
plt.plot(minima_dash_time_2, minima_dash, 'k-')
plt.plot(minima_dash_time_3, minima_dash, 'k-')
plt.text(4.34 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$')
plt.text(4.74 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$')
plt.text(4.12 * np.pi, 2, r'$\Delta{t^{max}_{M}}$')
plt.text(4.50 * np.pi, 2, r'$\Delta{t^{max}_{M}}$')
plt.text(4.30 * np.pi, 0.35, r'$s_1$')
plt.text(4.43 * np.pi, -0.20, r'$s_2$')
plt.text(4.30 * np.pi + (minima_x[-1] - minima_x[-2]), 0.35 + (minima_y[-1] - minima_y[-2]), r'$s_1$')
plt.text(4.43 * np.pi + (slope_based_minimum_time - minima_x[-1]), -0.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$')
plt.text(4.50 * np.pi + (slope_based_minimum_time - minima_x[-1]), 1.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$')
plt.plot(minima_line_dash_time, minima_line_dash, 'k--')
plt.plot(maxima_line_dash_time, maxima_line_dash, 'k--')
plt.plot(dash_1_time, dash_1, 'k--')
plt.plot(dash_2_time, dash_2, 'k--')
plt.plot(dash_3_time, dash_3, 'k--')
plt.plot(dash_4_time, dash_4, 'k--')
plt.plot(dash_final_time, dash_final, 'k--')
plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima')
plt.scatter(slope_based_maximum_time, slope_based_maximum, c='orange', zorder=4, label=textwrap.fill('Slope-based maximum', 11))
plt.scatter(slope_based_minimum_time, slope_based_minimum, c='purple', zorder=4, label=textwrap.fill('Slope-based minimum', 11))
plt.scatter(improved_slope_based_maximum_time, improved_slope_based_maximum, c='deeppink', zorder=4, label=textwrap.fill('Improved slope-based maximum', 11))
plt.scatter(improved_slope_based_minimum_time, improved_slope_based_minimum, c='dodgerblue', zorder=4, label=textwrap.fill('Improved slope-based minimum', 11))
plt.xlim(3.9 * np.pi, 5.5 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-3, -2, -1, 0, 1, 2), ('-3', '-2', '-1', '0', '1', '2'))
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_slope_based.png')
plt.show()

# plot 5
a = 0.25
width = 0.2

time = np.linspace(0, (5 - a) * np.pi, 1001)
time_series = np.cos(time) + np.cos(5 * time)

utils = emd_utils.Utility(time=time, time_series=time_series)
max_bool = utils.max_bool_func_1st_order_fd()
maxima_x =
time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] A2 = np.abs(maxima_y[-2] - minima_y[-2]) / 2 A1 = np.abs(maxima_y[-1] - minima_y[-1]) / 2 P2 = 2 * np.abs(maxima_x[-2] - minima_x[-2]) P1 = 2 * np.abs(maxima_x[-1] - minima_x[-1]) Huang_time = (P1 / P2) * (time[time >= maxima_x[-2]] - time[time == maxima_x[-2]]) + maxima_x[-1] Huang_wave = (A1 / A2) * (time_series[time >= maxima_x[-2]] - time_series[time == maxima_x[-2]]) + maxima_y[-1] Coughlin_time = Huang_time Coughlin_wave = A1 * np.cos(2 * np.pi * (1 / P1) * (Coughlin_time - Coughlin_time[0])) Average_max_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) Average_max = (maxima_y[-2] + maxima_y[-1]) / 2 Average_min_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) Average_min = (minima_y[-2] + minima_y[-1]) / 2 utils_Huang = emd_utils.Utility(time=time, time_series=Huang_wave) Huang_max_bool = utils_Huang.max_bool_func_1st_order_fd() Huang_min_bool = utils_Huang.min_bool_func_1st_order_fd() utils_Coughlin = emd_utils.Utility(time=time, time_series=Coughlin_wave) Coughlin_max_bool = utils_Coughlin.max_bool_func_1st_order_fd() Coughlin_min_bool = utils_Coughlin.min_bool_func_1st_order_fd() Huang_max_time = Huang_time[Huang_max_bool] Huang_max = Huang_wave[Huang_max_bool] Huang_min_time = Huang_time[Huang_min_bool] Huang_min = Huang_wave[Huang_min_bool] Coughlin_max_time = Coughlin_time[Coughlin_max_bool] Coughlin_max = Coughlin_wave[Coughlin_max_bool] Coughlin_min_time = Coughlin_time[Coughlin_min_bool] Coughlin_min = Coughlin_wave[Coughlin_min_bool] max_2_x_time = np.linspace(maxima_x[-2] - width, maxima_x[-2] + width, 101) max_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) max_2_x = maxima_y[-2] * np.ones_like(max_2_x_time) min_2_x_time = np.linspace(minima_x[-2] - width, minima_x[-2] + width, 101) min_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) min_2_x = minima_y[-2] * np.ones_like(min_2_x_time) dash_max_min_2_x = np.linspace(minima_y[-2], maxima_y[-2], 101) dash_max_min_2_x_time = 5.3 * np.pi * np.ones_like(dash_max_min_2_x) max_2_y = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) max_2_y_time = maxima_x[-2] * np.ones_like(max_2_y) min_2_y = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) min_2_y_time = minima_x[-2] * np.ones_like(min_2_y) dash_max_min_2_y_time = np.linspace(minima_x[-2], maxima_x[-2], 101) dash_max_min_2_y = -1.8 * np.ones_like(dash_max_min_2_y_time) max_1_x_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) max_1_x = maxima_y[-1] * np.ones_like(max_1_x_time) min_1_x_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) min_1_x = minima_y[-1] * np.ones_like(min_1_x_time) dash_max_min_1_x = np.linspace(minima_y[-1], maxima_y[-1], 101) dash_max_min_1_x_time = 5.4 * np.pi * np.ones_like(dash_max_min_1_x) max_1_y = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) max_1_y_time = maxima_x[-1] * np.ones_like(max_1_y) min_1_y = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) min_1_y_time = 
minima_x[-1] * np.ones_like(min_1_y)
dash_max_min_1_y_time = np.linspace(minima_x[-1], maxima_x[-1], 101)
dash_max_min_1_y = -2.1 * np.ones_like(dash_max_min_1_y_time)

ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.title('Characteristic Wave Effects Example')
plt.plot(time, time_series, linewidth=2, label='Signal')
plt.scatter(Huang_max_time, Huang_max, c='magenta', zorder=4, label=textwrap.fill('Huang maximum', 10))
plt.scatter(Huang_min_time, Huang_min, c='lime', zorder=4, label=textwrap.fill('Huang minimum', 10))
plt.scatter(Coughlin_max_time, Coughlin_max, c='darkorange', zorder=4, label=textwrap.fill('Coughlin maximum', 14))
plt.scatter(Coughlin_min_time, Coughlin_min, c='dodgerblue', zorder=4, label=textwrap.fill('Coughlin minimum', 14))
plt.scatter(Average_max_time, Average_max, c='orangered', zorder=4, label=textwrap.fill('Average maximum', 14))
plt.scatter(Average_min_time, Average_min, c='cyan', zorder=4, label=textwrap.fill('Average minimum', 14))
plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima')
plt.plot(Huang_time, Huang_wave, '--', c='darkviolet', label=textwrap.fill('Huang Characteristic Wave', 14))
plt.plot(Coughlin_time, Coughlin_wave, '--', c='darkgreen', label=textwrap.fill('Coughlin Characteristic Wave', 14))
plt.plot(max_2_x_time, max_2_x, 'k-')
plt.plot(max_2_x_time_side, max_2_x, 'k-')
plt.plot(min_2_x_time, min_2_x, 'k-')
plt.plot(min_2_x_time_side, min_2_x, 'k-')
plt.plot(dash_max_min_2_x_time, dash_max_min_2_x, 'k--')
plt.text(5.16 * np.pi, 0.85, r'$2a_2$')
plt.plot(max_2_y_time, max_2_y, 'k-')
plt.plot(max_2_y_time, max_2_y_side, 'k-')
plt.plot(min_2_y_time, min_2_y, 'k-')
plt.plot(min_2_y_time, min_2_y_side, 'k-')
plt.plot(dash_max_min_2_y_time, dash_max_min_2_y, 'k--')
plt.text(4.08 * np.pi, -2.2, r'$\frac{p_2}{2}$')
plt.plot(max_1_x_time, max_1_x, 'k-')
plt.plot(max_1_x_time_side, max_1_x, 'k-')
plt.plot(min_1_x_time, min_1_x, 'k-')
plt.plot(min_1_x_time_side, min_1_x, 'k-')
plt.plot(dash_max_min_1_x_time, dash_max_min_1_x, 'k--')
plt.text(5.42 * np.pi, -0.1, r'$2a_1$')
plt.plot(max_1_y_time, max_1_y, 'k-')
plt.plot(max_1_y_time, max_1_y_side, 'k-')
plt.plot(min_1_y_time, min_1_y, 'k-')
plt.plot(min_1_y_time, min_1_y_side, 'k-')
plt.plot(dash_max_min_1_y_time, dash_max_min_1_y, 'k--')
plt.text(4.48 * np.pi, -2.5, r'$\frac{p_1}{2}$')
plt.xlim(3.9 * np.pi, 5.6 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_characteristic_wave.png')
plt.show()

# plot 6
t = np.linspace(5, 95, 100)
signal_orig = np.cos(2 * np.pi * t / 50) + 0.6 * np.cos(2 * np.pi * t / 25) + 0.5 * np.sin(2 * np.pi * t / 200)
util_nn = emd_utils.Utility(time=t, time_series=signal_orig)
maxima = signal_orig[util_nn.max_bool_func_1st_order_fd()]
minima = signal_orig[util_nn.min_bool_func_1st_order_fd()]
cs_max = CubicSpline(t[util_nn.max_bool_func_1st_order_fd()], maxima)
cs_min = CubicSpline(t[util_nn.min_bool_func_1st_order_fd()], minima)

time = np.linspace(0, 5 * np.pi, 1001)
lsq_signal = np.cos(time) + np.cos(5 * time)
knots = np.linspace(0, 5 * np.pi, 101)

time_extended = time_extension(time)
time_series_extended = np.full_like(time_extended, np.nan)  # initialise the extension with NaNs
time_series_extended[int(len(lsq_signal) - 1):int(2 * (len(lsq_signal) - 1) + 1)] = lsq_signal
neural_network_m = 200
neural_network_k = 100

# forward ->
P = np.zeros((int(neural_network_k + 1), neural_network_m))
for col in range(neural_network_m):
    P[:-1, col] = lsq_signal[(-(neural_network_m + neural_network_k - col)):(-(neural_network_m - col))]
    P[-1, col] = 1  # for additive constant
t = lsq_signal[-neural_network_m:]

# test - top
seed_weights = np.ones(neural_network_k) / neural_network_k
weights = 0 * seed_weights.copy()
train_input = P[:-1, :]
lr = 0.01

for iterations in range(1000):
    output = np.matmul(weights, train_input)
    error = (t - output)
    gradients = error * (- train_input)
    # guess average gradients
    average_gradients = np.mean(gradients, axis=1)
    # steepest descent
    max_gradient_vector = average_gradients * (np.abs(average_gradients) == max(np.abs(average_gradients)))
    adjustment = - lr * average_gradients
    # adjustment = - lr * max_gradient_vector
    weights += adjustment
# test - bottom

weights_right = np.hstack((weights, 0))
max_count_right = 0
min_count_right = 0
i_right = 0
while ((max_count_right < 1) or (min_count_right < 1)) and (i_right < len(lsq_signal) - 1):
    time_series_extended[int(2 * (len(lsq_signal) - 1) + 1 + i_right)] = \
        sum(weights_right * np.hstack((time_series_extended[
                                       int(2 * (len(lsq_signal) - 1) + 1 - neural_network_k + i_right):
                                       int(2 * (len(lsq_signal) - 1) + 1 + i_right)], 1)))
    i_right += 1
    if i_right > 1:
        emd_utils_max = \
            emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1):
                                                 int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)],
                              time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1):
                                                               int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)])
        if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0:
            max_count_right += 1
        emd_utils_min = \
            emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1):
                                                 int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)],
                              time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1):
                                                               int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)])
        if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0:
            min_count_right += 1

# backward <-
P = np.zeros((int(neural_network_k + 1), neural_network_m))
for col in range(neural_network_m):
    P[:-1, col] = lsq_signal[int(col + 1):int(col + neural_network_k + 1)]
    P[-1, col] = 1  # for additive constant
t = lsq_signal[:neural_network_m]

vx = cvx.Variable(int(neural_network_k + 1))
objective = cvx.Minimize(cvx.norm((2 * (vx * P) + 1 - t), 2))  # linear activation function is arbitrary
prob = cvx.Problem(objective)
result = prob.solve(verbose=True, solver=cvx.ECOS)
weights_left = np.array(vx.value)

max_count_left = 0
min_count_left = 0
i_left = 0
while ((max_count_left < 1) or (min_count_left < 1)) and (i_left < len(lsq_signal) - 1):
    time_series_extended[int(len(lsq_signal) - 2 - i_left)] = \
        2 * sum(weights_left * np.hstack((time_series_extended[int(len(lsq_signal) - 1 - i_left):
                                                               int(len(lsq_signal) - 1 - i_left + neural_network_k)],
                                          1))) + 1
    i_left += 1
    if i_left > 1:
        emd_utils_max = \
            emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))],
                              time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))])
        if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0:
            max_count_left += 1
        emd_utils_min = \
            emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))],
                              time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))])
        if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0:
            min_count_left += 1

lsq_utils = emd_utils.Utility(time=time,
time_series=lsq_signal) utils_extended = emd_utils.Utility(time=time_extended, time_series=time_series_extended) maxima = lsq_signal[lsq_utils.max_bool_func_1st_order_fd()] maxima_time = time[lsq_utils.max_bool_func_1st_order_fd()] maxima_extrapolate = time_series_extended[utils_extended.max_bool_func_1st_order_fd()][-1] maxima_extrapolate_time = time_extended[utils_extended.max_bool_func_1st_order_fd()][-1] minima = lsq_signal[lsq_utils.min_bool_func_1st_order_fd()] minima_time = time[lsq_utils.min_bool_func_1st_order_fd()] minima_extrapolate = time_series_extended[utils_extended.min_bool_func_1st_order_fd()][-2:] minima_extrapolate_time = time_extended[utils_extended.min_bool_func_1st_order_fd()][-2:] ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Single Neuron Neural Network Example') plt.plot(time, lsq_signal, zorder=2, label='Signal') plt.plot(time_extended, time_series_extended, c='g', zorder=1, label=textwrap.fill('Extrapolated signal', 12)) plt.scatter(maxima_time, maxima, c='r', zorder=3, label='Maxima') plt.scatter(minima_time, minima, c='b', zorder=3, label='Minima') plt.scatter(maxima_extrapolate_time, maxima_extrapolate, c='magenta', zorder=3, label=textwrap.fill('Extrapolated maxima', 12)) plt.scatter(minima_extrapolate_time, minima_extrapolate, c='cyan', zorder=4, label=textwrap.fill('Extrapolated minima', 12)) plt.plot(((time[-302] + time[-301]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k', label=textwrap.fill('Neural network inputs', 13)) plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(((time_extended[-1001] + time_extended[-1002]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k') plt.plot(((time[-202] + time[-201]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed', label=textwrap.fill('Neural network targets', 13)) plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(((time_extended[-1001] + time_extended[-1000]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed') plt.xlim(3.4 * np.pi, 5.6 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/neural_network.png') plt.show() # plot 6a np.random.seed(0) time = np.linspace(0, 5 * 
np.pi, 1001) knots_51 = np.linspace(0, 5 * np.pi, 51) time_series = np.cos(2 * time) + np.cos(4 * time) + np.cos(8 * time) noise = np.random.normal(0, 1, len(time_series)) time_series += noise advemdpy = EMD(time=time, time_series=time_series) imfs_51, hts_51, ifs_51 = advemdpy.empirical_mode_decomposition(knots=knots_51, max_imfs=3, edge_effect='symmetric_anchor', verbose=False)[:3] knots_31 = np.linspace(0, 5 * np.pi, 31) imfs_31, hts_31, ifs_31 = advemdpy.empirical_mode_decomposition(knots=knots_31, max_imfs=2, edge_effect='symmetric_anchor', verbose=False)[:3] knots_11 = np.linspace(0, 5 * np.pi, 11) imfs_11, hts_11, ifs_11 = advemdpy.empirical_mode_decomposition(knots=knots_11, max_imfs=1, edge_effect='symmetric_anchor', verbose=False)[:3] fig, axs = plt.subplots(3, 1) plt.suptitle(textwrap.fill('Comparison of Trends Extracted with Different Knot Sequences', 40)) plt.subplots_adjust(hspace=0.1) axs[0].plot(time, time_series, label='Time series') axs[0].plot(time, imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 1, IMF 2, & IMF 3 with 51 knots', 21)) print(f'DFA fluctuation with 51 knots: {np.round(np.var(time_series - (imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :])), 3)}') for knot in knots_51: axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[0].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[0].set_xticklabels(['', '', '', '', '', '']) axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[0].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[0].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].plot(time, time_series, label='Time series') axs[1].plot(time, imfs_31[1, :] + imfs_31[2, :], label=textwrap.fill('Sum of IMF 1 and IMF 2 with 31 knots', 19)) axs[1].plot(time, imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 2 and IMF 3 with 51 knots', 19)) print(f'DFA fluctuation with 31 knots: {np.round(np.var(time_series - (imfs_31[1, :] + imfs_31[2, :])), 3)}') for knot in knots_31: axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[1].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[1].set_xticklabels(['', '', '', '', '', '']) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) axs[1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[1].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[1].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[1].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') axs[2].plot(time, time_series, label='Time series') axs[2].plot(time, imfs_11[1, :], label='IMF 1 with 11 knots') axs[2].plot(time, imfs_31[2, :], label='IMF 2 with 31 knots') axs[2].plot(time, imfs_51[3, :], label='IMF 
3 with 51 knots') print(f'DFA fluctuation with 11 knots: {np.round(np.var(time_series - imfs_51[3, :]), 3)}') for knot in knots_11: axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[2].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[2].set_xticklabels(['$0$', r'$\pi$', r'$2\pi$', r'$3\pi$', r'$4\pi$', r'$5\pi$']) box_2 = axs[2].get_position() axs[2].set_position([box_2.x0 - 0.05, box_2.y0, box_2.width * 0.85, box_2.height]) axs[2].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[2].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[2].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[2].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[2].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') plt.savefig('jss_figures/DFA_different_trends.png') plt.show() # plot 6b fig, axs = plt.subplots(3, 1) plt.suptitle(textwrap.fill('Comparison of Trends Extracted with Different Knot Sequences Zoomed Region', 40)) plt.subplots_adjust(hspace=0.1) axs[0].plot(time, time_series, label='Time series') axs[0].plot(time, imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 1, IMF 2, & IMF 3 with 51 knots', 21)) for knot in knots_51: axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[0].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[0].set_xticklabels(['', '', '', '', '', '']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[0].set_ylim(-5.5, 5.5) axs[0].set_xlim(0.95 * np.pi, 1.55 * np.pi) axs[1].plot(time, time_series, label='Time series') axs[1].plot(time, imfs_31[1, :] + imfs_31[2, :], label=textwrap.fill('Sum of IMF 1 and IMF 2 with 31 knots', 19)) axs[1].plot(time, imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 2 and IMF 3 with 51 knots', 19)) for knot in knots_31: axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[1].plot(knot * np.ones(101),
np.linspace(-5, 5, 101)
numpy.linspace
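An aside for this row: the elided call is plain numpy.linspace, whose contract is easy to check in isolation. A minimal, self-contained sketch of the behaviour the completion relies on, endpoint inclusion and uniform spacing (the knot_line_y name is illustrative, not taken from the script above):

import numpy as np

# num evenly spaced samples over [start, stop], both endpoints included by default.
knot_line_y = np.linspace(-5, 5, 101)
assert knot_line_y.shape == (101,)
assert knot_line_y[0] == -5.0 and knot_line_y[-1] == 5.0
# Step size is (stop - start) / (num - 1) = 10 / 100 = 0.1.
assert np.isclose(knot_line_y[1] - knot_line_y[0], 0.1)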
# pylint: disable=protected-access """ Test the wrappers for the C API. """ import os from contextlib import contextmanager import numpy as np import numpy.testing as npt import pandas as pd import pytest import xarray as xr from packaging.version import Version from pygmt import Figure, clib from pygmt.clib.conversion import dataarray_to_matrix from pygmt.clib.session import FAMILIES, VIAS from pygmt.exceptions import ( GMTCLibError, GMTCLibNoSessionError, GMTInvalidInput, GMTVersionError, ) from pygmt.helpers import GMTTempFile TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data") with clib.Session() as _lib: gmt_version = Version(_lib.info["version"]) @contextmanager def mock(session, func, returns=None, mock_func=None): """ Mock a GMT C API function to make it always return a given value. Used to test that exceptions are raised when API functions fail by producing a NULL pointer as output or non-zero status codes. Needed because it's not easy to get some API functions to fail without inducing a Segmentation Fault (which is a good thing because libgmt usually only fails with errors). """ if mock_func is None: def mock_api_function(*args): # pylint: disable=unused-argument """ A mock GMT API function that always returns a given value. """ return returns mock_func = mock_api_function get_libgmt_func = session.get_libgmt_func def mock_get_libgmt_func(name, argtypes=None, restype=None): """ Return our mock function. """ if name == func: return mock_func return get_libgmt_func(name, argtypes, restype) setattr(session, "get_libgmt_func", mock_get_libgmt_func) yield setattr(session, "get_libgmt_func", get_libgmt_func) def test_getitem(): """ Test that I can get correct constants from the C lib. """ ses = clib.Session() assert ses["GMT_SESSION_EXTERNAL"] != -99999 assert ses["GMT_MODULE_CMD"] != -99999 assert ses["GMT_PAD_DEFAULT"] != -99999 assert ses["GMT_DOUBLE"] != -99999 with pytest.raises(GMTCLibError): ses["A_WHOLE_LOT_OF_JUNK"] # pylint: disable=pointless-statement def test_create_destroy_session(): """ Test that create and destroy session are called without errors. """ # Create two session and make sure they are not pointing to the same memory session1 = clib.Session() session1.create(name="test_session1") assert session1.session_pointer is not None session2 = clib.Session() session2.create(name="test_session2") assert session2.session_pointer is not None assert session2.session_pointer != session1.session_pointer session1.destroy() session2.destroy() # Create and destroy a session twice ses = clib.Session() for __ in range(2): with pytest.raises(GMTCLibNoSessionError): ses.session_pointer # pylint: disable=pointless-statement ses.create("session1") assert ses.session_pointer is not None ses.destroy() with pytest.raises(GMTCLibNoSessionError): ses.session_pointer # pylint: disable=pointless-statement def test_create_session_fails(): """ Check that an exception is raised when failing to create a session. """ ses = clib.Session() with mock(ses, "GMT_Create_Session", returns=None): with pytest.raises(GMTCLibError): ses.create("test-session-name") # Should fail if trying to create a session before destroying the old one. ses.create("test1") with pytest.raises(GMTCLibError): ses.create("test2") def test_destroy_session_fails(): """ Fail to destroy session when given bad input. 
""" ses = clib.Session() with pytest.raises(GMTCLibNoSessionError): ses.destroy() ses.create("test-session") with mock(ses, "GMT_Destroy_Session", returns=1): with pytest.raises(GMTCLibError): ses.destroy() ses.destroy() def test_call_module(): """ Run a command to see if call_module works. """ data_fname = os.path.join(TEST_DATA_DIR, "points.txt") out_fname = "test_call_module.txt" with clib.Session() as lib: with GMTTempFile() as out_fname: lib.call_module("info", "{} -C ->{}".format(data_fname, out_fname.name)) assert os.path.exists(out_fname.name) output = out_fname.read().strip() assert output == "11.5309 61.7074 -2.9289 7.8648 0.1412 0.9338" def test_call_module_invalid_arguments(): """ Fails for invalid module arguments. """ with clib.Session() as lib: with pytest.raises(GMTCLibError): lib.call_module("info", "bogus-data.bla") def test_call_module_invalid_name(): """ Fails when given bad input. """ with clib.Session() as lib: with pytest.raises(GMTCLibError): lib.call_module("meh", "") def test_call_module_error_message(): """ Check is the GMT error message was captured. """ with clib.Session() as lib: try: lib.call_module("info", "bogus-data.bla") except GMTCLibError as error: assert "Module 'info' failed with status code" in str(error) assert "gmtinfo [ERROR]: Cannot find file bogus-data.bla" in str(error) def test_method_no_session(): """ Fails when not in a session. """ # Create an instance of Session without "with" so no session is created. lib = clib.Session() with pytest.raises(GMTCLibNoSessionError): lib.call_module("gmtdefaults", "") with pytest.raises(GMTCLibNoSessionError): lib.session_pointer # pylint: disable=pointless-statement def test_parse_constant_single(): """ Parsing a single family argument correctly. """ lib = clib.Session() for family in FAMILIES: parsed = lib._parse_constant(family, valid=FAMILIES) assert parsed == lib[family] def test_parse_constant_composite(): """ Parsing a composite constant argument (separated by |) correctly. """ lib = clib.Session() test_cases = ((family, via) for family in FAMILIES for via in VIAS) for family, via in test_cases: composite = "|".join([family, via]) expected = lib[family] + lib[via] parsed = lib._parse_constant(composite, valid=FAMILIES, valid_modifiers=VIAS) assert parsed == expected def test_parse_constant_fails(): """ Check if the function fails when given bad input. """ lib = clib.Session() test_cases = [ "SOME_random_STRING", "GMT_IS_DATASET|GMT_VIA_MATRIX|GMT_VIA_VECTOR", "GMT_IS_DATASET|NOT_A_PROPER_VIA", "NOT_A_PROPER_FAMILY|GMT_VIA_MATRIX", "NOT_A_PROPER_FAMILY|ALSO_INVALID", ] for test_case in test_cases: with pytest.raises(GMTInvalidInput): lib._parse_constant(test_case, valid=FAMILIES, valid_modifiers=VIAS) # Should also fail if not given valid modifiers but is using them anyway. # This should work... lib._parse_constant( "GMT_IS_DATASET|GMT_VIA_MATRIX", valid=FAMILIES, valid_modifiers=VIAS ) # But this shouldn't. with pytest.raises(GMTInvalidInput): lib._parse_constant( "GMT_IS_DATASET|GMT_VIA_MATRIX", valid=FAMILIES, valid_modifiers=None ) def test_create_data_dataset(): """ Run the function to make sure it doesn't fail badly. 
""" with clib.Session() as lib: # Dataset from vectors data_vector = lib.create_data( family="GMT_IS_DATASET|GMT_VIA_VECTOR", geometry="GMT_IS_POINT", mode="GMT_CONTAINER_ONLY", dim=[10, 20, 1, 0], # columns, rows, layers, dtype ) # Dataset from matrices data_matrix = lib.create_data( family="GMT_IS_DATASET|GMT_VIA_MATRIX", geometry="GMT_IS_POINT", mode="GMT_CONTAINER_ONLY", dim=[10, 20, 1, 0], ) assert data_vector != data_matrix def test_create_data_grid_dim(): """ Create a grid ignoring range and inc. """ with clib.Session() as lib: # Grids from matrices using dim lib.create_data( family="GMT_IS_GRID|GMT_VIA_MATRIX", geometry="GMT_IS_SURFACE", mode="GMT_CONTAINER_ONLY", dim=[10, 20, 1, 0], ) def test_create_data_grid_range(): """ Create a grid specifying range and inc instead of dim. """ with clib.Session() as lib: # Grids from matrices using range and int lib.create_data( family="GMT_IS_GRID|GMT_VIA_MATRIX", geometry="GMT_IS_SURFACE", mode="GMT_CONTAINER_ONLY", ranges=[150.0, 250.0, -20.0, 20.0], inc=[0.1, 0.2], ) def test_create_data_fails(): """ Check that create_data raises exceptions for invalid input and output. """ # Passing in invalid mode with pytest.raises(GMTInvalidInput): with clib.Session() as lib: lib.create_data( family="GMT_IS_DATASET", geometry="GMT_IS_SURFACE", mode="Not_a_valid_mode", dim=[0, 0, 1, 0], ranges=[150.0, 250.0, -20.0, 20.0], inc=[0.1, 0.2], ) # Passing in invalid geometry with pytest.raises(GMTInvalidInput): with clib.Session() as lib: lib.create_data( family="GMT_IS_GRID", geometry="Not_a_valid_geometry", mode="GMT_CONTAINER_ONLY", dim=[0, 0, 1, 0], ranges=[150.0, 250.0, -20.0, 20.0], inc=[0.1, 0.2], ) # If the data pointer returned is None (NULL pointer) with pytest.raises(GMTCLibError): with clib.Session() as lib: with mock(lib, "GMT_Create_Data", returns=None): lib.create_data( family="GMT_IS_DATASET", geometry="GMT_IS_SURFACE", mode="GMT_CONTAINER_ONLY", dim=[11, 10, 2, 0], ) def test_virtual_file(): """ Test passing in data via a virtual file with a Dataset. """ dtypes = "float32 float64 int32 int64 uint32 uint64".split() shape = (5, 3) for dtype in dtypes: with clib.Session() as lib: family = "GMT_IS_DATASET|GMT_VIA_MATRIX" geometry = "GMT_IS_POINT" dataset = lib.create_data( family=family, geometry=geometry, mode="GMT_CONTAINER_ONLY", dim=[shape[1], shape[0], 1, 0], # columns, rows, layers, dtype ) data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape) lib.put_matrix(dataset, matrix=data) # Add the dataset to a virtual file and pass it along to gmt info vfargs = (family, geometry, "GMT_IN|GMT_IS_REFERENCE", dataset) with lib.open_virtual_file(*vfargs) as vfile: with GMTTempFile() as outfile: lib.call_module("info", "{} ->{}".format(vfile, outfile.name)) output = outfile.read(keep_tabs=True) bounds = "\t".join( ["<{:.0f}/{:.0f}>".format(col.min(), col.max()) for col in data.T] ) expected = "<matrix memory>: N = {}\t{}\n".format(shape[0], bounds) assert output == expected def test_virtual_file_fails(): """ Check that opening and closing virtual files raises an exception for non- zero return codes. """ vfargs = ( "GMT_IS_DATASET|GMT_VIA_MATRIX", "GMT_IS_POINT", "GMT_IN|GMT_IS_REFERENCE", None, ) # Mock Open_VirtualFile to test the status check when entering the context. # If the exception is raised, the code won't get to the closing of the # virtual file. 
with clib.Session() as lib, mock(lib, "GMT_Open_VirtualFile", returns=1): with pytest.raises(GMTCLibError): with lib.open_virtual_file(*vfargs): print("Should not get to this code") # Test the status check when closing the virtual file # Mock the opening to return 0 (success) so that we don't open a file that # we won't close later. with clib.Session() as lib, mock(lib, "GMT_Open_VirtualFile", returns=0), mock( lib, "GMT_Close_VirtualFile", returns=1 ): with pytest.raises(GMTCLibError): with lib.open_virtual_file(*vfargs): pass print("Shouldn't get to this code either") def test_virtual_file_bad_direction(): """ Test passing an invalid direction argument. """ with clib.Session() as lib: vfargs = ( "GMT_IS_DATASET|GMT_VIA_MATRIX", "GMT_IS_POINT", "GMT_IS_GRID", # The invalid direction argument 0, ) with pytest.raises(GMTInvalidInput): with lib.open_virtual_file(*vfargs): print("This should have failed") def test_virtualfile_from_vectors(): """ Test the automation for transforming vectors to virtual file dataset. """ dtypes = "float32 float64 int32 int64 uint32 uint64".split() size = 10 for dtype in dtypes: x = np.arange(size, dtype=dtype) y = np.arange(size, size * 2, 1, dtype=dtype) z = np.arange(size * 2, size * 3, 1, dtype=dtype) with clib.Session() as lib: with lib.virtualfile_from_vectors(x, y, z) as vfile: with GMTTempFile() as outfile: lib.call_module("info", "{} ->{}".format(vfile, outfile.name)) output = outfile.read(keep_tabs=True) bounds = "\t".join( ["<{:.0f}/{:.0f}>".format(i.min(), i.max()) for i in (x, y, z)] ) expected = "<vector memory>: N = {}\t{}\n".format(size, bounds) assert output == expected @pytest.mark.parametrize("dtype", [str, object]) def test_virtualfile_from_vectors_one_string_or_object_column(dtype): """ Test passing in one column with string or object dtype into virtual file dataset. """ size = 5 x = np.arange(size, dtype=np.int32) y = np.arange(size, size * 2, 1, dtype=np.int32) strings = np.array(["a", "bc", "defg", "hijklmn", "opqrst"], dtype=dtype) with clib.Session() as lib: with lib.virtualfile_from_vectors(x, y, strings) as vfile: with GMTTempFile() as outfile: lib.call_module("convert", f"{vfile} ->{outfile.name}") output = outfile.read(keep_tabs=True) expected = "".join(f"{i}\t{j}\t{k}\n" for i, j, k in zip(x, y, strings)) assert output == expected @pytest.mark.parametrize("dtype", [str, object]) def test_virtualfile_from_vectors_two_string_or_object_columns(dtype): """ Test passing in two columns of string or object dtype into virtual file dataset. """ size = 5 x = np.arange(size, dtype=np.int32) y = np.arange(size, size * 2, 1, dtype=np.int32) strings1 = np.array(["a", "bc", "def", "ghij", "klmno"], dtype=dtype) strings2 = np.array(["pqrst", "uvwx", "yz!", "@#", "$"], dtype=dtype) with clib.Session() as lib: with lib.virtualfile_from_vectors(x, y, strings1, strings2) as vfile: with GMTTempFile() as outfile: lib.call_module("convert", f"{vfile} ->{outfile.name}") output = outfile.read(keep_tabs=True) expected = "".join( f"{h}\t{i}\t{j} {k}\n" for h, i, j, k in zip(x, y, strings1, strings2) ) assert output == expected def test_virtualfile_from_vectors_transpose(): """ Test transforming matrix columns to virtual file dataset. 
""" dtypes = "float32 float64 int32 int64 uint32 uint64".split() shape = (7, 5) for dtype in dtypes: data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape) with clib.Session() as lib: with lib.virtualfile_from_vectors(*data.T) as vfile: with GMTTempFile() as outfile: lib.call_module("info", "{} -C ->{}".format(vfile, outfile.name)) output = outfile.read(keep_tabs=True) bounds = "\t".join( ["{:.0f}\t{:.0f}".format(col.min(), col.max()) for col in data.T] ) expected = "{}\n".format(bounds) assert output == expected def test_virtualfile_from_vectors_diff_size(): """ Test the function fails for arrays of different sizes. """ x = np.arange(5) y = np.arange(6) with clib.Session() as lib: with pytest.raises(GMTInvalidInput): with lib.virtualfile_from_vectors(x, y): print("This should have failed") def test_virtualfile_from_matrix(): """ Test transforming a matrix to virtual file dataset. """ dtypes = "float32 float64 int32 int64 uint32 uint64".split() shape = (7, 5) for dtype in dtypes: data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape) with clib.Session() as lib: with lib.virtualfile_from_matrix(data) as vfile: with GMTTempFile() as outfile: lib.call_module("info", "{} ->{}".format(vfile, outfile.name)) output = outfile.read(keep_tabs=True) bounds = "\t".join( ["<{:.0f}/{:.0f}>".format(col.min(), col.max()) for col in data.T] ) expected = "<matrix memory>: N = {}\t{}\n".format(shape[0], bounds) assert output == expected def test_virtualfile_from_matrix_slice(): """ Test transforming a slice of a larger array to virtual file dataset. """ dtypes = "float32 float64 int32 int64 uint32 uint64".split() shape = (10, 6) for dtype in dtypes: full_data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape) rows = 5 cols = 3 data = full_data[:rows, :cols] with clib.Session() as lib: with lib.virtualfile_from_matrix(data) as vfile: with GMTTempFile() as outfile: lib.call_module("info", "{} ->{}".format(vfile, outfile.name)) output = outfile.read(keep_tabs=True) bounds = "\t".join( ["<{:.0f}/{:.0f}>".format(col.min(), col.max()) for col in data.T] ) expected = "<matrix memory>: N = {}\t{}\n".format(rows, bounds) assert output == expected def test_virtualfile_from_vectors_pandas(): """ Pass vectors to a dataset using pandas Series. """ dtypes = "float32 float64 int32 int64 uint32 uint64".split() size = 13 for dtype in dtypes: data = pd.DataFrame( data=dict( x=np.arange(size, dtype=dtype), y=np.arange(size, size * 2, 1, dtype=dtype), z=np.arange(size * 2, size * 3, 1, dtype=dtype), ) ) with clib.Session() as lib: with lib.virtualfile_from_vectors(data.x, data.y, data.z) as vfile: with GMTTempFile() as outfile: lib.call_module("info", "{} ->{}".format(vfile, outfile.name)) output = outfile.read(keep_tabs=True) bounds = "\t".join( [ "<{:.0f}/{:.0f}>".format(i.min(), i.max()) for i in (data.x, data.y, data.z) ] ) expected = "<vector memory>: N = {}\t{}\n".format(size, bounds) assert output == expected def test_virtualfile_from_vectors_arraylike(): """ Pass array-like vectors to a dataset. 
""" size = 13 x = list(range(0, size, 1)) y = tuple(range(size, size * 2, 1)) z = range(size * 2, size * 3, 1) with clib.Session() as lib: with lib.virtualfile_from_vectors(x, y, z) as vfile: with GMTTempFile() as outfile: lib.call_module("info", "{} ->{}".format(vfile, outfile.name)) output = outfile.read(keep_tabs=True) bounds = "\t".join( ["<{:.0f}/{:.0f}>".format(min(i), max(i)) for i in (x, y, z)] ) expected = "<vector memory>: N = {}\t{}\n".format(size, bounds) assert output == expected def test_extract_region_fails(): """ Check that extract region fails if nothing has been plotted. """ Figure() with pytest.raises(GMTCLibError): with clib.Session() as lib: lib.extract_region() def test_extract_region_two_figures(): """ Extract region should handle multiple figures existing at the same time. """ # Make two figures before calling extract_region to make sure that it's # getting from the current figure, not the last figure. fig1 = Figure() region1 = np.array([0, 10, -20, -10]) fig1.coast(region=region1, projection="M6i", frame=True, land="black") fig2 = Figure() fig2.basemap(region="US.HI+r5", projection="M6i", frame=True) # Activate the first figure and extract the region from it # Use in a different session to avoid any memory problems. with clib.Session() as lib: lib.call_module("figure", "{} -".format(fig1._name)) with clib.Session() as lib: wesn1 = lib.extract_region() npt.assert_allclose(wesn1, region1) # Now try it with the second one with clib.Session() as lib: lib.call_module("figure", "{} -".format(fig2._name)) with clib.Session() as lib: wesn2 = lib.extract_region() npt.assert_allclose(wesn2, np.array([-165.0, -150.0, 15.0, 25.0])) def test_write_data_fails(): """ Check that write data raises an exception for non-zero return codes. """ # It's hard to make the C API function fail without causing a Segmentation # Fault. Can't test this if by giving a bad file name because if # output=='', GMT will just write to stdout and spaces are valid file # names. Use a mock instead just to exercise this part of the code. with clib.Session() as lib: with mock(lib, "GMT_Write_Data", returns=1): with pytest.raises(GMTCLibError): lib.write_data( "GMT_IS_VECTOR", "GMT_IS_POINT", "GMT_WRITE_SET", [1] * 6, "some-file-name", None, ) def test_dataarray_to_matrix_works(): """ Check that dataarray_to_matrix returns correct output. """ data = np.diag(v=np.arange(3)) x =
np.linspace(start=0, stop=4, num=3)
numpy.linspace
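As in the earlier row, the completion here is a numpy.linspace call, this time in keyword form. A minimal sketch of what it returns (the coords name is illustrative only, not taken from the test file):

import numpy as np

# Three samples spanning [0, 4] inclusive, e.g. the coordinates of a
# three-column grid: [0., 2., 4.].
coords = np.linspace(start=0, stop=4, num=3)
print(coords)  # -> [0. 2. 4.]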
"""Bindings for the Barnes Hut TSNE algorithm with fast nearest neighbors Refs: References [1] <NAME>, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008. [2] <NAME>, L.J.P. t-Distributed Stochastic Neighbor Embedding http://homepage.tudelft.nl/19j49/t-SNE.html """ import numpy as N import ctypes import os import pkg_resources def ord_string(s): b = bytearray() arr = b.extend(map(ord, s)) return N.array([x for x in b] + [0]).astype(N.uint8) class TSNE(object): def __init__(self, n_components=2, perplexity=50.0, early_exaggeration=2.0, learning_rate=200.0, num_neighbors=1023, force_magnify_iters=250, pre_momentum=0.5, post_momentum=0.8, theta=0.5, epssq=0.0025, n_iter=1000, n_iter_without_progress=1000, min_grad_norm=1e-7, perplexity_epsilon=1e-3, metric='euclidean', init='random', return_style='once', num_snapshots=5, verbose=0, random_seed=None, use_interactive=False, viz_timeout=10000, viz_server="tcp://localhost:5556", dump_points=False, dump_file="dump.txt", dump_interval=1, print_interval=10, device=0, ): """Initialization method for barnes hut T-SNE class. """ # Initialize the variables self.n_components = int(n_components) if self.n_components != 2: raise ValueError('The current barnes-hut implementation does not support projection into dimensions other than 2 for now.') self.perplexity = float(perplexity) self.early_exaggeration = float(early_exaggeration) self.learning_rate = float(learning_rate) self.n_iter = int(n_iter) self.n_iter_without_progress = int(n_iter_without_progress) self.min_grad_norm = float(min_grad_norm) if metric not in ['euclidean']: raise ValueError('Non-Euclidean metrics are not currently supported. Please use metric=\'euclidean\' for now.') else: self.metric = metric if init not in ['random']: raise ValueError('Non-Random initialization is not currently supported. 
Please use init=\'random\' for now.') else: self.init = init self.verbose = int(verbose) # Initialize non-sklearn variables self.num_neighbors = int(num_neighbors) self.force_magnify_iters = int(force_magnify_iters) self.perplexity_epsilon = float(perplexity_epsilon) self.pre_momentum = float(pre_momentum) self.post_momentum = float(post_momentum) self.theta = float(theta) self.epssq =float(epssq) self.device = int(device) self.print_interval = int(print_interval) # Point dumpoing self.dump_file = str(dump_file) self.dump_points = bool(dump_points) self.dump_interval = int(dump_interval) # Viz self.use_interactive = bool(use_interactive) self.viz_server = str(viz_server) self.viz_timeout = int(viz_timeout) # Return style if return_style not in ['once','snapshots']: raise ValueError('Invalid return style...') elif return_style == 'once': self.return_style = 0 elif return_style == 'snapshots': self.return_style = 1 self.num_snapshots = int(num_snapshots) # Build the hooks for the BH T-SNE library self._path = pkg_resources.resource_filename('tsnecuda','') # Load from current location # self._faiss_lib = N.ctypeslib.load_library('libfaiss', self._path) # Load the ctypes library # self._gpufaiss_lib = N.ctypeslib.load_library('libgpufaiss', self._path) # Load the ctypes library self._lib = N.ctypeslib.load_library('libtsnecuda', self._path) # Load the ctypes library # Hook the BH T-SNE function self._lib.pymodule_bh_tsne.restype = None self._lib.pymodule_bh_tsne.argtypes = [ N.ctypeslib.ndpointer(N.float32, ndim=2, flags='ALIGNED, F_CONTIGUOUS, WRITEABLE'), # result N.ctypeslib.ndpointer(N.float32, ndim=2, flags='ALIGNED, CONTIGUOUS'), # points ctypes.POINTER(N.ctypeslib.c_intp), # dims ctypes.c_float, # Perplexity ctypes.c_float, # Learning Rate ctypes.c_float, # Magnitude Factor ctypes.c_int, # Num Neighbors ctypes.c_int, # Iterations ctypes.c_int, # Iterations no progress ctypes.c_int, # Force Magnify iterations ctypes.c_float, # Perplexity search epsilon ctypes.c_float, # pre-exaggeration momentum ctypes.c_float, # post-exaggeration momentum ctypes.c_float, # Theta ctypes.c_float, # epssq ctypes.c_float, # Minimum gradient norm ctypes.c_int, # Initialization types N.ctypeslib.ndpointer(N.float32, ndim=2, flags='ALIGNED, F_CONTIGUOUS'), # Initialization Data ctypes.c_bool, # Dump points
N.ctypeslib.ndpointer(N.uint8, flags='ALIGNED, CONTIGUOUS')
numpy.ctypeslib.ndpointer
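For this row the target API is numpy.ctypeslib.ndpointer, which builds a ctypes-compatible argument type that validates dtype, dimensionality, and memory flags before an array crosses into C. A small sketch that exercises the check without loading libtsnecuda (names are illustrative; from_param is the hook ctypes invokes during argument conversion):

import numpy as np
import numpy.ctypeslib as npct

# A type that only accepts aligned, C-contiguous uint8 arrays.
u8_flat = npct.ndpointer(np.uint8, flags='ALIGNED, CONTIGUOUS')

u8_flat.from_param(np.zeros(8, dtype=np.uint8))  # passes validation
try:
    u8_flat.from_param(np.zeros(8, dtype=np.float32))
except TypeError as err:
    print('rejected:', err)  # wrong dtype is refused before the C call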
# pvtrace is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# pvtrace is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import numpy as np
from external.transformations import translation_matrix, rotation_matrix
import external.transformations as tf
from Trace import Photon
from Geometry import Box, Cylinder, FinitePlane, transform_point, transform_direction, rotation_matrix_from_vector_alignment, norm
from Materials import Spectrum


def random_spherical_vector():
    # This method of calculating isotropic vectors is taken from the GNU Scientific Library
    LOOP = True
    while LOOP:
        x = -1. + 2. * np.random.uniform()
        y = -1. + 2. * np.random.uniform()
        s = x**2 + y**2
        if s <= 1.0:
            LOOP = False

    z = -1. + 2. * s
    a = 2 * np.sqrt(1 - s)
    x = a * x
    y = a * y
    return np.array([x, y, z])


class SimpleSource(object):
    """A light source that will generate photons of a single colour, direction and position."""

    def __init__(self, position=[0, 0, 0], direction=[0, 0, 1], wavelength=555, use_random_polarisation=False):
        super(SimpleSource, self).__init__()
        self.position = position
        self.direction = direction
        self.wavelength = wavelength
        self.use_random_polarisation = use_random_polarisation
        self.throw = 0
        self.source_id = "SimpleSource_" + str(id(self))

    def photon(self):
        photon = Photon()
        photon.source = self.source_id
        photon.position = np.array(self.position)
        photon.direction = np.array(self.direction)
        photon.active = True
        photon.wavelength = self.wavelength

        # If use_polarisation is set generate a random polarisation vector of the photon
        if self.use_random_polarisation:
            # Randomise rotation angle around the xy-plane, then transform from +z to the direction of the photon
            vec = random_spherical_vector()
            vec[2] = 0.
            vec = norm(vec)
            R = rotation_matrix_from_vector_alignment(self.direction, [0, 0, 1])
            photon.polarisation = transform_direction(vec, R)
        else:
            photon.polarisation = None

        photon.id = self.throw
        self.throw = self.throw + 1
        return photon


class Laser(object):
    """A light source that will generate photons of a single colour, direction and position."""

    def __init__(self, position=[0, 0, 0], direction=[0, 0, 1], wavelength=555, polarisation=None):
        super(Laser, self).__init__()
        self.position = np.array(position)
        self.direction = np.array(direction)
        self.wavelength = wavelength
        assert polarisation is not None, "Polarisation of the Laser is not set."
        self.polarisation = np.array(polarisation)
        self.throw = 0
        self.source_id = "LaserSource_" + str(id(self))

    def photon(self):
        photon = Photon()
        photon.source = self.source_id
        photon.position = np.array(self.position)
        photon.direction = np.array(self.direction)
        photon.active = True
        photon.wavelength = self.wavelength
        photon.polarisation = self.polarisation
        photon.id = self.throw
        self.throw = self.throw + 1
        return photon


class PlanarSource(object):
    """A box that emits photons from the top surface (normal), sampled from the spectrum."""

    def __init__(self, spectrum=None, wavelength=555, direction=(0, 0, 1), length=0.05, width=0.05):
        super(PlanarSource, self).__init__()
        self.spectrum = spectrum
        self.wavelength = wavelength
        self.plane = FinitePlane(length=length, width=width)
        self.length = length
        self.width = width
        # direction is the direction that photons are fired out of the plane in the GLOBAL FRAME.
        # i.e. this is passed directly to the photon to set its direction
        self.direction = direction
        self.throw = 0
        self.source_id = "PlanarSource_" + str(id(self))

    def translate(self, translation):
        self.plane.append_transform(tf.translation_matrix(translation))

    def rotate(self, angle, axis):
        self.plane.append_transform(tf.rotation_matrix(angle, axis))

    def photon(self):
        photon = Photon()
        photon.source = self.source_id
        photon.id = self.throw
        self.throw = self.throw + 1
        # Create a point which is on the surface of the finite plane in its local frame
        x = np.random.uniform(0., self.length)
        y = np.random.uniform(0., self.width)
        local_point = (x, y, 0.)

        # Transform the local point into the global frame
        photon.position = transform_point(local_point, self.plane.transform)
        photon.direction = self.direction
        photon.active = True

        if self.spectrum is not None:
            photon.wavelength = self.spectrum.wavelength_at_probability(np.random.uniform())
        else:
            photon.wavelength = self.wavelength

        return photon


class LensSource(object):
    """
    A source where photons generated in a plane are focused on a line with space tolerance given by variable "focussize".
    The focus line should be perpendicular to the plane normal and aligned with the z-axis.
    """

    def __init__(self, spectrum=None, wavelength=555, linepoint=(0, 0, 0), linedirection=(0, 0, 1), focussize=0, planeorigin=(-1, -1, -1), planeextent=(-1, 1, 1)):
        super(LensSource, self).__init__()
        self.spectrum = spectrum
        self.wavelength = wavelength
        self.planeorigin = planeorigin
        self.planeextent = planeextent
        self.linepoint = np.array(linepoint)
        self.linedirection = np.array(linedirection)
        self.focussize = focussize
        self.throw = 0
        self.source_id = "LensSource_" + str(id(self))

    def photon(self):
        photon = Photon()
        photon.source = self.source_id
        photon.id = self.throw
        self.throw = self.throw + 1

        # Position
        x = np.random.uniform(self.planeorigin[0], self.planeextent[0])
        y = np.random.uniform(self.planeorigin[1], self.planeextent[1])
        z = np.random.uniform(self.planeorigin[2], self.planeextent[2])
        photon.position = np.array((x, y, z))

        # Direction
        focuspoint = np.array((0., 0., 0.))
        focuspoint[0] = self.linepoint[0] + np.random.uniform(-self.focussize, self.focussize)
        focuspoint[1] = self.linepoint[1] + np.random.uniform(-self.focussize, self.focussize)
        focuspoint[2] = photon.position[2]

        direction = focuspoint - photon.position
        modulus = (direction[0]**2 + direction[1]**2 + direction[2]**2)**0.5
        photon.direction = direction / modulus

        # Wavelength
        if self.spectrum is not None:
            photon.wavelength = self.spectrum.wavelength_at_probability(np.random.uniform())
        else:
            photon.wavelength = self.wavelength

        return photon


class LensSourceAngle(object):
    """
    A source where photons generated in a plane are focused on a line with space tolerance given by variable "focussize".
    The focus line should be perpendicular to the plane normal and aligned with the z-axis.
    For this lens an additional z-boost is added (angle of incidence in the z-direction).
    """

    def __init__(self, spectrum=None, wavelength=555, linepoint=(0, 0, 0), linedirection=(0, 0, 1), angle=0, focussize=0, planeorigin=(-1, -1, -1), planeextent=(-1, 1, 1)):
        super(LensSourceAngle, self).__init__()
        self.spectrum = spectrum
        self.wavelength = wavelength
        self.planeorigin = planeorigin
        self.planeextent = planeextent
        self.linepoint = np.array(linepoint)
        self.linedirection = np.array(linedirection)
        self.focussize = focussize
        self.angle = angle
        self.throw = 0
        self.source_id = "LensSourceAngle_" + str(id(self))

    def photon(self):
        photon = Photon()
        photon.id = self.throw
        self.throw = self.throw + 1

        # Position
        x = np.random.uniform(self.planeorigin[0], self.planeextent[0])
        y = np.random.uniform(self.planeorigin[1], self.planeextent[1])
        boost = y * np.tan(self.angle)
        z = np.random.uniform(self.planeorigin[2], self.planeextent[2]) - boost
        photon.position = np.array((x, y, z))

        # Direction
        focuspoint = np.array((0., 0., 0.))
        focuspoint[0] = self.linepoint[0] + np.random.uniform(-self.focussize, self.focussize)
        focuspoint[1] = self.linepoint[1] + np.random.uniform(-self.focussize, self.focussize)
        focuspoint[2] = photon.position[2] + boost

        direction = focuspoint - photon.position
        modulus = (direction[0]**2 + direction[1]**2 + direction[2]**2)**0.5
        photon.direction = direction / modulus

        # Wavelength
        if self.spectrum is not None:
            photon.wavelength = self.spectrum.wavelength_at_probability(np.random.uniform())
        else:
            photon.wavelength = self.wavelength

        return photon


class CylindricalSource(object):
    """
    A source for photons emitted in a random direction and position inside a cylinder (radius, length)
    """

    def __init__(self, spectrum=None, wavelength=555, radius=1, length=10):
        super(CylindricalSource, self).__init__()
        self.spectrum = spectrum
        self.wavelength = wavelength
        self.shape = Cylinder(radius =
radius, length = length) self.radius = radius self.length = length self.throw = 0 self.source_id = "CylindricalSource_" + str(id(self)) def translate(self, translation): self.shape.append_transform(tf.translation_matrix(translation)) def rotate(self, angle, axis): self.shape.append_transform(tf.rotation_matrix(angle, axis)) def photon(self): photon = Photon() photon.source = self.source_id photon.id = self.throw self.throw = self.throw + 1 # Position of emission phi = np.random.uniform(0., 2*np.pi) r = np.random.uniform(0.,self.radius) x = r*np.cos(phi) y = r*np.sin(phi) z =
np.random.uniform(0.,self.length)
numpy.random.uniform
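This row's completion is a numpy.random.uniform draw for the axial coordinate of the cylindrical source. A short sketch of the call's semantics, with a hedged aside on radial sampling (the R, r, phi, and z names are illustrative, not part of pvtrace):

import numpy as np

# Draws come from the half-open interval [low, high).
z = np.random.uniform(0., 10.)                     # scalar draw along the axis
phi = np.random.uniform(0., 2 * np.pi, size=1000)  # vectorised draws

# Aside: drawing a radius directly, r = np.random.uniform(0., R), concentrates
# points near the axis; r = R * np.sqrt(np.random.uniform()) is the usual
# choice when a uniform density over the circular cross-section is wanted.
R = 1.0
r = R * np.sqrt(np.random.uniform(0., 1., size=1000))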
# ________ # / # \ / # \ / # \/ import random import textwrap import emd_mean import AdvEMDpy import emd_basis import emd_utils import numpy as np import pandas as pd import cvxpy as cvx import seaborn as sns import matplotlib.pyplot as plt from scipy.integrate import odeint from scipy.ndimage import gaussian_filter from emd_utils import time_extension, Utility from scipy.interpolate import CubicSpline from emd_hilbert import Hilbert, hilbert_spectrum from emd_preprocess import Preprocess from emd_mean import Fluctuation from AdvEMDpy import EMD # alternate packages from PyEMD import EMD as pyemd0215 import emd as emd040 sns.set(style='darkgrid') pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001) pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time) pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series) # plot 0 - addition fig = plt.figure(figsize=(9, 4)) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('First Iteration of Sifting Algorithm') plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1) plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()], c='r', label=r'$M(t_i)$', zorder=2) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4) plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()], c='c', label=r'$m(t_j)$', zorder=3) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5) plt.yticks(ticks=[-2, -1, 0, 1, 2]) plt.xticks(ticks=[0, np.pi, 2 * np.pi], labels=[r'0', r'$\pi$', r'$2\pi$']) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/pseudo_algorithm.png') plt.show() knots = np.arange(12) time = np.linspace(0, 11, 1101) basis = emd_basis.Basis(time=time, time_series=time) b_spline_basis = basis.cubic_b_spline(knots) chsi_basis = basis.chsi_basis(knots) # plot 1 plt.title('Non-Natural Cubic B-Spline Bases at Boundary') plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $') plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $') plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $') plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $') plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $') plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $']) plt.xlim(4.4, 6.6) plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.legend(loc='upper left') plt.savefig('jss_figures/boundary_bases.png') plt.show() # plot 1a - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) knots_uniform = np.linspace(0, 2 * np.pi, 51) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0] fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) 
plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Uniform Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Uniform Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Uniform Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots_uniform)): axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_uniform.png') plt.show() # plot 1b - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=1, verbose=False) fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Statically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Statically Optimised Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Statically Optimised Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots)): axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_1.png') plt.show() # plot 1c - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=2, verbose=False) fig,
axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Dynamically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Dynamically Optimised Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Dynamically Optimised Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots[i])): axs[i].plot(knots[i][j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_2.png') plt.show() # plot 1d - addition window = 81 fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Filtering Demonstration') axs[1].set_title('Zoomed Region') preprocess_time = pseudo_alg_time.copy() np.random.seed(1) random.seed(1) preprocess_time_series = pseudo_alg_time_series + np.random.normal(0, 0.1, len(preprocess_time)) for i in random.sample(range(1000), 500): preprocess_time_series[i] += np.random.normal(0, 1) preprocess = Preprocess(time=preprocess_time, time_series=preprocess_time_series) axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[0].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[0].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12)) axs[0].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize interpolation filter', 14)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[1].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[1].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12)) axs[1].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize interpolation filter', 14)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_filter.png') plt.show() # plot 1e - addition fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Smoothing Demonstration') axs[1].set_title('Zoomed Region') axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[0].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) downsampled_and_decimated = preprocess.downsample() axs[0].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 11)) downsampled = preprocess.downsample(decimate=False) axs[0].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[1].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) 
axs[1].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 13)) axs[1].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.06, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.06, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_smooth.png') plt.show() # plot 2 fig, axs = plt.subplots(1, 2, sharey=True) axs[0].set_title('Cubic B-Spline Bases') axs[0].plot(time, b_spline_basis[2, :].T, '--', label='Basis 1') axs[0].plot(time, b_spline_basis[3, :].T, '--', label='Basis 2') axs[0].plot(time, b_spline_basis[4, :].T, '--', label='Basis 3') axs[0].plot(time, b_spline_basis[5, :].T, '--', label='Basis 4') axs[0].legend(loc='upper left') axs[0].plot(5 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].plot(6 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].set_xticks([5, 6]) axs[0].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[0].set_xlim(4.5, 6.5) axs[1].set_title('Cubic Hermite Spline Bases') axs[1].plot(time, chsi_basis[10, :].T, '--') axs[1].plot(time, chsi_basis[11, :].T, '--') axs[1].plot(time, chsi_basis[12, :].T, '--') axs[1].plot(time, chsi_basis[13, :].T, '--') axs[1].plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].set_xticks([5, 6]) axs[1].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[1].set_xlim(4.5, 6.5) plt.savefig('jss_figures/comparing_bases.png') plt.show() # plot 3 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_dash = maxima_y[-1] * np.ones_like(max_dash_time) min_dash_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_dash = minima_y[-1] * np.ones_like(min_dash_time) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) max_discard = maxima_y[-1] max_discard_time = minima_x[-1] - maxima_x[-1] + minima_x[-1] max_discard_dash_time = np.linspace(max_discard_time - width, max_discard_time + width, 101) max_discard_dash = max_discard * np.ones_like(max_discard_dash_time) dash_2_time = np.linspace(minima_x[-1], max_discard_time, 101) dash_2 = np.linspace(minima_y[-1], max_discard, 101) end_point_time = time[-1] end_point = time_series[-1] time_reflect = np.linspace((5 - a) * np.pi, (5 + a) * np.pi, 101) time_series_reflect = np.flip(np.cos(np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101)) + np.cos(5 * np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101))) time_series_anti_reflect = time_series_reflect[0] - time_series_reflect utils = emd_utils.Utility(time=time, time_series=time_series_anti_reflect) anti_max_bool = utils.max_bool_func_1st_order_fd() anti_max_point_time = time_reflect[anti_max_bool] 
anti_max_point = time_series_anti_reflect[anti_max_bool] utils = emd_utils.Utility(time=time, time_series=time_series_reflect) no_anchor_max_time = time_reflect[utils.max_bool_func_1st_order_fd()] no_anchor_max = time_series_reflect[utils.max_bool_func_1st_order_fd()] point_1 = 5.4 length_distance = np.linspace(maxima_y[-1], minima_y[-1], 101) length_distance_time = point_1 * np.pi * np.ones_like(length_distance) length_time = np.linspace(point_1 * np.pi - width, point_1 * np.pi + width, 101) length_top = maxima_y[-1] * np.ones_like(length_time) length_bottom = minima_y[-1] * np.ones_like(length_time) point_2 = 5.2 length_distance_2 = np.linspace(time_series[-1], minima_y[-1], 101) length_distance_time_2 = point_2 * np.pi * np.ones_like(length_distance_2) length_time_2 = np.linspace(point_2 * np.pi - width, point_2 * np.pi + width, 101) length_top_2 = time_series[-1] * np.ones_like(length_time_2) length_bottom_2 = minima_y[-1] * np.ones_like(length_time_2) symmetry_axis_1_time = minima_x[-1] * np.ones(101) symmetry_axis_2_time = time[-1] * np.ones(101) symmetry_axis = np.linspace(-2, 2, 101) end_time = np.linspace(time[-1] - width, time[-1] + width, 101) end_signal = time_series[-1] * np.ones_like(end_time) anti_symmetric_time = np.linspace(time[-1] - 0.5, time[-1] + 0.5, 101) anti_symmetric_signal = time_series[-1] * np.ones_like(anti_symmetric_time) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.plot(time, time_series, linewidth=2, label='Signal') plt.title('Symmetry Edge Effects Example') plt.plot(time_reflect, time_series_reflect, 'g--', linewidth=2, label=textwrap.fill('Symmetric signal', 10)) plt.plot(time_reflect[:51], time_series_anti_reflect[:51], '--', c='purple', linewidth=2, label=textwrap.fill('Anti-symmetric signal', 10)) plt.plot(max_dash_time, max_dash, 'k-') plt.plot(min_dash_time, min_dash, 'k-') plt.plot(dash_1_time, dash_1, 'k--') plt.plot(dash_2_time, dash_2, 'k--') plt.plot(length_distance_time, length_distance, 'k--') plt.plot(length_distance_time_2, length_distance_2, 'k--') plt.plot(length_time, length_top, 'k-') plt.plot(length_time, length_bottom, 'k-') plt.plot(length_time_2, length_top_2, 'k-') plt.plot(length_time_2, length_bottom_2, 'k-') plt.plot(end_time, end_signal, 'k-') plt.plot(symmetry_axis_1_time, symmetry_axis, 'r--', zorder=1) plt.plot(anti_symmetric_time, anti_symmetric_signal, 'r--', zorder=1) plt.plot(symmetry_axis_2_time, symmetry_axis, 'r--', label=textwrap.fill('Axes of symmetry', 10), zorder=1) plt.text(5.1 * np.pi, -0.7, r'$\beta$L') plt.text(5.34 * np.pi, -0.05, 'L') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(max_discard_time, max_discard, c='purple', zorder=4, label=textwrap.fill('Symmetric Discard maxima', 10)) plt.scatter(end_point_time, end_point, c='orange', zorder=4, label=textwrap.fill('Symmetric Anchor maxima', 10)) plt.scatter(anti_max_point_time, anti_max_point, c='green', zorder=4, label=textwrap.fill('Anti-Symmetric maxima', 10)) plt.scatter(no_anchor_max_time, no_anchor_max, c='gray', zorder=4, label=textwrap.fill('Symmetric maxima', 10)) plt.xlim(3.9 * np.pi, 5.5 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_symmetry_anti.png') plt.show()
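The edge-effect figure above builds its reflected extensions inline; as an illustrative aside (a simplified sketch under stated assumptions, not the package's emd_utils.time_extension implementation), reflecting a uniformly sampled signal about its right endpoint can be written as: import numpy as np
def extend_right_edge(t, x, anti_symmetric=False):
    # mirror the samples (excluding the endpoint itself) past the right boundary
    dt = t[1] - t[0]
    t_ext = t[-1] + dt * np.arange(1, len(t))
    x_ext = x[-2::-1]
    if anti_symmetric:
        x_ext = 2 * x[-1] - x_ext  # also flip amplitudes about the endpoint value
    return np.concatenate([t, t_ext]), np.concatenate([x, x_ext])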
# plot 4 a = 0.21 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_1 = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_dash_2 = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_dash_time_1 = maxima_x[-1] * np.ones_like(max_dash_1) max_dash_time_2 = maxima_x[-2] * np.ones_like(max_dash_1) min_dash_1 = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_dash_2 = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_dash_time_1 = minima_x[-1] * np.ones_like(min_dash_1) min_dash_time_2 = minima_x[-2] * np.ones_like(min_dash_1) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) dash_2_time = np.linspace(maxima_x[-1], minima_x[-2], 101) dash_2 = np.linspace(maxima_y[-1], minima_y[-2], 101) s1 = (minima_y[-2] - maxima_y[-1]) / (minima_x[-2] - maxima_x[-1]) slope_based_maximum_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) slope_based_maximum = minima_y[-1] + (slope_based_maximum_time - minima_x[-1]) * s1 max_dash_time_3 = slope_based_maximum_time * np.ones_like(max_dash_1) max_dash_3 = np.linspace(slope_based_maximum - width, slope_based_maximum + width, 101) dash_3_time = np.linspace(minima_x[-1], slope_based_maximum_time, 101) dash_3 = np.linspace(minima_y[-1], slope_based_maximum, 101) s2 = (minima_y[-1] - maxima_y[-1]) / (minima_x[-1] - maxima_x[-1]) slope_based_minimum_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) slope_based_minimum = slope_based_maximum - (slope_based_maximum_time - slope_based_minimum_time) * s2 min_dash_time_3 = slope_based_minimum_time * np.ones_like(min_dash_1) min_dash_3 = np.linspace(slope_based_minimum - width, slope_based_minimum + width, 101) dash_4_time = np.linspace(slope_based_maximum_time, slope_based_minimum_time) dash_4 = np.linspace(slope_based_maximum, slope_based_minimum) maxima_dash = np.linspace(2.5 - width, 2.5 + width, 101) maxima_dash_time_1 = maxima_x[-2] * np.ones_like(maxima_dash) maxima_dash_time_2 = maxima_x[-1] * np.ones_like(maxima_dash) maxima_dash_time_3 = slope_based_maximum_time * np.ones_like(maxima_dash) maxima_line_dash_time = np.linspace(maxima_x[-2], slope_based_maximum_time, 101) maxima_line_dash = 2.5 * np.ones_like(maxima_line_dash_time) minima_dash = np.linspace(-3.4 - width, -3.4 + width, 101) minima_dash_time_1 = minima_x[-2] * np.ones_like(minima_dash) minima_dash_time_2 = minima_x[-1] * np.ones_like(minima_dash) minima_dash_time_3 = slope_based_minimum_time * np.ones_like(minima_dash) minima_line_dash_time = np.linspace(minima_x[-2], slope_based_minimum_time, 101) minima_line_dash = -3.4 * np.ones_like(minima_line_dash_time) # slightly edit signal to make difference between slope-based method and improved slope-based method more clear time_series[time >= minima_x[-1]] = 1.5 * (time_series[time >= minima_x[-1]] - time_series[time == minima_x[-1]]) + \ time_series[time == minima_x[-1]] improved_slope_based_maximum_time = time[-1] improved_slope_based_maximum = time_series[-1] improved_slope_based_minimum_time = slope_based_minimum_time improved_slope_based_minimum = improved_slope_based_maximum + s2 * (improved_slope_based_minimum_time - improved_slope_based_maximum_time) 
min_dash_4 =
np.linspace(improved_slope_based_minimum - width, improved_slope_based_minimum + width, 101)
numpy.linspace
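For reference, np.linspace (the API named above) returns an endpoint-inclusive grid, which is why the plotting script relies on it for time axes and knot sequences. A minimal check with illustrative values: import numpy as np
t = np.linspace(0, 2 * np.pi, 1001)    # 1001 samples, both endpoints included
knots = np.linspace(0, 2 * np.pi, 51)  # 51 uniformly spaced knots
assert t[0] == 0.0 and t[-1] == 2 * np.pi
assert np.isclose(t[1] - t[0], 2 * np.pi / 1000)  # step = span / (num - 1)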
''' <NAME> set up :2020-1-9 intergrate img and label into one file -- fiducial1024_v1 ''' import argparse import sys, os import pickle import random import collections import json import numpy as np import scipy.io as io import scipy.misc as m import matplotlib.pyplot as plt import glob import math import time import threading import multiprocessing as mp from multiprocessing import Pool import re import cv2 # sys.path.append('/lustre/home/gwxie/hope/project/dewarp/datasets/') # /lustre/home/gwxie/program/project/unwarp/perturbed_imgaes/GAN import utils def getDatasets(dir): return os.listdir(dir) class perturbed(utils.BasePerturbed): def __init__(self, path, bg_path, save_path, save_suffix): self.path = path self.bg_path = bg_path self.save_path = save_path self.save_suffix = save_suffix def save_img(self, m, n, fold_curve='fold', repeat_time=4, fiducial_points = 16, relativeShift_position='relativeShift_v2'): origin_img = cv2.imread(self.path, flags=cv2.IMREAD_COLOR) save_img_shape = [512*2, 480*2] # 320 # reduce_value = np.random.choice([2**4, 2**5, 2**6, 2**7, 2**8], p=[0.01, 0.1, 0.4, 0.39, 0.1]) reduce_value = np.random.choice([2*2, 4*2, 8*2, 16*2, 24*2, 32*2, 40*2, 48*2], p=[0.02, 0.18, 0.2, 0.3, 0.1, 0.1, 0.08, 0.02]) # reduce_value = np.random.choice([8*2, 16*2, 24*2, 32*2, 40*2, 48*2], p=[0.01, 0.02, 0.2, 0.4, 0.19, 0.18]) # reduce_value = np.random.choice([16, 24, 32, 40, 48, 64], p=[0.01, 0.1, 0.2, 0.4, 0.2, 0.09]) base_img_shrink = save_img_shape[0] - reduce_value # enlarge_img_shrink = [1024, 768] # enlarge_img_shrink = [896, 672] # 420 enlarge_img_shrink = [512*4, 480*4] # 420 # enlarge_img_shrink = [896*2, 768*2] # 420 # enlarge_img_shrink = [896, 768] # 420 # enlarge_img_shrink = [768, 576] # 420 # enlarge_img_shrink = [640, 480] # 420 '''''' im_lr = origin_img.shape[0] im_ud = origin_img.shape[1] reduce_value_v2 = np.random.choice([2*2, 4*2, 8*2, 16*2, 24*2, 28*2, 32*2, 48*2], p=[0.02, 0.18, 0.2, 0.2, 0.1, 0.1, 0.1, 0.1]) # reduce_value_v2 = np.random.choice([16, 24, 28, 32, 48, 64], p=[0.01, 0.1, 0.2, 0.3, 0.25, 0.14]) if im_lr > im_ud: im_ud = min(int(im_ud / im_lr * base_img_shrink), save_img_shape[1] - reduce_value_v2) im_lr = save_img_shape[0] - reduce_value else: base_img_shrink = save_img_shape[1] - reduce_value im_lr = min(int(im_lr / im_ud * base_img_shrink), save_img_shape[0] - reduce_value_v2) im_ud = base_img_shrink if round(im_lr / im_ud, 2) < 0.5 or round(im_ud / im_lr, 2) < 0.5: repeat_time = min(repeat_time, 8) edge_padding = 3 im_lr -= im_lr % (fiducial_points-1) - (2*edge_padding) # im_lr % (fiducial_points-1) - 1 im_ud -= im_ud % (fiducial_points-1) - (2*edge_padding) # im_ud % (fiducial_points-1) - 1 im_hight = np.linspace(edge_padding, im_lr - edge_padding, fiducial_points, dtype=np.int64) im_wide = np.linspace(edge_padding, im_ud - edge_padding, fiducial_points, dtype=np.int64) # im_lr -= im_lr % (fiducial_points-1) - (1+2*edge_padding) # im_lr % (fiducial_points-1) - 1 # im_ud -= im_ud % (fiducial_points-1) - (1+2*edge_padding) # im_ud % (fiducial_points-1) - 1 # im_hight = np.linspace(edge_padding, im_lr - (1+edge_padding), fiducial_points, dtype=np.int64) # im_wide = np.linspace(edge_padding, im_ud - (1+edge_padding), fiducial_points, dtype=np.int64) im_x, im_y = np.meshgrid(im_hight, im_wide) segment_x = (im_lr) // (fiducial_points-1) segment_y = (im_ud) // (fiducial_points-1) # plt.plot(im_x, im_y, # color='limegreen', # marker='.', # linestyle='') # plt.grid(True) # plt.show() self.origin_img = cv2.resize(origin_img, (im_ud, im_lr), 
interpolation=cv2.INTER_CUBIC) perturbed_bg_ = getDatasets(self.bg_path) perturbed_bg_img_ = self.bg_path+random.choice(perturbed_bg_) perturbed_bg_img = cv2.imread(perturbed_bg_img_, flags=cv2.IMREAD_COLOR) mesh_shape = self.origin_img.shape[:2] self.synthesis_perturbed_img = np.full((enlarge_img_shrink[0], enlarge_img_shrink[1], 3), 256, dtype=np.float32)#np.zeros_like(perturbed_bg_img) # self.synthesis_perturbed_img = np.full((enlarge_img_shrink[0], enlarge_img_shrink[1], 3), 0, dtype=np.int16)#np.zeros_like(perturbed_bg_img) self.new_shape = self.synthesis_perturbed_img.shape[:2] perturbed_bg_img = cv2.resize(perturbed_bg_img, (save_img_shape[1], save_img_shape[0]), cv2.INPAINT_TELEA) origin_pixel_position = np.argwhere(np.zeros(mesh_shape, dtype=np.uint32) == 0).reshape(mesh_shape[0], mesh_shape[1], 2) pixel_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape(self.new_shape[0], self.new_shape[1], 2) self.perturbed_xy_ = np.zeros((self.new_shape[0], self.new_shape[1], 2)) # self.perturbed_xy_ = pixel_position.copy().astype(np.float32) # fiducial_points_grid = origin_pixel_position[im_x, im_y] self.synthesis_perturbed_label = np.zeros((self.new_shape[0], self.new_shape[1], 2)) x_min, y_min, x_max, y_max = self.adjust_position_v2(0, 0, mesh_shape[0], mesh_shape[1], save_img_shape) origin_pixel_position += [x_min, y_min] x_min, y_min, x_max, y_max = self.adjust_position(0, 0, mesh_shape[0], mesh_shape[1]) x_shift = random.randint(-enlarge_img_shrink[0]//16, enlarge_img_shrink[0]//16) y_shift = random.randint(-enlarge_img_shrink[1]//16, enlarge_img_shrink[1]//16) x_min += x_shift x_max += x_shift y_min += y_shift y_max += y_shift '''im_x,y''' im_x += x_min im_y += y_min self.synthesis_perturbed_img[x_min:x_max, y_min:y_max] = self.origin_img self.synthesis_perturbed_label[x_min:x_max, y_min:y_max] = origin_pixel_position synthesis_perturbed_img_map = self.synthesis_perturbed_img.copy() synthesis_perturbed_label_map = self.synthesis_perturbed_label.copy() foreORbackground_label = np.full((mesh_shape), 1, dtype=np.int16) foreORbackground_label_map = np.full((self.new_shape), 0, dtype=np.int16) foreORbackground_label_map[x_min:x_max, y_min:y_max] = foreORbackground_label # synthesis_perturbed_img_map = self.pad(self.synthesis_perturbed_img.copy(), x_min, y_min, x_max, y_max) # synthesis_perturbed_label_map = self.pad(synthesis_perturbed_label_map, x_min, y_min, x_max, y_max) '''*****************************************************************''' is_normalizationFun_mixture = self.is_perform(0.2, 0.8) # if not is_normalizationFun_mixture: normalizationFun_0_1 = False # normalizationFun_0_1 = self.is_perform(0.5, 0.5) if fold_curve == 'fold': fold_curve_random = True # is_normalizationFun_mixture = False normalizationFun_0_1 = self.is_perform(0.2, 0.8) if is_normalizationFun_mixture: alpha_perturbed = random.randint(80, 120) / 100 else: if normalizationFun_0_1 and repeat_time < 8: alpha_perturbed = random.randint(50, 70) / 100 else: alpha_perturbed = random.randint(70, 130) / 100 else: fold_curve_random = self.is_perform(0.1, 0.9) # False # self.is_perform(0.01, 0.99) alpha_perturbed = random.randint(80, 160) / 100 # is_normalizationFun_mixture = False # self.is_perform(0.01, 0.99) synthesis_perturbed_img = np.full_like(self.synthesis_perturbed_img, 256) # synthesis_perturbed_img = np.full_like(self.synthesis_perturbed_img, 0, dtype=np.int16) synthesis_perturbed_label = np.zeros_like(self.synthesis_perturbed_label) alpha_perturbed_change = self.is_perform(0.5, 0.5) 
p_pp_choice = self.is_perform(0.8, 0.2) if fold_curve == 'fold' else self.is_perform(0.1, 0.9) for repeat_i in range(repeat_time): if alpha_perturbed_change: if fold_curve == 'fold': if is_normalizationFun_mixture: alpha_perturbed = random.randint(80, 120) / 100 else: if normalizationFun_0_1 and repeat_time < 8: alpha_perturbed = random.randint(50, 70) / 100 else: alpha_perturbed = random.randint(70, 130) / 100 else: alpha_perturbed = random.randint(80, 160) / 100 '''''' linspace_x = [0, (self.new_shape[0] - im_lr) // 2 - 1, self.new_shape[0] - (self.new_shape[0] - im_lr) // 2 - 1, self.new_shape[0] - 1] linspace_y = [0, (self.new_shape[1] - im_ud) // 2 - 1, self.new_shape[1] - (self.new_shape[1] - im_ud) // 2 - 1, self.new_shape[1] - 1] linspace_x_seq = [1, 2, 3] linspace_y_seq = [1, 2, 3] r_x = random.choice(linspace_x_seq) r_y = random.choice(linspace_y_seq) perturbed_p = np.array( [random.randint(linspace_x[r_x-1] * 10, linspace_x[r_x] * 10), random.randint(linspace_y[r_y-1] * 10, linspace_y[r_y] * 10)])/10 if ((r_x == 1 or r_x == 3) and (r_y == 1 or r_y == 3)) and p_pp_choice: linspace_x_seq.remove(r_x) linspace_y_seq.remove(r_y) r_x = random.choice(linspace_x_seq) r_y = random.choice(linspace_y_seq) perturbed_pp = np.array( [random.randint(linspace_x[r_x-1] * 10, linspace_x[r_x] * 10), random.randint(linspace_y[r_y-1] * 10, linspace_y[r_y] * 10)])/10 # perturbed_p, perturbed_pp = np.array( # [random.randint(0, self.new_shape[0] * 10) / 10, # random.randint(0, self.new_shape[1] * 10) / 10]) \ # , np.array([random.randint(0, self.new_shape[0] * 10) / 10, # random.randint(0, self.new_shape[1] * 10) / 10]) # perturbed_p, perturbed_pp = np.array( # [random.randint((self.new_shape[0]-im_lr)//2*10, (self.new_shape[0]-(self.new_shape[0]-im_lr)//2) * 10) / 10, # random.randint((self.new_shape[1]-im_ud)//2*10, (self.new_shape[1]-(self.new_shape[1]-im_ud)//2) * 10) / 10]) \ # , np.array([random.randint((self.new_shape[0]-im_lr)//2*10, (self.new_shape[0]-(self.new_shape[0]-im_lr)//2) * 10) / 10, # random.randint((self.new_shape[1]-im_ud)//2*10, (self.new_shape[1]-(self.new_shape[1]-im_ud)//2) * 10) / 10]) '''''' perturbed_vp = perturbed_pp - perturbed_p perturbed_vp_norm =
np.linalg.norm(perturbed_vp)
numpy.linalg.norm
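In the row above, the norm feeds the fold geometry: every pixel is projected onto the random line through perturbed_p and perturbed_pp. A small sketch of that computation with hypothetical toy coordinates: import numpy as np
p = np.array([10.0, 20.0])    # one point on the fold line
pp = np.array([40.0, 60.0])   # a second point defining the line
v = pp - p
v_norm = np.linalg.norm(v)    # Euclidean length of the direction vector
pixels = np.array([[0.0, 0.0], [25.0, 40.0], [50.0, 80.0]])
# signed projection of (p - pixel) onto v for every pixel, mirroring
# np.dot((perturbed_p - pixel_position), perturbed_vp) / perturbed_vp_norm
d = np.dot(p - pixels, v) / v_norm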
perturbed_distance_vertex_and_line = np.dot((perturbed_p - pixel_position), perturbed_vp) / perturbed_vp_norm '''''' # perturbed_v = np.array([random.randint(-3000, 3000) / 100, random.randint(-3000, 3000) / 100]) # perturbed_v = np.array([random.randint(-4000, 4000) / 100, random.randint(-4000, 4000) / 100]) if fold_curve == 'fold' and self.is_perform(0.6, 0.4): # self.is_perform(0.3, 0.7): # perturbed_v = np.array([random.randint(-9000, 9000) / 100, random.randint(-9000, 9000) / 100]) perturbed_v = np.array([random.randint(-10000, 10000) / 100, random.randint(-10000, 10000) / 100]) # perturbed_v = np.array([random.randint(-11000, 11000) / 100, random.randint(-11000, 11000) / 100]) else: # perturbed_v = np.array([random.randint(-9000, 9000) / 100, random.randint(-9000, 9000) / 100]) # perturbed_v = np.array([random.randint(-16000, 16000) / 100, random.randint(-16000, 16000) / 100]) perturbed_v = np.array([random.randint(-8000, 8000) / 100, random.randint(-8000, 8000) / 100]) # perturbed_v = np.array([random.randint(-3500, 3500) / 100, random.randint(-3500, 3500) / 100]) # perturbed_v = np.array([random.randint(-600, 600) / 10, random.randint(-600, 600) / 10]) '''''' if fold_curve == 'fold': if is_normalizationFun_mixture: if self.is_perform(0.5, 0.5): perturbed_d =
np.abs(self.get_normalize(perturbed_distance_vertex_and_line)) else: perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), random.randint(1, 2)) else: if normalizationFun_0_1: perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), 2) else: perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line)) else: if is_normalizationFun_mixture: if self.is_perform(0.5, 0.5): perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line)) else: perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), random.randint(1, 2)) else: if normalizationFun_0_1: perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), 2) else: perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line)) '''''' if fold_curve_random: # omega_perturbed = (alpha_perturbed+0.2) / (perturbed_d + alpha_perturbed) # omega_perturbed = alpha_perturbed**perturbed_d omega_perturbed = alpha_perturbed / (perturbed_d + alpha_perturbed) else: omega_perturbed = 1 - perturbed_d ** alpha_perturbed '''shadow''' if self.is_perform(0.6, 0.4): synthesis_perturbed_img_map[x_min:x_max, y_min:y_max] = np.minimum(np.maximum(synthesis_perturbed_img_map[x_min:x_max, y_min:y_max] - np.int16(np.round(omega_perturbed[x_min:x_max, y_min:y_max].repeat(3).reshape(x_max-x_min, y_max-y_min, 3) * abs(np.linalg.norm(perturbed_v//2))*np.array([0.4-random.random()*0.1, 0.4-random.random()*0.1, 0.4-random.random()*0.1]))), 0), 255) '''''' if relativeShift_position in ['position', 'relativeShift_v2']: self.perturbed_xy_ += np.array([omega_perturbed * perturbed_v[0], omega_perturbed * perturbed_v[1]]).transpose(1, 2, 0) else: print('relativeShift_position error') exit() ''' flat_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape( self.new_shape[0] * self.new_shape[1], 2) vtx, wts = self.interp_weights(self.perturbed_xy_.reshape(self.new_shape[0] * self.new_shape[1], 2), flat_position) wts_sum = np.abs(wts).sum(-1) # flat_img.reshape(flat_shape[0] * flat_shape[1], 3)[:] = interpolate(pixel, vtx, wts) wts = wts[wts_sum <= 1, :] vtx = vtx[wts_sum <= 1, :] synthesis_perturbed_img.reshape(self.new_shape[0] * self.new_shape[1], 3)[wts_sum <= 1, :] = self.interpolate(synthesis_perturbed_img_map.reshape(self.new_shape[0] * self.new_shape[1], 3), vtx, wts) synthesis_perturbed_label.reshape(self.new_shape[0] * self.new_shape[1], 2)[wts_sum <= 1, :] = self.interpolate(synthesis_perturbed_label_map.reshape(self.new_shape[0] * self.new_shape[1], 2), vtx, wts) foreORbackground_label = np.zeros(self.new_shape) foreORbackground_label.reshape(self.new_shape[0] * self.new_shape[1], 1)[wts_sum <= 1, :] = self.interpolate(foreORbackground_label_map.reshape(self.new_shape[0] * self.new_shape[1], 1), vtx, wts) foreORbackground_label[foreORbackground_label < 0.99] = 0 foreORbackground_label[foreORbackground_label >= 0.99] = 1 # synthesis_perturbed_img = np.around(synthesis_perturbed_img).astype(np.uint8) synthesis_perturbed_label[:, :, 0] *= foreORbackground_label synthesis_perturbed_label[:, :, 1] *= foreORbackground_label synthesis_perturbed_img[:, :, 0] *= foreORbackground_label synthesis_perturbed_img[:, :, 1] *= foreORbackground_label synthesis_perturbed_img[:, :, 2] *= foreORbackground_label self.synthesis_perturbed_img = synthesis_perturbed_img self.synthesis_perturbed_label = synthesis_perturbed_label ''' '''perspective''' perspective_shreshold = random.randint(26, 36)*10 # 280 x_min_per, y_min_per, x_max_per, y_max_per = 
self.adjust_position(perspective_shreshold, perspective_shreshold, self.new_shape[0]-perspective_shreshold, self.new_shape[1]-perspective_shreshold) pts1 = np.float32([[x_min_per, y_min_per], [x_max_per, y_min_per], [x_min_per, y_max_per], [x_max_per, y_max_per]]) e_1_ = x_max_per - x_min_per e_2_ = y_max_per - y_min_per e_3_ = e_2_ e_4_ = e_1_ perspective_shreshold_h = e_1_*0.02 perspective_shreshold_w = e_2_*0.02 a_min_, a_max_ = 70, 110 # if self.is_perform(1, 0): if fold_curve == 'curve' and self.is_perform(0.5, 0.5): if self.is_perform(0.5, 0.5): while True: pts2 = np.around( np.float32([[x_min_per - (random.random()) * perspective_shreshold, y_min_per + (random.random()) * perspective_shreshold], [x_max_per - (random.random()) * perspective_shreshold, y_min_per - (random.random()) * perspective_shreshold], [x_min_per + (random.random()) * perspective_shreshold, y_max_per + (random.random()) * perspective_shreshold], [x_max_per + (random.random()) * perspective_shreshold, y_max_per - (random.random()) * perspective_shreshold]])) # right e_1 =
np.linalg.norm(pts2[0]-pts2[1])
numpy.linalg.norm
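The edge lengths measured with the norms above gate the randomly jittered corner set pts2; presumably the accepted quadrilateral is then turned into a homography (that downstream warp step is an assumption here, sketched with toy coordinates): import numpy as np
import cv2
pts1 = np.float32([[0, 0], [100, 0], [0, 80], [100, 80]])  # original corners
pts2 = np.float32([[3, 5], [98, 2], [4, 83], [103, 78]])   # jittered corners
e_1 = np.linalg.norm(pts2[0] - pts2[1])  # length of one quadrilateral edge
e_2 = np.linalg.norm(pts2[0] - pts2[2])  # length of an adjacent edge
M = cv2.getPerspectiveTransform(pts1, pts2)  # 3x3 perspective matrix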
""" YTArray class. """ from __future__ import print_function #----------------------------------------------------------------------------- # Copyright (c) 2013, yt Development Team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. #----------------------------------------------------------------------------- import copy import numpy as np from distutils.version import LooseVersion from functools import wraps from numpy import \ add, subtract, multiply, divide, logaddexp, logaddexp2, true_divide, \ floor_divide, negative, power, remainder, mod, absolute, rint, \ sign, conj, exp, exp2, log, log2, log10, expm1, log1p, sqrt, square, \ reciprocal, sin, cos, tan, arcsin, arccos, arctan, arctan2, \ hypot, sinh, cosh, tanh, arcsinh, arccosh, arctanh, deg2rad, rad2deg, \ bitwise_and, bitwise_or, bitwise_xor, invert, left_shift, right_shift, \ greater, greater_equal, less, less_equal, not_equal, equal, logical_and, \ logical_or, logical_xor, logical_not, maximum, minimum, fmax, fmin, \ isreal, iscomplex, isfinite, isinf, isnan, signbit, copysign, nextafter, \ modf, ldexp, frexp, fmod, floor, ceil, trunc, fabs, spacing try: # numpy 1.13 or newer from numpy import positive, divmod as divmod_, isnat, heaviside except ImportError: positive, divmod_, isnat, heaviside = (None,)*4 from yt.units.unit_object import Unit, UnitParseError from yt.units.unit_registry import UnitRegistry from yt.units.dimensions import \ angle, \ current_mks, \ dimensionless, \ em_dimensions from yt.utilities.exceptions import \ YTUnitOperationError, YTUnitConversionError, \ YTUfuncUnitError, YTIterableUnitCoercionError, \ YTInvalidUnitEquivalence, YTEquivalentDimsError from yt.utilities.lru_cache import lru_cache from numbers import Number as numeric_type from yt.utilities.on_demand_imports import _astropy from sympy import Rational from yt.units.unit_lookup_table import \ default_unit_symbol_lut from yt.units.equivalencies import equivalence_registry from yt.utilities.logger import ytLogger as mylog from .pint_conversions import convert_pint_units NULL_UNIT = Unit() POWER_SIGN_MAPPING = {multiply: 1, divide: -1} # redefine this here to avoid a circular import from yt.funcs def iterable(obj): try: len(obj) except: return False return True def return_arr(func): @wraps(func) def wrapped(*args, **kwargs): ret, units = func(*args, **kwargs) if ret.shape == (): return YTQuantity(ret, units) else: # This could be a subclass, so don't call YTArray directly. 
return type(args[0])(ret, units) return wrapped @lru_cache(maxsize=128, typed=False) def sqrt_unit(unit): return unit**0.5 @lru_cache(maxsize=128, typed=False) def multiply_units(unit1, unit2): return unit1 * unit2 def preserve_units(unit1, unit2=None): return unit1 @lru_cache(maxsize=128, typed=False) def power_unit(unit, power): return unit**power @lru_cache(maxsize=128, typed=False) def square_unit(unit): return unit*unit @lru_cache(maxsize=128, typed=False) def divide_units(unit1, unit2): return unit1/unit2 @lru_cache(maxsize=128, typed=False) def reciprocal_unit(unit): return unit**-1 def passthrough_unit(unit, unit2=None): return unit def return_without_unit(unit, unit2=None): return None def arctan2_unit(unit1, unit2): return NULL_UNIT def comparison_unit(unit1, unit2=None): return None def invert_units(unit): raise TypeError( "Bit-twiddling operators are not defined for YTArray instances") def bitop_units(unit1, unit2): raise TypeError( "Bit-twiddling operators are not defined for YTArray instances") def get_inp_u_unary(ufunc, inputs, out_arr=None): inp = inputs[0] u = getattr(inp, 'units', None) if u is None: u = NULL_UNIT if u.dimensions is angle and ufunc in trigonometric_operators: inp = inp.in_units('radian').v if out_arr is not None: out_arr = ufunc(inp).view(np.ndarray) return out_arr, inp, u def get_inp_u_binary(ufunc, inputs): inp1 = coerce_iterable_units(inputs[0]) inp2 = coerce_iterable_units(inputs[1]) unit1 = getattr(inp1, 'units', None) unit2 = getattr(inp2, 'units', None) ret_class = get_binary_op_return_class(type(inp1), type(inp2)) if unit1 is None: unit1 = Unit(registry=getattr(unit2, 'registry', None)) if unit2 is None and ufunc is not power: unit2 = Unit(registry=getattr(unit1, 'registry', None)) elif ufunc is power: unit2 = inp2 if isinstance(unit2, np.ndarray): if isinstance(unit2, YTArray): if unit2.units.is_dimensionless: pass else: raise YTUnitOperationError(ufunc, unit1, unit2) unit2 = 1.0 return (inp1, inp2), (unit1, unit2), ret_class def handle_preserve_units(inps, units, ufunc, ret_class): if units[0] != units[1]: any_nonzero = [np.any(inps[0]), np.any(inps[1])] if any_nonzero[0] == np.bool_(False): units = (units[1], units[1]) elif any_nonzero[1] == np.bool_(False): units = (units[0], units[0]) else: if not units[0].same_dimensions_as(units[1]): raise YTUnitOperationError(ufunc, *units) inps = (inps[0], ret_class(inps[1]).to( ret_class(inps[0]).units)) return inps, units def handle_comparison_units(inps, units, ufunc, ret_class, raise_error=False): if units[0] != units[1]: u1d = units[0].is_dimensionless u2d = units[1].is_dimensionless any_nonzero = [np.any(inps[0]), np.any(inps[1])] if any_nonzero[0] == np.bool_(False): units = (units[1], units[1]) elif any_nonzero[1] == np.bool_(False): units = (units[0], units[0]) elif not any([u1d, u2d]): if not units[0].same_dimensions_as(units[1]): raise YTUnitOperationError(ufunc, *units) else: if raise_error: raise YTUfuncUnitError(ufunc, *units) inps = (inps[0], ret_class(inps[1]).to( ret_class(inps[0]).units)) return inps, units def handle_multiply_divide_units(unit, units, out, out_arr): if unit.is_dimensionless and unit.base_value != 1.0: if not units[0].is_dimensionless: if units[0].dimensions == units[1].dimensions: out_arr = np.multiply(out_arr.view(np.ndarray), unit.base_value, out=out) unit = Unit(registry=unit.registry) return out, out_arr, unit def coerce_iterable_units(input_object): if isinstance(input_object, np.ndarray): return input_object if iterable(input_object): if any([isinstance(o, 
YTArray) for o in input_object]): ff = getattr(input_object[0], 'units', NULL_UNIT, ) if any([ff != getattr(_, 'units', NULL_UNIT) for _ in input_object]): raise YTIterableUnitCoercionError(input_object) # This will create a copy of the data in the iterable. return YTArray(input_object) return input_object else: return input_object def sanitize_units_mul(this_object, other_object): inp = coerce_iterable_units(this_object) ret = coerce_iterable_units(other_object) # If the other object is a YTArray and has the same dimensions as the object # under consideration, convert so we don't mix units with the same # dimensions. if isinstance(ret, YTArray): if inp.units.same_dimensions_as(ret.units): ret.in_units(inp.units) return ret def sanitize_units_add(this_object, other_object, op_string): inp = coerce_iterable_units(this_object) ret = coerce_iterable_units(other_object) # Make sure the other object is a YTArray before we use the `units` # attribute. if isinstance(ret, YTArray): if not inp.units.same_dimensions_as(ret.units): # handle special case of adding or subtracting with zero or # array filled with zero if not np.any(other_object): return ret.view(np.ndarray) elif not np.any(this_object): return ret raise YTUnitOperationError(op_string, inp.units, ret.units) ret = ret.in_units(inp.units) else: # If the other object is not a YTArray, then one of the arrays must be # dimensionless or filled with zeros if not inp.units.is_dimensionless and np.any(ret): raise YTUnitOperationError(op_string, inp.units, dimensionless) return ret def validate_comparison_units(this, other, op_string): # Check that other is a YTArray. if hasattr(other, 'units'): if this.units.expr is other.units.expr: if this.units.base_value == other.units.base_value: return other if not this.units.same_dimensions_as(other.units): raise YTUnitOperationError(op_string, this.units, other.units) return other.in_units(this.units) return other @lru_cache(maxsize=128, typed=False) def _unit_repr_check_same(my_units, other_units): """ Takes a Unit object, or string of known unit symbol, and check that it is compatible with this quantity. Returns Unit object. """ # let Unit() handle units arg if it's not already a Unit obj. 
if not isinstance(other_units, Unit): other_units = Unit(other_units, registry=my_units.registry) equiv_dims = em_dimensions.get(my_units.dimensions, None) if equiv_dims == other_units.dimensions: if current_mks in equiv_dims.free_symbols: base = "SI" else: base = "CGS" raise YTEquivalentDimsError(my_units, other_units, base) if not my_units.same_dimensions_as(other_units): raise YTUnitConversionError( my_units, my_units.dimensions, other_units, other_units.dimensions) return other_units unary_operators = ( negative, absolute, rint, sign, conj, exp, exp2, log, log2, log10, expm1, log1p, sqrt, square, reciprocal, sin, cos, tan, arcsin, arccos, arctan, sinh, cosh, tanh, arcsinh, arccosh, arctanh, deg2rad, rad2deg, invert, logical_not, isreal, iscomplex, isfinite, isinf, isnan, signbit, floor, ceil, trunc, modf, frexp, fabs, spacing, positive, isnat, ) binary_operators = ( add, subtract, multiply, divide, logaddexp, logaddexp2, true_divide, power, remainder, mod, arctan2, hypot, bitwise_and, bitwise_or, bitwise_xor, left_shift, right_shift, greater, greater_equal, less, less_equal, not_equal, equal, logical_and, logical_or, logical_xor, maximum, minimum, fmax, fmin, copysign, nextafter, ldexp, fmod, divmod_, heaviside ) trigonometric_operators = ( sin, cos, tan, ) class YTArray(np.ndarray): """ An ndarray subclass that attaches a symbolic unit object to the array data. Parameters ---------- input_array : :obj:`!iterable` A tuple, list, or array to attach units to input_units : String unit specification, unit symbol object, or astropy units The units of the array. Powers must be specified using python syntax (cm**3, not cm^3). registry : ~yt.units.unit_registry.UnitRegistry The registry to create units from. If input_units is already associated with a unit registry and this is specified, this will be used instead of the registry associated with the unit object. dtype : data-type The dtype of the array data. Defaults to the dtype of the input data, or, if none is found, uses np.float64 bypass_validation : boolean If True, all input validation is skipped. Using this option may produce corrupted, invalid units or array data, but can lead to significant speedups in the input validation logic adds significant overhead. If set, input_units *must* be a valid unit object. Defaults to False. Examples -------- >>> from yt import YTArray >>> a = YTArray([1, 2, 3], 'cm') >>> b = YTArray([4, 5, 6], 'm') >>> a + b YTArray([ 401., 502., 603.]) cm >>> b + a YTArray([ 4.01, 5.02, 6.03]) m NumPy ufuncs will pass through units where appropriate. >>> import numpy as np >>> a = YTArray(np.arange(8) - 4, 'g/cm**3') >>> np.abs(a) YTArray([4, 3, 2, 1, 0, 1, 2, 3]) g/cm**3 and strip them when it would be annoying to deal with them. >>> np.log10(a) array([ -inf, 0. 
, 0.30103 , 0.47712125, 0.60205999, 0.69897 , 0.77815125, 0.84509804]) YTArray is tightly integrated with yt datasets: >>> import yt >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030') >>> a = ds.arr(np.ones(5), 'code_length') >>> a.in_cgs() YTArray([ 3.08600000e+24, 3.08600000e+24, 3.08600000e+24, 3.08600000e+24, 3.08600000e+24]) cm This is equivalent to: >>> b = YTArray(np.ones(5), 'code_length', registry=ds.unit_registry) >>> np.all(a == b) True """ _ufunc_registry = { add: preserve_units, subtract: preserve_units, multiply: multiply_units, divide: divide_units, logaddexp: return_without_unit, logaddexp2: return_without_unit, true_divide: divide_units, floor_divide: divide_units, negative: passthrough_unit, power: power_unit, remainder: preserve_units, mod: preserve_units, fmod: preserve_units, absolute: passthrough_unit, fabs: passthrough_unit, rint: return_without_unit, sign: return_without_unit, conj: passthrough_unit, exp: return_without_unit, exp2: return_without_unit, log: return_without_unit, log2: return_without_unit, log10: return_without_unit, expm1: return_without_unit, log1p: return_without_unit, sqrt: sqrt_unit, square: square_unit, reciprocal: reciprocal_unit, sin: return_without_unit, cos: return_without_unit, tan: return_without_unit, sinh: return_without_unit, cosh: return_without_unit, tanh: return_without_unit, arcsin: return_without_unit, arccos: return_without_unit, arctan: return_without_unit, arctan2: arctan2_unit, arcsinh: return_without_unit, arccosh: return_without_unit, arctanh: return_without_unit, hypot: preserve_units, deg2rad: return_without_unit, rad2deg: return_without_unit, bitwise_and: bitop_units, bitwise_or: bitop_units, bitwise_xor: bitop_units, invert: invert_units, left_shift: bitop_units, right_shift: bitop_units, greater: comparison_unit, greater_equal: comparison_unit, less: comparison_unit, less_equal: comparison_unit, not_equal: comparison_unit, equal: comparison_unit, logical_and: comparison_unit, logical_or: comparison_unit, logical_xor: comparison_unit, logical_not: return_without_unit, maximum: preserve_units, minimum: preserve_units, fmax: preserve_units, fmin: preserve_units, isreal: return_without_unit, iscomplex: return_without_unit, isfinite: return_without_unit, isinf: return_without_unit, isnan: return_without_unit, signbit: return_without_unit, copysign: passthrough_unit, nextafter: preserve_units, modf: passthrough_unit, ldexp: bitop_units, frexp: return_without_unit, floor: passthrough_unit, ceil: passthrough_unit, trunc: passthrough_unit, spacing: passthrough_unit, positive: passthrough_unit, divmod_: passthrough_unit, isnat: return_without_unit, heaviside: preserve_units, } __array_priority__ = 2.0 def __new__(cls, input_array, input_units=None, registry=None, dtype=None, bypass_validation=False): if dtype is None: dtype = getattr(input_array, 'dtype', np.float64) if bypass_validation is True: obj = np.asarray(input_array, dtype=dtype).view(cls) obj.units = input_units if registry is not None: obj.units.registry = registry return obj if input_array is NotImplemented: return input_array.view(cls) if registry is None and isinstance(input_units, (str, bytes)): if input_units.startswith('code_'): raise UnitParseError( "Code units used without referring to a dataset. 
\n" "Perhaps you meant to do something like this instead: \n" "ds.arr(%s, \"%s\")" % (input_array, input_units) ) if isinstance(input_array, YTArray): ret = input_array.view(cls) if input_units is None: if registry is None: ret.units = input_array.units else: units = Unit(str(input_array.units), registry=registry) ret.units = units elif isinstance(input_units, Unit): ret.units = input_units else: ret.units = Unit(input_units, registry=registry) return ret elif isinstance(input_array, np.ndarray): pass elif iterable(input_array) and input_array: if isinstance(input_array[0], YTArray): return YTArray(np.array(input_array, dtype=dtype), input_array[0].units, registry=registry) # Input array is an already formed ndarray instance # We first cast to be our class type obj = np.asarray(input_array, dtype=dtype).view(cls) # Check units type if input_units is None: # Nothing provided. Make dimensionless... units = Unit() elif isinstance(input_units, Unit): if registry and registry is not input_units.registry: units = Unit(str(input_units), registry=registry) else: units = input_units else: # units kwarg set, but it's not a Unit object. # don't handle all the cases here, let the Unit class handle if # it's a str. units = Unit(input_units, registry=registry) # Attach the units obj.units = units return obj def __repr__(self): """ """ return super(YTArray, self).__repr__()+' '+self.units.__repr__() def __str__(self): """ """ return str(self.view(np.ndarray)) + ' ' + str(self.units) # # Start unit conversion methods # def convert_to_units(self, units): """ Convert the array and units to the given units. Parameters ---------- units : Unit object or str The units you want to convert to. """ new_units = _unit_repr_check_same(self.units, units) (conversion_factor, offset) = self.units.get_conversion_factor(new_units) self.units = new_units values = self.d values *= conversion_factor if offset: np.subtract(self, offset*self.uq, self) return self def convert_to_base(self, unit_system="cgs"): """ Convert the array and units to the equivalent base units in the specified unit system. Parameters ---------- unit_system : string, optional The unit system to be used in the conversion. If not specified, the default base units of cgs are used. Examples -------- >>> E = YTQuantity(2.5, "erg/s") >>> E.convert_to_base(unit_system="galactic") """ return self.convert_to_units(self.units.get_base_equivalent(unit_system)) def convert_to_cgs(self): """ Convert the array and units to the equivalent cgs units. """ return self.convert_to_units(self.units.get_cgs_equivalent()) def convert_to_mks(self): """ Convert the array and units to the equivalent mks units. """ return self.convert_to_units(self.units.get_mks_equivalent()) def in_units(self, units, equivalence=None, **kwargs): """ Creates a copy of this array with the data in the supplied units, and returns it. Optionally, an equivalence can be specified to convert to an equivalent quantity which is not in the same dimensions. .. note:: All additional keyword arguments are passed to the equivalency, which should be used if that particular equivalency requires them. Parameters ---------- units : Unit object or string The units you want to get a new quantity in. equivalence : string, optional The equivalence you wish to use. To see which equivalencies are supported for this unitful quantity, try the :meth:`list_equivalencies` method. 
Default: None Returns ------- YTArray """ if equivalence is None: new_units = _unit_repr_check_same(self.units, units) (conversion_factor, offset) = self.units.get_conversion_factor(new_units) new_array = type(self)(self.ndview * conversion_factor, new_units) if offset: np.subtract(new_array, offset*new_array.uq, new_array) return new_array else: return self.to_equivalent(units, equivalence, **kwargs) def to(self, units, equivalence=None, **kwargs): """ An alias for YTArray.in_units(). See the docstrings of that function for details. """ return self.in_units(units, equivalence=equivalence, **kwargs) def to_value(self, units=None, equivalence=None, **kwargs): """ Creates a copy of this array with the data in the supplied units, and returns it without units. Output is therefore a bare NumPy array. Optionally, an equivalence can be specified to convert to an equivalent quantity which is not in the same dimensions. .. note:: All additional keyword arguments are passed to the equivalency, which should be used if that particular equivalency requires them. Parameters ---------- units : Unit object or string, optional The units you want to get the bare quantity in. If not specified, the value will be returned in the current units. equivalence : string, optional The equivalence you wish to use. To see which equivalencies are supported for this unitful quantity, try the :meth:`list_equivalencies` method. Default: None Returns ------- NumPy array """ if units is None: v = self.value else: v = self.in_units(units, equivalence=equivalence, **kwargs).value if isinstance(self, YTQuantity): return float(v) else: return v def in_base(self, unit_system="cgs"): """ Creates a copy of this array with the data in the specified unit system, and returns it in that system's base units. Parameters ---------- unit_system : string, optional The unit system to be used in the conversion. If not specified, the default base units of cgs are used. Examples -------- >>> E = YTQuantity(2.5, "erg/s") >>> E_new = E.in_base(unit_system="galactic") """ return self.in_units(self.units.get_base_equivalent(unit_system)) def in_cgs(self): """ Creates a copy of this array with the data in the equivalent cgs units, and returns it. Returns ------- Quantity object with data converted to cgs units. """ return self.in_units(self.units.get_cgs_equivalent()) def in_mks(self): """ Creates a copy of this array with the data in the equivalent mks units, and returns it. Returns ------- Quantity object with data converted to mks units. """ return self.in_units(self.units.get_mks_equivalent()) def to_equivalent(self, unit, equiv, **kwargs): """ Convert a YTArray or YTQuantity to an equivalent, e.g., something that is related by only a constant factor but not in the same units. Parameters ---------- unit : string The unit that you wish to convert to. equiv : string The equivalence you wish to use. To see which equivalencies are supported for this unitful quantity, try the :meth:`list_equivalencies` method. 
Examples -------- >>> import yt >>> a = yt.YTArray(1.0e7,"K") >>> a.to_equivalent("keV", "thermal") """ conv_unit = Unit(unit, registry=self.units.registry) if self.units.same_dimensions_as(conv_unit): return self.in_units(conv_unit) this_equiv = equivalence_registry[equiv]() oneway_or_equivalent = ( conv_unit.has_equivalent(equiv) or this_equiv._one_way) if self.has_equivalent(equiv) and oneway_or_equivalent: new_arr = this_equiv.convert( self, conv_unit.dimensions, **kwargs) if isinstance(new_arr, tuple): try: return type(self)(new_arr[0], new_arr[1]).in_units(unit) except YTUnitConversionError: raise YTInvalidUnitEquivalence(equiv, self.units, unit) else: return new_arr.in_units(unit) else: raise YTInvalidUnitEquivalence(equiv, self.units, unit) def list_equivalencies(self): """ Lists the possible equivalencies associated with this YTArray or YTQuantity. """ self.units.list_equivalencies() def has_equivalent(self, equiv): """ Check to see if this YTArray or YTQuantity has an equivalent unit in *equiv*. """ return self.units.has_equivalent(equiv) def ndarray_view(self): """ Returns a view into the array, but as an ndarray rather than ytarray. Returns ------- View of this array's data. """ return self.view(np.ndarray) def to_ndarray(self): """ Creates a copy of this array with the unit information stripped """ return np.array(self) @classmethod def from_astropy(cls, arr, unit_registry=None): """ Convert an AstroPy "Quantity" to a YTArray or YTQuantity. Parameters ---------- arr : AstroPy Quantity The Quantity to convert from. unit_registry : yt UnitRegistry, optional A yt unit registry to use in the conversion. If one is not supplied, the default one will be used. """ # Converting from AstroPy Quantity u = arr.unit ap_units = [] for base, exponent in zip(u.bases, u.powers): unit_str = base.to_string() # we have to do this because AstroPy is silly and defines # hour as "h" if unit_str == "h": unit_str = "hr" ap_units.append("%s**(%s)" % (unit_str, Rational(exponent))) ap_units = "*".join(ap_units) if isinstance(arr.value, np.ndarray): return YTArray(arr.value, ap_units, registry=unit_registry) else: return YTQuantity(arr.value, ap_units, registry=unit_registry) def to_astropy(self, **kwargs): """ Creates a new AstroPy quantity with the same unit information. """ if _astropy.units is None: raise ImportError("You don't have AstroPy installed, so you can't convert to " + "an AstroPy quantity.") return self.value*_astropy.units.Unit(str(self.units), **kwargs) @classmethod def from_pint(cls, arr, unit_registry=None): """ Convert a Pint "Quantity" to a YTArray or YTQuantity. Parameters ---------- arr : Pint Quantity The Quantity to convert from. unit_registry : yt UnitRegistry, optional A yt unit registry to use in the conversion. If one is not supplied, the default one will be used. Examples -------- >>> from pint import UnitRegistry >>> import numpy as np >>> import yt >>> ureg = UnitRegistry() >>> a = np.random.random(10) >>> b = ureg.Quantity(a, "erg/cm**3") >>> c = yt.YTArray.from_pint(b) """ p_units = [] for base, exponent in arr._units.items(): bs = convert_pint_units(base) p_units.append("%s**(%s)" % (bs, Rational(exponent))) p_units = "*".join(p_units) if isinstance(arr.magnitude, np.ndarray): return YTArray(arr.magnitude, p_units, registry=unit_registry) else: return YTQuantity(arr.magnitude, p_units, registry=unit_registry) def to_pint(self, unit_registry=None): """ Convert a YTArray or YTQuantity to a Pint Quantity. Parameters ---------- arr : YTArray or YTQuantity The unitful quantity to convert from.
unit_registry : Pint UnitRegistry, optional The Pint UnitRegistry to use in the conversion. If one is not supplied, the default one will be used. NOTE: This is not the same as a yt UnitRegistry object. Examples -------- >>> a = YTQuantity(4.0, "cm**2/s") >>> b = a.to_pint() """ from pint import UnitRegistry if unit_registry is None: unit_registry = UnitRegistry() powers_dict = self.units.expr.as_powers_dict() units = [] for unit, pow in powers_dict.items(): # we have to do this because Pint doesn't recognize # "yr" as "year" if str(unit).endswith("yr") and len(str(unit)) in [2,3]: unit = str(unit).replace("yr","year") units.append("%s**(%s)" % (unit, Rational(pow))) units = "*".join(units) return unit_registry.Quantity(self.value, units) # # End unit conversion methods # def write_hdf5(self, filename, dataset_name=None, info=None, group_name=None): r"""Writes a YTArray to hdf5 file. Parameters ---------- filename: string The filename to create and write a dataset to dataset_name: string The name of the dataset to create in the file. info: dictionary A dictionary of supplementary info to append as attributes to the dataset. group_name: string An optional group to write the arrays to. If not specified, the arrays are datasets at the top level by default. Examples -------- >>> a = YTArray([1,2,3], 'cm') >>> myinfo = {'field':'dinosaurs', 'type':'field_data'} >>> a.write_hdf5('test_array_data.h5', dataset_name='dinosaurs', ... info=myinfo) """ from yt.utilities.on_demand_imports import _h5py as h5py from yt.extern.six.moves import cPickle as pickle if info is None: info = {} info['units'] = str(self.units) info['unit_registry'] = np.void(pickle.dumps(self.units.registry.lut)) if dataset_name is None: dataset_name = 'array_data' f = h5py.File(filename, 'a') if group_name is not None: if group_name in f: g = f[group_name] else: g = f.create_group(group_name) else: g = f if dataset_name in g.keys(): d = g[dataset_name] # Overwrite without deleting if we can get away with it. if d.shape == self.shape and d.dtype == self.dtype: d[...] = self for k in d.attrs.keys(): del d.attrs[k] else: del g[dataset_name] d = g.create_dataset(dataset_name, data=self) else: d = g.create_dataset(dataset_name, data=self) for k, v in info.items(): d.attrs[k] = v f.close() @classmethod def from_hdf5(cls, filename, dataset_name=None, group_name=None): r"""Attempts to read in and convert a dataset in an hdf5 file into a YTArray. Parameters ---------- filename: string The filename of the hdf5 file. dataset_name: string The name of the dataset to read from. If the dataset has a units attribute, attempt to infer units as well. group_name: string An optional group to read the arrays from. If not specified, the arrays are datasets at the top level by default.
""" import h5py from yt.extern.six.moves import cPickle as pickle if dataset_name is None: dataset_name = 'array_data' f = h5py.File(filename) if group_name is not None: g = f[group_name] else: g = f dataset = g[dataset_name] data = dataset[:] units = dataset.attrs.get('units', '') if 'unit_registry' in dataset.attrs.keys(): unit_lut = pickle.loads(dataset.attrs['unit_registry'].tostring()) else: unit_lut = None f.close() registry = UnitRegistry(lut=unit_lut, add_default_symbols=False) return cls(data, units, registry=registry) # # Start convenience methods # @property def value(self): """Get a copy of the array data as a numpy ndarray""" return np.array(self) v = value @property def ndview(self): """Get a view of the array data.""" return self.ndarray_view() d = ndview @property def unit_quantity(self): """Get a YTQuantity with the same unit as this array and a value of 1.0""" return YTQuantity(1.0, self.units) uq = unit_quantity @property def unit_array(self): """Get a YTArray filled with ones with the same unit and shape as this array""" return np.ones_like(self) ua = unit_array def __getitem__(self, item): ret = super(YTArray, self).__getitem__(item) if ret.shape == (): return YTQuantity(ret, self.units, bypass_validation=True) else: if hasattr(self, 'units'): ret.units = self.units return ret # # Start operation methods # if LooseVersion(np.__version__) < LooseVersion('1.13.0'): def __add__(self, right_object): """ Add this ytarray to the object on the right of the `+` operator. Must check for the correct (same dimension) units. """ ro = sanitize_units_add(self, right_object, "addition") return super(YTArray, self).__add__(ro) def __radd__(self, left_object): """ See __add__. """ lo = sanitize_units_add(self, left_object, "addition") return super(YTArray, self).__radd__(lo) def __iadd__(self, other): """ See __add__. """ oth = sanitize_units_add(self, other, "addition") np.add(self, oth, out=self) return self def __sub__(self, right_object): """ Subtract the object on the right of the `-` from this ytarray. Must check for the correct (same dimension) units. """ ro = sanitize_units_add(self, right_object, "subtraction") return super(YTArray, self).__sub__(ro) def __rsub__(self, left_object): """ See __sub__. """ lo = sanitize_units_add(self, left_object, "subtraction") return super(YTArray, self).__rsub__(lo) def __isub__(self, other): """ See __sub__. """ oth = sanitize_units_add(self, other, "subtraction") np.subtract(self, oth, out=self) return self def __neg__(self): """ Negate the data. """ return super(YTArray, self).__neg__() def __mul__(self, right_object): """ Multiply this YTArray by the object on the right of the `*` operator. The unit objects handle being multiplied. """ ro = sanitize_units_mul(self, right_object) return super(YTArray, self).__mul__(ro) def __rmul__(self, left_object): """ See __mul__. """ lo = sanitize_units_mul(self, left_object) return super(YTArray, self).__rmul__(lo) def __imul__(self, other): """ See __mul__. """ oth = sanitize_units_mul(self, other) np.multiply(self, oth, out=self) return self def __div__(self, right_object): """ Divide this YTArray by the object on the right of the `/` operator. """ ro = sanitize_units_mul(self, right_object) return super(YTArray, self).__div__(ro) def __rdiv__(self, left_object): """ See __div__. """ lo = sanitize_units_mul(self, left_object) return super(YTArray, self).__rdiv__(lo) def __idiv__(self, other): """ See __div__. 
""" oth = sanitize_units_mul(self, other) np.divide(self, oth, out=self) return self def __truediv__(self, right_object): ro = sanitize_units_mul(self, right_object) return super(YTArray, self).__truediv__(ro) def __rtruediv__(self, left_object): """ See __div__. """ lo = sanitize_units_mul(self, left_object) return super(YTArray, self).__rtruediv__(lo) def __itruediv__(self, other): """ See __div__. """ oth = sanitize_units_mul(self, other) np.true_divide(self, oth, out=self) return self def __floordiv__(self, right_object): ro = sanitize_units_mul(self, right_object) return super(YTArray, self).__floordiv__(ro) def __rfloordiv__(self, left_object): """ See __div__. """ lo = sanitize_units_mul(self, left_object) return super(YTArray, self).__rfloordiv__(lo) def __ifloordiv__(self, other): """ See __div__. """ oth = sanitize_units_mul(self, other) np.floor_divide(self, oth, out=self) return self def __or__(self, right_object): return super(YTArray, self).__or__(right_object) def __ror__(self, left_object): return super(YTArray, self).__ror__(left_object) def __ior__(self, other): np.bitwise_or(self, other, out=self) return self def __xor__(self, right_object): return super(YTArray, self).__xor__(right_object) def __rxor__(self, left_object): return super(YTArray, self).__rxor__(left_object) def __ixor__(self, other): np.bitwise_xor(self, other, out=self) return self def __and__(self, right_object): return super(YTArray, self).__and__(right_object) def __rand__(self, left_object): return super(YTArray, self).__rand__(left_object) def __iand__(self, other): np.bitwise_and(self, other, out=self) return self def __pow__(self, power): """ Raise this YTArray to some power. Parameters ---------- power : float or dimensionless YTArray. The pow value. """ if isinstance(power, YTArray): if not power.units.is_dimensionless: raise YTUnitOperationError('power', power.unit) # Work around a sympy issue (I think?) # # If I don't do this, super(YTArray, self).__pow__ returns a YTArray # with a unit attribute set to the sympy expression 1/1 rather than # a dimensionless Unit object. if self.units.is_dimensionless and power == -1: ret = super(YTArray, self).__pow__(power) return type(self)(ret, input_units='') return super(YTArray, self).__pow__(power) def __abs__(self): """ Return a YTArray with the abs of the data. """ return super(YTArray, self).__abs__() # # Start comparison operators. # def __lt__(self, other): """ Test if this is less than the object on the right. """ # converts if possible oth = validate_comparison_units(self, other, 'less_than') return super(YTArray, self).__lt__(oth) def __le__(self, other): """Test if this is less than or equal to the object on the right. """ oth = validate_comparison_units(self, other, 'less_than or equal') return super(YTArray, self).__le__(oth) def __eq__(self, other): """ Test if this is equal to the object on the right. """ # Check that other is a YTArray. if other is None: # self is a YTArray, so it can't be None. return False oth = validate_comparison_units(self, other, 'equal') return super(YTArray, self).__eq__(oth) def __ne__(self, other): """ Test if this is not equal to the object on the right. """ # Check that the other is a YTArray. if other is None: return True oth = validate_comparison_units(self, other, 'not equal') return super(YTArray, self).__ne__(oth) def __ge__(self, other): """ Test if this is greater than or equal to other. """ # Check that the other is a YTArray. 
oth = validate_comparison_units( self, other, 'greater than or equal') return super(YTArray, self).__ge__(oth) def __gt__(self, other): """ Test if this is greater than the object on the right. """ # Check that the other is a YTArray. oth = validate_comparison_units(self, other, 'greater than') return super(YTArray, self).__gt__(oth) # # End comparison operators # # # Begin reduction operators # @return_arr def prod(self, axis=None, dtype=None, out=None): if axis is not None: units = self.units**self.shape[axis] else: units = self.units**self.size return super(YTArray, self).prod(axis, dtype, out), units @return_arr def mean(self, axis=None, dtype=None, out=None): return super(YTArray, self).mean(axis, dtype, out), self.units @return_arr def sum(self, axis=None, dtype=None, out=None): return super(YTArray, self).sum(axis, dtype, out), self.units @return_arr def std(self, axis=None, dtype=None, out=None, ddof=0): return super(YTArray, self).std(axis, dtype, out, ddof), self.units def __array_wrap__(self, out_arr, context=None): ret = super(YTArray, self).__array_wrap__(out_arr, context) if isinstance(ret, YTQuantity) and ret.shape != (): ret = ret.view(YTArray) if context is None: if ret.shape == (): return ret[()] else: return ret ufunc = context[0] inputs = context[1] if ufunc in unary_operators: out_arr, inp, u = get_inp_u_unary(ufunc, inputs, out_arr) unit = self._ufunc_registry[context[0]](u) ret_class = type(self) elif ufunc in binary_operators: unit_operator = self._ufunc_registry[context[0]] inps, units, ret_class = get_inp_u_binary(ufunc, inputs) if unit_operator in (preserve_units, comparison_unit, arctan2_unit): inps, units = handle_comparison_units( inps, units, ufunc, ret_class, raise_error=True) unit = unit_operator(*units) if unit_operator in (multiply_units, divide_units): out_arr, out_arr, unit = handle_multiply_divide_units( unit, units, out_arr, out_arr) else: raise RuntimeError( "Support for the %s ufunc has not been added " "to YTArray." % str(context[0])) if unit is None: out_arr = np.array(out_arr, copy=False) return out_arr out_arr.units = unit if out_arr.size == 1: return YTQuantity(np.array(out_arr), unit) else: if ret_class is YTQuantity: # This happens if you do ndarray * YTQuantity. 
Explicitly # casting to YTArray avoids creating a YTQuantity with # size > 1 return YTArray(np.array(out_arr), unit) return ret_class(np.array(out_arr, copy=False), unit) else: # numpy version equal to or newer than 1.13 def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): func = getattr(ufunc, method) if 'out' in kwargs: out_orig = kwargs.pop('out') out = np.asarray(out_orig[0]) else: out = None if len(inputs) == 1: _, inp, u = get_inp_u_unary(ufunc, inputs) out_arr = func(np.asarray(inp), out=out, **kwargs) if ufunc in (multiply, divide) and method == 'reduce': power_sign = POWER_SIGN_MAPPING[ufunc] if 'axis' in kwargs and kwargs['axis'] is not None: unit = u**(power_sign*inp.shape[kwargs['axis']]) else: unit = u**(power_sign*inp.size) else: unit = self._ufunc_registry[ufunc](u) ret_class = type(self) elif len(inputs) == 2: unit_operator = self._ufunc_registry[ufunc] inps, units, ret_class = get_inp_u_binary(ufunc, inputs) if unit_operator in (comparison_unit, arctan2_unit): inps, units = handle_comparison_units( inps, units, ufunc, ret_class) elif unit_operator is preserve_units: inps, units = handle_preserve_units( inps, units, ufunc, ret_class) unit = unit_operator(*units) out_arr = func(np.asarray(inps[0]), np.asarray(inps[1]), out=out, **kwargs) if unit_operator in (multiply_units, divide_units): out, out_arr, unit = handle_multiply_divide_units( unit, units, out, out_arr) else: raise RuntimeError( "Support for the %s ufunc with %i inputs has not been " "added to YTArray." % (str(ufunc), len(inputs))) if unit is None: out_arr = np.array(out_arr, copy=False) elif ufunc in (modf, divmod_): out_arr = tuple((ret_class(o, unit) for o in out_arr)) elif out_arr.size == 1: out_arr = YTQuantity(
np.asarray(out_arr)
numpy.asarray
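The YTArray machinery above threads units through arithmetic, in-place operations, and NumPy ufuncs. What follows is a minimal usage sketch of the behaviour documented in the class docstring (assuming yt is installed and importable as below; exact repr/str formatting can differ between versions):

import numpy as np
from yt import YTArray, YTQuantity

a = YTArray([1.0, 2.0, 3.0], 'cm')
b = YTArray([4.0, 5.0, 6.0], 'm')

# sanitize_units_add converts the right-hand operand to the left-hand
# operand's units before adding, so the result is reported in cm.
print(a + b)                      # [ 401.  502.  603.] cm

# multiply_units composes the unit expressions symbolically.
print((a * b).units)              # cm*m

# in_units/to return converted copies; convert_to_units converts in place.
print(a.in_units('km'))           # [ 1.e-05  2.e-05  3.e-05] km

# to_equivalent converts across dimensions via a named equivalence.
T = YTQuantity(1.0e7, 'K')
print(T.to_equivalent('keV', 'thermal'))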
# ________ # / # \ / # \ / # \/ import random import textwrap import emd_mean import AdvEMDpy import emd_basis import emd_utils import numpy as np import pandas as pd import cvxpy as cvx import seaborn as sns import matplotlib.pyplot as plt from scipy.integrate import odeint from scipy.ndimage import gaussian_filter from emd_utils import time_extension, Utility from scipy.interpolate import CubicSpline from emd_hilbert import Hilbert, hilbert_spectrum from emd_preprocess import Preprocess from emd_mean import Fluctuation from AdvEMDpy import EMD # alternate packages from PyEMD import EMD as pyemd0215 import emd as emd040 sns.set(style='darkgrid') pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001) pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time) pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series) # plot 0 - addition fig = plt.figure(figsize=(9, 4)) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('First Iteration of Sifting Algorithm') plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1) plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()], c='r', label=r'$M(t_i)$', zorder=2) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4) plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()], c='c', label=r'$m(t_j)$', zorder=3) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5) plt.yticks(ticks=[-2, -1, 0, 1, 2]) plt.xticks(ticks=[0, np.pi, 2 * np.pi], labels=[r'0', r'$\pi$', r'$2\pi$']) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/pseudo_algorithm.png') plt.show() knots = np.arange(12) time = np.linspace(0, 11, 1101) basis = emd_basis.Basis(time=time, time_series=time) b_spline_basis = basis.cubic_b_spline(knots) chsi_basis = basis.chsi_basis(knots) # plot 1 plt.title('Non-Natural Cubic B-Spline Bases at Boundary') plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $') plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $') plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $') plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $') plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $') plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $']) plt.xlim(4.4, 6.6) plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.legend(loc='upper left') plt.savefig('jss_figures/boundary_bases.png') plt.show() # plot 1a - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) knots_uniform = np.linspace(0, 2 * np.pi, 51) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0] fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) 
plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Uniform Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Uniform Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Uniform Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots_uniform)): axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_uniform.png') plt.show() # plot 1b - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=1, verbose=False) fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Statically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Statically Optimised Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Statically Optimised Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots)): axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_1.png') plt.show() # plot 1c - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=2, verbose=False) fig,
axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Dynamically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Dynamically Optimised Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Dynamically Optimised Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots[i])): axs[i].plot(knots[i][j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_2.png') plt.show() # plot 1d - addition window = 81 fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Filtering Demonstration') axs[1].set_title('Zoomed Region') preprocess_time = pseudo_alg_time.copy() np.random.seed(1) random.seed(1) preprocess_time_series = pseudo_alg_time_series + np.random.normal(0, 0.1, len(preprocess_time)) for i in random.sample(range(1000), 500): preprocess_time_series[i] += np.random.normal(0, 1) preprocess = Preprocess(time=preprocess_time, time_series=preprocess_time_series) axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[0].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[0].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize filter', 12)) axs[0].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize interpolation filter', 14)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[1].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[1].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize filter', 12)) axs[1].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize interpolation filter', 14)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_filter.png') plt.show() # plot 1e - addition fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Smoothing Demonstration') axs[1].set_title('Zoomed Region') axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[0].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) downsampled_and_decimated = preprocess.downsample() axs[0].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 11)) downsampled = preprocess.downsample(decimate=False) axs[0].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[1].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13))
axs[1].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 13)) axs[1].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.06, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.06, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_smooth.png') plt.show() # plot 2 fig, axs = plt.subplots(1, 2, sharey=True) axs[0].set_title('Cubic B-Spline Bases') axs[0].plot(time, b_spline_basis[2, :].T, '--', label='Basis 1') axs[0].plot(time, b_spline_basis[3, :].T, '--', label='Basis 2') axs[0].plot(time, b_spline_basis[4, :].T, '--', label='Basis 3') axs[0].plot(time, b_spline_basis[5, :].T, '--', label='Basis 4') axs[0].legend(loc='upper left') axs[0].plot(5 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].plot(6 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].set_xticks([5, 6]) axs[0].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[0].set_xlim(4.5, 6.5) axs[1].set_title('Cubic Hermite Spline Bases') axs[1].plot(time, chsi_basis[10, :].T, '--') axs[1].plot(time, chsi_basis[11, :].T, '--') axs[1].plot(time, chsi_basis[12, :].T, '--') axs[1].plot(time, chsi_basis[13, :].T, '--') axs[1].plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].set_xticks([5, 6]) axs[1].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[1].set_xlim(4.5, 6.5) plt.savefig('jss_figures/comparing_bases.png') plt.show() # plot 3 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_dash = maxima_y[-1] * np.ones_like(max_dash_time) min_dash_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_dash = minima_y[-1] * np.ones_like(min_dash_time) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) max_discard = maxima_y[-1] max_discard_time = minima_x[-1] - maxima_x[-1] + minima_x[-1] max_discard_dash_time = np.linspace(max_discard_time - width, max_discard_time + width, 101) max_discard_dash = max_discard * np.ones_like(max_discard_dash_time) dash_2_time = np.linspace(minima_x[-1], max_discard_time, 101) dash_2 = np.linspace(minima_y[-1], max_discard, 101) end_point_time = time[-1] end_point = time_series[-1] time_reflect = np.linspace((5 - a) * np.pi, (5 + a) * np.pi, 101) time_series_reflect = np.flip(np.cos(np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101)) + np.cos(5 * np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101))) time_series_anti_reflect = time_series_reflect[0] - time_series_reflect utils = emd_utils.Utility(time=time, time_series=time_series_anti_reflect) anti_max_bool = utils.max_bool_func_1st_order_fd() anti_max_point_time = time_reflect[anti_max_bool] 
anti_max_point = time_series_anti_reflect[anti_max_bool] utils = emd_utils.Utility(time=time, time_series=time_series_reflect) no_anchor_max_time = time_reflect[utils.max_bool_func_1st_order_fd()] no_anchor_max = time_series_reflect[utils.max_bool_func_1st_order_fd()] point_1 = 5.4 length_distance = np.linspace(maxima_y[-1], minima_y[-1], 101) length_distance_time = point_1 * np.pi * np.ones_like(length_distance) length_time = np.linspace(point_1 * np.pi - width, point_1 * np.pi + width, 101) length_top = maxima_y[-1] * np.ones_like(length_time) length_bottom = minima_y[-1] * np.ones_like(length_time) point_2 = 5.2 length_distance_2 = np.linspace(time_series[-1], minima_y[-1], 101) length_distance_time_2 = point_2 * np.pi * np.ones_like(length_distance_2) length_time_2 = np.linspace(point_2 * np.pi - width, point_2 * np.pi + width, 101) length_top_2 = time_series[-1] * np.ones_like(length_time_2) length_bottom_2 = minima_y[-1] * np.ones_like(length_time_2) symmetry_axis_1_time = minima_x[-1] * np.ones(101) symmetry_axis_2_time = time[-1] * np.ones(101) symmetry_axis = np.linspace(-2, 2, 101) end_time = np.linspace(time[-1] - width, time[-1] + width, 101) end_signal = time_series[-1] * np.ones_like(end_time) anti_symmetric_time = np.linspace(time[-1] - 0.5, time[-1] + 0.5, 101) anti_symmetric_signal = time_series[-1] * np.ones_like(anti_symmetric_time) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.plot(time, time_series, linewidth=2, label='Signal') plt.title('Symmetry Edge Effects Example') plt.plot(time_reflect, time_series_reflect, 'g--', linewidth=2, label=textwrap.fill('Symmetric signal', 10)) plt.plot(time_reflect[:51], time_series_anti_reflect[:51], '--', c='purple', linewidth=2, label=textwrap.fill('Anti-symmetric signal', 10)) plt.plot(max_dash_time, max_dash, 'k-') plt.plot(min_dash_time, min_dash, 'k-') plt.plot(dash_1_time, dash_1, 'k--') plt.plot(dash_2_time, dash_2, 'k--') plt.plot(length_distance_time, length_distance, 'k--') plt.plot(length_distance_time_2, length_distance_2, 'k--') plt.plot(length_time, length_top, 'k-') plt.plot(length_time, length_bottom, 'k-') plt.plot(length_time_2, length_top_2, 'k-') plt.plot(length_time_2, length_bottom_2, 'k-') plt.plot(end_time, end_signal, 'k-') plt.plot(symmetry_axis_1_time, symmetry_axis, 'r--', zorder=1) plt.plot(anti_symmetric_time, anti_symmetric_signal, 'r--', zorder=1) plt.plot(symmetry_axis_2_time, symmetry_axis, 'r--', label=textwrap.fill('Axes of symmetry', 10), zorder=1) plt.text(5.1 * np.pi, -0.7, r'$\beta$L') plt.text(5.34 * np.pi, -0.05, 'L') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(max_discard_time, max_discard, c='purple', zorder=4, label=textwrap.fill('Symmetric Discard maxima', 10)) plt.scatter(end_point_time, end_point, c='orange', zorder=4, label=textwrap.fill('Symmetric Anchor maxima', 10)) plt.scatter(anti_max_point_time, anti_max_point, c='green', zorder=4, label=textwrap.fill('Anti-Symmetric maxima', 10)) plt.scatter(no_anchor_max_time, no_anchor_max, c='gray', zorder=4, label=textwrap.fill('Symmetric maxima', 10)) plt.xlim(3.9 * np.pi, 5.5 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_symmetry_anti.png') plt.show()
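The figure saved above illustrates the symmetric and anti-symmetric edge-effect extensions used when constructing envelopes near a boundary. A minimal numpy sketch of the underlying reflection idea (the helper name below is illustrative, not part of emd_utils, and conventions for the anti-symmetric variant differ slightly; the plotting code above anchors its reflection at the boundary value):

import numpy as np

def reflect_right_edge(time, series, n=100, anti=False):
    # Mirror the last n samples about the final time stamp t = time[-1].
    t_ext = 2 * time[-1] - time[-n - 1:-1][::-1]
    s_ext = series[-n - 1:-1][::-1]
    if anti:
        # Point-reflect about (time[-1], series[-1]) instead of mirroring,
        # which keeps the extension's slope continuous across the boundary.
        s_ext = 2 * series[-1] - s_ext
    return t_ext, s_ext

t = np.linspace(0, (5 - 0.25) * np.pi, 1001)
x = np.cos(t) + np.cos(5 * t)
t_sym, x_sym = reflect_right_edge(t, x)               # symmetric extension
t_anti, x_anti = reflect_right_edge(t, x, anti=True)  # anti-symmetric extension

Extrema detected on the extended portion then supply the extra spline anchors that stabilise the envelopes near the right edge.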
# plot 4 a = 0.21 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_1 = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_dash_2 = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_dash_time_1 = maxima_x[-1] * np.ones_like(max_dash_1) max_dash_time_2 = maxima_x[-2] * np.ones_like(max_dash_1) min_dash_1 = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_dash_2 = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_dash_time_1 = minima_x[-1] * np.ones_like(min_dash_1) min_dash_time_2 = minima_x[-2] * np.ones_like(min_dash_1) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) dash_2_time = np.linspace(maxima_x[-1], minima_x[-2], 101) dash_2 = np.linspace(maxima_y[-1], minima_y[-2], 101) s1 = (minima_y[-2] - maxima_y[-1]) / (minima_x[-2] - maxima_x[-1]) slope_based_maximum_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) slope_based_maximum = minima_y[-1] + (slope_based_maximum_time - minima_x[-1]) * s1 max_dash_time_3 = slope_based_maximum_time * np.ones_like(max_dash_1) max_dash_3 = np.linspace(slope_based_maximum - width, slope_based_maximum + width, 101) dash_3_time = np.linspace(minima_x[-1], slope_based_maximum_time, 101) dash_3 = np.linspace(minima_y[-1], slope_based_maximum, 101) s2 = (minima_y[-1] - maxima_y[-1]) / (minima_x[-1] - maxima_x[-1]) slope_based_minimum_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) slope_based_minimum = slope_based_maximum - (slope_based_maximum_time - slope_based_minimum_time) * s2 min_dash_time_3 = slope_based_minimum_time * np.ones_like(min_dash_1) min_dash_3 = np.linspace(slope_based_minimum - width, slope_based_minimum + width, 101) dash_4_time = np.linspace(slope_based_maximum_time, slope_based_minimum_time) dash_4 = np.linspace(slope_based_maximum, slope_based_minimum) maxima_dash = np.linspace(2.5 - width, 2.5 + width, 101) maxima_dash_time_1 = maxima_x[-2] * np.ones_like(maxima_dash) maxima_dash_time_2 = maxima_x[-1] * np.ones_like(maxima_dash) maxima_dash_time_3 = slope_based_maximum_time * np.ones_like(maxima_dash) maxima_line_dash_time = np.linspace(maxima_x[-2], slope_based_maximum_time, 101) maxima_line_dash = 2.5 * np.ones_like(maxima_line_dash_time) minima_dash = np.linspace(-3.4 - width, -3.4 + width, 101) minima_dash_time_1 = minima_x[-2] * np.ones_like(minima_dash) minima_dash_time_2 = minima_x[-1] * np.ones_like(minima_dash) minima_dash_time_3 = slope_based_minimum_time * np.ones_like(minima_dash) minima_line_dash_time = np.linspace(minima_x[-2], slope_based_minimum_time, 101) minima_line_dash = -3.4 * np.ones_like(minima_line_dash_time) # slightly edit signal to make difference between slope-based method and improved slope-based method more clear time_series[time >= minima_x[-1]] = 1.5 * (time_series[time >= minima_x[-1]] - time_series[time == minima_x[-1]]) + \ time_series[time == minima_x[-1]] improved_slope_based_maximum_time = time[-1] improved_slope_based_maximum = time_series[-1] improved_slope_based_minimum_time = slope_based_minimum_time improved_slope_based_minimum = improved_slope_based_maximum + s2 * (improved_slope_based_minimum_time - improved_slope_based_maximum_time) 
min_dash_4 = np.linspace(improved_slope_based_minimum - width, improved_slope_based_minimum + width, 101) min_dash_time_4 = improved_slope_based_minimum_time * np.ones_like(min_dash_4) dash_final_time = np.linspace(improved_slope_based_maximum_time, improved_slope_based_minimum_time, 101) dash_final = np.linspace(improved_slope_based_maximum, improved_slope_based_minimum, 101) ax = plt.subplot(111) figure_size = plt.gcf().get_size_inches() factor = 0.9 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) plt.plot(time, time_series, linewidth=2, label='Signal') plt.title('Slope-Based Edge Effects Example') plt.plot(max_dash_time_1, max_dash_1, 'k-') plt.plot(max_dash_time_2, max_dash_2, 'k-') plt.plot(max_dash_time_3, max_dash_3, 'k-') plt.plot(min_dash_time_1, min_dash_1, 'k-') plt.plot(min_dash_time_2, min_dash_2, 'k-') plt.plot(min_dash_time_3, min_dash_3, 'k-') plt.plot(min_dash_time_4, min_dash_4, 'k-') plt.plot(maxima_dash_time_1, maxima_dash, 'k-') plt.plot(maxima_dash_time_2, maxima_dash, 'k-') plt.plot(maxima_dash_time_3, maxima_dash, 'k-') plt.plot(minima_dash_time_1, minima_dash, 'k-') plt.plot(minima_dash_time_2, minima_dash, 'k-') plt.plot(minima_dash_time_3, minima_dash, 'k-') plt.text(4.34 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$') plt.text(4.74 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$') plt.text(4.12 * np.pi, 2, r'$\Delta{t^{max}_{M}}$') plt.text(4.50 * np.pi, 2, r'$\Delta{t^{max}_{M}}$') plt.text(4.30 * np.pi, 0.35, r'$s_1$') plt.text(4.43 * np.pi, -0.20, r'$s_2$') plt.text(4.30 * np.pi + (minima_x[-1] - minima_x[-2]), 0.35 + (minima_y[-1] - minima_y[-2]), r'$s_1$') plt.text(4.43 * np.pi + (slope_based_minimum_time - minima_x[-1]), -0.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$') plt.text(4.50 * np.pi + (slope_based_minimum_time - minima_x[-1]), 1.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$') plt.plot(minima_line_dash_time, minima_line_dash, 'k--') plt.plot(maxima_line_dash_time, maxima_line_dash, 'k--') plt.plot(dash_1_time, dash_1, 'k--') plt.plot(dash_2_time, dash_2, 'k--') plt.plot(dash_3_time, dash_3, 'k--') plt.plot(dash_4_time, dash_4, 'k--') plt.plot(dash_final_time, dash_final, 'k--') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(slope_based_maximum_time, slope_based_maximum, c='orange', zorder=4, label=textwrap.fill('Slope-based maximum', 11)) plt.scatter(slope_based_minimum_time, slope_based_minimum, c='purple', zorder=4, label=textwrap.fill('Slope-based minimum', 11)) plt.scatter(improved_slope_based_maximum_time, improved_slope_based_maximum, c='deeppink', zorder=4, label=textwrap.fill('Improved slope-based maximum', 11)) plt.scatter(improved_slope_based_minimum_time, improved_slope_based_minimum, c='dodgerblue', zorder=4, label=textwrap.fill('Improved slope-based minimum', 11)) plt.xlim(3.9 * np.pi, 5.5 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-3, -2, -1, 0, 1, 2), ('-3', '-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_slope_based.png') plt.show() # plot 5 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x =
time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] A2 = np.abs(maxima_y[-2] - minima_y[-2]) / 2 A1 = np.abs(maxima_y[-1] - minima_y[-1]) / 2 P2 = 2 * np.abs(maxima_x[-2] - minima_x[-2]) P1 = 2 * np.abs(maxima_x[-1] - minima_x[-1]) Huang_time = (P1 / P2) * (time[time >= maxima_x[-2]] - time[time == maxima_x[-2]]) + maxima_x[-1] Huang_wave = (A1 / A2) * (time_series[time >= maxima_x[-2]] - time_series[time == maxima_x[-2]]) + maxima_y[-1] Coughlin_time = Huang_time Coughlin_wave = A1 * np.cos(2 * np.pi * (1 / P1) * (Coughlin_time - Coughlin_time[0])) Average_max_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) Average_max = (maxima_y[-2] + maxima_y[-1]) / 2 Average_min_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) Average_min = (minima_y[-2] + minima_y[-1]) / 2 utils_Huang = emd_utils.Utility(time=time, time_series=Huang_wave) Huang_max_bool = utils_Huang.max_bool_func_1st_order_fd() Huang_min_bool = utils_Huang.min_bool_func_1st_order_fd() utils_Coughlin = emd_utils.Utility(time=time, time_series=Coughlin_wave) Coughlin_max_bool = utils_Coughlin.max_bool_func_1st_order_fd() Coughlin_min_bool = utils_Coughlin.min_bool_func_1st_order_fd() Huang_max_time = Huang_time[Huang_max_bool] Huang_max = Huang_wave[Huang_max_bool] Huang_min_time = Huang_time[Huang_min_bool] Huang_min = Huang_wave[Huang_min_bool] Coughlin_max_time = Coughlin_time[Coughlin_max_bool] Coughlin_max = Coughlin_wave[Coughlin_max_bool] Coughlin_min_time = Coughlin_time[Coughlin_min_bool] Coughlin_min = Coughlin_wave[Coughlin_min_bool] max_2_x_time = np.linspace(maxima_x[-2] - width, maxima_x[-2] + width, 101) max_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) max_2_x = maxima_y[-2] * np.ones_like(max_2_x_time) min_2_x_time = np.linspace(minima_x[-2] - width, minima_x[-2] + width, 101) min_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) min_2_x = minima_y[-2] * np.ones_like(min_2_x_time) dash_max_min_2_x = np.linspace(minima_y[-2], maxima_y[-2], 101) dash_max_min_2_x_time = 5.3 * np.pi * np.ones_like(dash_max_min_2_x) max_2_y = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) max_2_y_time = maxima_x[-2] * np.ones_like(max_2_y) min_2_y = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) min_2_y_time = minima_x[-2] * np.ones_like(min_2_y) dash_max_min_2_y_time = np.linspace(minima_x[-2], maxima_x[-2], 101) dash_max_min_2_y = -1.8 * np.ones_like(dash_max_min_2_y_time) max_1_x_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) max_1_x = maxima_y[-1] * np.ones_like(max_1_x_time) min_1_x_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) min_1_x = minima_y[-1] * np.ones_like(min_1_x_time) dash_max_min_1_x = np.linspace(minima_y[-1], maxima_y[-1], 101) dash_max_min_1_x_time = 5.4 * np.pi * np.ones_like(dash_max_min_1_x) max_1_y = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) max_1_y_time = maxima_x[-1] * np.ones_like(max_1_y) min_1_y = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) min_1_y_time = 
minima_x[-1] * np.ones_like(min_1_y) dash_max_min_1_y_time = np.linspace(minima_x[-1], maxima_x[-1], 101) dash_max_min_1_y = -2.1 * np.ones_like(dash_max_min_1_y_time) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Characteristic Wave Effects Example') plt.plot(time, time_series, linewidth=2, label='Signal') plt.scatter(Huang_max_time, Huang_max, c='magenta', zorder=4, label=textwrap.fill('Huang maximum', 10)) plt.scatter(Huang_min_time, Huang_min, c='lime', zorder=4, label=textwrap.fill('Huang minimum', 10)) plt.scatter(Coughlin_max_time, Coughlin_max, c='darkorange', zorder=4, label=textwrap.fill('Coughlin maximum', 14)) plt.scatter(Coughlin_min_time, Coughlin_min, c='dodgerblue', zorder=4, label=textwrap.fill('Coughlin minimum', 14)) plt.scatter(Average_max_time, Average_max, c='orangered', zorder=4, label=textwrap.fill('Average maximum', 14)) plt.scatter(Average_min_time, Average_min, c='cyan', zorder=4, label=textwrap.fill('Average minimum', 14)) plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.plot(Huang_time, Huang_wave, '--', c='darkviolet', label=textwrap.fill('Huang Characteristic Wave', 14)) plt.plot(Coughlin_time, Coughlin_wave, '--', c='darkgreen', label=textwrap.fill('Coughlin Characteristic Wave', 14)) plt.plot(max_2_x_time, max_2_x, 'k-') plt.plot(max_2_x_time_side, max_2_x, 'k-') plt.plot(min_2_x_time, min_2_x, 'k-') plt.plot(min_2_x_time_side, min_2_x, 'k-') plt.plot(dash_max_min_2_x_time, dash_max_min_2_x, 'k--') plt.text(5.16 * np.pi, 0.85, r'$2a_2$') plt.plot(max_2_y_time, max_2_y, 'k-') plt.plot(max_2_y_time, max_2_y_side, 'k-') plt.plot(min_2_y_time, min_2_y, 'k-') plt.plot(min_2_y_time, min_2_y_side, 'k-') plt.plot(dash_max_min_2_y_time, dash_max_min_2_y, 'k--') plt.text(4.08 * np.pi, -2.2, r'$\frac{p_2}{2}$') plt.plot(max_1_x_time, max_1_x, 'k-') plt.plot(max_1_x_time_side, max_1_x, 'k-') plt.plot(min_1_x_time, min_1_x, 'k-') plt.plot(min_1_x_time_side, min_1_x, 'k-') plt.plot(dash_max_min_1_x_time, dash_max_min_1_x, 'k--') plt.text(5.42 * np.pi, -0.1, r'$2a_1$') plt.plot(max_1_y_time, max_1_y, 'k-') plt.plot(max_1_y_time, max_1_y_side, 'k-') plt.plot(min_1_y_time, min_1_y, 'k-') plt.plot(min_1_y_time, min_1_y_side, 'k-') plt.plot(dash_max_min_1_y_time, dash_max_min_1_y, 'k--') plt.text(4.48 * np.pi, -2.5, r'$\frac{p_1}{2}$') plt.xlim(3.9 * np.pi, 5.6 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_characteristic_wave.png') plt.show() # plot 6 t = np.linspace(5, 95, 100) signal_orig = np.cos(2 * np.pi * t / 50) + 0.6 * np.cos(2 * np.pi * t / 25) + 0.5 * np.sin(2 * np.pi * t / 200) util_nn = emd_utils.Utility(time=t, time_series=signal_orig) maxima = signal_orig[util_nn.max_bool_func_1st_order_fd()] minima = signal_orig[util_nn.min_bool_func_1st_order_fd()] cs_max = CubicSpline(t[util_nn.max_bool_func_1st_order_fd()], maxima) cs_min = CubicSpline(t[util_nn.min_bool_func_1st_order_fd()], minima) time = np.linspace(0, 5 * np.pi, 1001) lsq_signal = np.cos(time) + np.cos(5 * time) knots = np.linspace(0, 5 * np.pi, 101) time_extended = time_extension(time) time_series_extended = np.zeros_like(time_extended) / 0 time_series_extended[int(len(lsq_signal) - 1):int(2 * (len(lsq_signal) - 1) + 1)] 
= lsq_signal neural_network_m = 200 neural_network_k = 100 # forward -> P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[(-(neural_network_m + neural_network_k - col)):(-(neural_network_m - col))] P[-1, col] = 1 # for additive constant t = lsq_signal[-neural_network_m:] # test - top seed_weights = np.ones(neural_network_k) / neural_network_k weights = 0 * seed_weights.copy() train_input = P[:-1, :] lr = 0.01 for iterations in range(1000): output = np.matmul(weights, train_input) error = (t - output) gradients = error * (- train_input) # guess average gradients average_gradients = np.mean(gradients, axis=1) # steepest descent max_gradient_vector = average_gradients * (np.abs(average_gradients) == max(np.abs(average_gradients))) adjustment = - lr * average_gradients # adjustment = - lr * max_gradient_vector weights += adjustment # test - bottom weights_right = np.hstack((weights, 0)) max_count_right = 0 min_count_right = 0 i_right = 0 while ((max_count_right < 1) or (min_count_right < 1)) and (i_right < len(lsq_signal) - 1): time_series_extended[int(2 * (len(lsq_signal) - 1) + 1 + i_right)] = \ sum(weights_right * np.hstack((time_series_extended[ int(2 * (len(lsq_signal) - 1) + 1 - neural_network_k + i_right): int(2 * (len(lsq_signal) - 1) + 1 + i_right)], 1))) i_right += 1 if i_right > 1: emd_utils_max = \ emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)], time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)]) if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0: max_count_right += 1 emd_utils_min = \ emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)], time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)]) if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0: min_count_right += 1 # backward <- P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[int(col + 1):int(col + neural_network_k + 1)] P[-1, col] = 1 # for additive constant t = lsq_signal[:neural_network_m] vx = cvx.Variable(int(neural_network_k + 1)) objective = cvx.Minimize(cvx.norm((2 * (vx * P) + 1 - t), 2)) # linear activation function is arbitrary prob = cvx.Problem(objective) result = prob.solve(verbose=True, solver=cvx.ECOS) weights_left = np.array(vx.value) max_count_left = 0 min_count_left = 0 i_left = 0 while ((max_count_left < 1) or (min_count_left < 1)) and (i_left < len(lsq_signal) - 1): time_series_extended[int(len(lsq_signal) - 2 - i_left)] = \ 2 * sum(weights_left * np.hstack((time_series_extended[int(len(lsq_signal) - 1 - i_left): int(len(lsq_signal) - 1 - i_left + neural_network_k)], 1))) + 1 i_left += 1 if i_left > 1: emd_utils_max = \ emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))], time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))]) if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0: max_count_left += 1 emd_utils_min = \ emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))], time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))]) if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0: min_count_left += 1 lsq_utils = emd_utils.Utility(time=time, 
time_series=lsq_signal) utils_extended = emd_utils.Utility(time=time_extended, time_series=time_series_extended) maxima = lsq_signal[lsq_utils.max_bool_func_1st_order_fd()] maxima_time = time[lsq_utils.max_bool_func_1st_order_fd()] maxima_extrapolate = time_series_extended[utils_extended.max_bool_func_1st_order_fd()][-1] maxima_extrapolate_time = time_extended[utils_extended.max_bool_func_1st_order_fd()][-1] minima = lsq_signal[lsq_utils.min_bool_func_1st_order_fd()] minima_time = time[lsq_utils.min_bool_func_1st_order_fd()] minima_extrapolate = time_series_extended[utils_extended.min_bool_func_1st_order_fd()][-2:] minima_extrapolate_time = time_extended[utils_extended.min_bool_func_1st_order_fd()][-2:] ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Single Neuron Neural Network Example') plt.plot(time, lsq_signal, zorder=2, label='Signal') plt.plot(time_extended, time_series_extended, c='g', zorder=1, label=textwrap.fill('Extrapolated signal', 12)) plt.scatter(maxima_time, maxima, c='r', zorder=3, label='Maxima') plt.scatter(minima_time, minima, c='b', zorder=3, label='Minima') plt.scatter(maxima_extrapolate_time, maxima_extrapolate, c='magenta', zorder=3, label=textwrap.fill('Extrapolated maxima', 12)) plt.scatter(minima_extrapolate_time, minima_extrapolate, c='cyan', zorder=4, label=textwrap.fill('Extrapolated minima', 12)) plt.plot(((time[-302] + time[-301]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k', label=textwrap.fill('Neural network inputs', 13)) plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(((time_extended[-1001] + time_extended[-1002]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k') plt.plot(((time[-202] + time[-201]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed', label=textwrap.fill('Neural network targets', 13)) plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(((time_extended[-1001] + time_extended[-1000]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed') plt.xlim(3.4 * np.pi, 5.6 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/neural_network.png') plt.show() # plot 6a np.random.seed(0) time = np.linspace(0, 5 * 
np.pi, 1001) knots_51 = np.linspace(0, 5 * np.pi, 51) time_series = np.cos(2 * time) + np.cos(4 * time) + np.cos(8 * time) noise = np.random.normal(0, 1, len(time_series)) time_series += noise advemdpy = EMD(time=time, time_series=time_series) imfs_51, hts_51, ifs_51 = advemdpy.empirical_mode_decomposition(knots=knots_51, max_imfs=3, edge_effect='symmetric_anchor', verbose=False)[:3] knots_31 = np.linspace(0, 5 * np.pi, 31) imfs_31, hts_31, ifs_31 = advemdpy.empirical_mode_decomposition(knots=knots_31, max_imfs=2, edge_effect='symmetric_anchor', verbose=False)[:3] knots_11 = np.linspace(0, 5 * np.pi, 11) imfs_11, hts_11, ifs_11 = advemdpy.empirical_mode_decomposition(knots=knots_11, max_imfs=1, edge_effect='symmetric_anchor', verbose=False)[:3] fig, axs = plt.subplots(3, 1) plt.suptitle(textwrap.fill('Comparison of Trends Extracted with Different Knot Sequences', 40)) plt.subplots_adjust(hspace=0.1) axs[0].plot(time, time_series, label='Time series') axs[0].plot(time, imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 1, IMF 2, & IMF 3 with 51 knots', 21)) print(f'DFA fluctuation with 51 knots: {np.round(np.var(time_series - (imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :])), 3)}') for knot in knots_51: axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[0].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[0].set_xticklabels(['', '', '', '', '', '']) axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[0].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[0].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].plot(time, time_series, label='Time series') axs[1].plot(time, imfs_31[1, :] + imfs_31[2, :], label=textwrap.fill('Sum of IMF 1 and IMF 2 with 31 knots', 19)) axs[1].plot(time, imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 2 and IMF 3 with 51 knots', 19)) print(f'DFA fluctuation with 31 knots: {np.round(np.var(time_series - (imfs_31[1, :] + imfs_31[2, :])), 3)}') for knot in knots_31: axs[1].plot(knot *
np.ones(101)
numpy.ones
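The plotting prompts above lean on one idiom for drawing measurement ticks: a linspace over one axis plotted against a constant vector built with np.ones or np.ones_like over the other (the completion np.ones(101) is exactly such a constant vector). A minimal sketch of the idiom, with illustrative coordinates that are not taken from the rows above:

import numpy as np
import matplotlib.pyplot as plt

width = 0.2
x0, y0 = 4.5 * np.pi, -1.0  # hypothetical extremum location
tick_x = np.linspace(x0 - width, x0 + width, 101)  # horizontal extent
tick_y = y0 * np.ones_like(tick_x)                 # constant height
plt.plot(tick_x, tick_y, 'k-')                     # horizontal tick mark
plt.plot(x0 * np.ones(101), np.linspace(y0 - width, y0 + width, 101), 'k-')  # vertical tick mark
plt.show()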
import numpy as np from sklearn.linear_model import LogisticRegression from .models import User from .twitter import vectorize_tweet def predict_user(user1_name, user2_name, tweet_text): """ Determine and return which user is more likely to say a given Tweet. Example: predict_user('ausen', 'elonmusk', 'Lambda School Rocks!') Returns 1 corresponding to 1st user passed in, or 0 for second. """ user1 = User.query.filter(User.name == user1_name).one() user2 = User.query.filter(User.name == user2_name).one() user1_vect = np.array([tweet.vect for tweet in user1.tweets]) user2_vect = np.array([tweet.vect for tweet in user2.tweets]) vects = np.vstack([user1_vect, user2_vect]) labels = np.concatenate([np.ones(len(user1.tweets)), np.zeros(len(user2.tweets))]) log_reg = LogisticRegression().fit(vects, labels) # We've done the model fitting, now to predict... hypo_tweet_vect = vectorize_tweet(tweet_text) return log_reg.predict(
np.array(hypo_tweet_vect)
numpy.array
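For context, the row above fits a per-pair logistic regression over stacked tweet embeddings. Below is a self-contained sketch of the same pattern with synthetic vectors standing in for User.tweets and vectorize_tweet (both app-specific and not reproduced here); note that scikit-learn's predict expects a 2-D array, which the sketch makes explicit:

import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
user1_vects = rng.normal(0.0, 1.0, size=(50, 25))  # stand-in for user1 tweet vectors
user2_vects = rng.normal(0.5, 1.0, size=(40, 25))  # stand-in for user2 tweet vectors

# Stack the two users' vectors and label them 1 (first user) and 0 (second user).
vects = np.vstack([user1_vects, user2_vects])
labels = np.concatenate([np.ones(len(user1_vects)), np.zeros(len(user2_vects))])

log_reg = LogisticRegression().fit(vects, labels)
hypo_tweet_vect = rng.normal(0.0, 1.0, size=25)    # stand-in for vectorize_tweet(tweet_text)
prediction = log_reg.predict(np.array([hypo_tweet_vect]))  # 2-D shape: (1, n_features)
print(prediction)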
from gtrain import Model import numpy as np import tensorflow as tf class NetForHypinv(Model): """ Implementation of the crucial function for the HypINV algorithm. Warning: Do not use this class but implement its subclass, for example see FCNetForHypinv """ def __init__(self, weights): self.eval_session = None self.grad_session = None self.initial_x = None self.center = None self.weights = weights self.out_for_eval = None #(going to be filled in build_for_eval method) self.boundary_out_for_eval = None self.trained_x = None self.training_class_index = None self.x = None # tf variable for inversion (going to be filled in build method) self.x_for_eval = None self.out = None self.boundary_out = None # list of tf tensors for each class of softmax class vs others output self.loss = None self.boundary_loss = None self.t = None #target self.boundary_t = None self.x1 = None # this attribute is used for purposes of the modified loss function def __del__(self): # close all sessions if self.eval_session: self.eval_session.close() if self.grad_session: self.grad_session.close() def set_initial_x(self, initial_x): # sets starting point for the search of the closest point self.initial_x = initial_x def set_center(self, center): # sets center point self.center = center / np.linalg.norm(center) def set_x1(self, x1): # sets x1, the point to which we want to find the closest point x0 self.x1 = x1 def has_modified_loss(self): pass # returns true if the modified loss is used def set_initial_x_in_session(self, x, session=None): # sets initial x in certain session if session is None: self.set_initial_x(x) else: pass # override this method def eval(self, x): if len(x.shape) == 1: x = x.reshape((1,len(x))) if not self.eval_session: self.eval_session = tf.Session() with self.eval_session.as_default(): self.build_for_eval() self.eval_session.run(tf.global_variables_initializer()) return self.eval_session.run(self.out_for_eval, {self.x_for_eval: x}) def boundary_eval(self, x, class_index): # evaluates binary classification of class_index against the other classes if not self.eval_session: self.eval_session = tf.Session() with self.eval_session.as_default(): self.build_for_eval() self.eval_session.run(tf.global_variables_initializer()) return self.eval_session.run(self.boundary_out_for_eval[class_index], {self.x_for_eval: x}) def get_boundary_gradient(self, x, class_index): # computes gradient of the boundary for specified class_index if not self.grad_session: self.grad_session = tf.Session() with self.grad_session.as_default(): self.build_for_eval() self.grad = list() for i in range(len(self.weights[0][-1][0])): self.grad.append(tf.gradients(self.boundary_out_for_eval[i], [self.x_for_eval])[0]) self.grad_x = self.x_for_eval return self.grad_session.run(self.grad[class_index], {self.grad_x: x}) def build_for_eval(self): # build model for evaluation pass #override this method (fill self.out_for_eval) def train_ended(self, session): self.trained_x = session.run(self.x) def build(self): # build model for training pass #override this method (fill self.x, self.out) def set_train_class(self, class_index): # sets class of the x1 self.training_class_index = class_index # overridden methods from gtrain.Model def get_loss(self): if self.training_class_index is None: return self.loss else: return self.boundary_loss[self.training_class_index] def get_hits(self): return self.get_loss() def get_count(self): return self.get_loss() def get_train_summaries(self): return [] def get_dev_summaries(self): return [] def get_placeholders(self): if 
self.training_class_index is None: return [self.t] else: return [self.boundary_t] #________________________________________EXAMPLES_OF_NetForHypinv_CLASS_____________________________________________ class FCNetForHypinv(NetForHypinv): """ Implementation of multi layer perceptron to be used in HypINV rule extraction algorithm """ def __init__(self, weights, function=tf.sigmoid, use_modified_loss=False, mu = 0.01): """ :param weights: saved as [list of weights for layers][0 weight, 1 bias] :param function: tf function for propagation. For example tf.nn.sigmoid, tf.atan :param use_modified_loss: whether the modified loss should be used :param mu: factor of the penalty terms that specify the distance between x0 and x1 and the distance of x1 from the boundary """ super(FCNetForHypinv, self).__init__(weights) self.function = function self.layer_sizes = [len(self.weights[0][0])] for bias in weights[1]: self.layer_sizes.append(len(bias)) self.num_classes = self.layer_sizes[-1] self.initial_x = np.zeros([1, self.layer_sizes[0]]) self.use_modified_loss = use_modified_loss self.mu = mu def build(self): with tf.name_scope("Input"): if self.center is not None: self.point_weights = tf.Variable(self.center.reshape((1, len(self.center))), dtype=tf.float64, trainable=False, name="Boundary_point") init_factor = self.center init_factor[init_factor!=0] = self.initial_x[init_factor!=0] / self.center[init_factor!=0] self.factor = tf.Variable(init_factor.reshape((1, len(self.center))), dtype=tf.float64, name="factor") else: self.point_weights = tf.Variable(self.initial_x.reshape((1, len(self.initial_x))), dtype=tf.float64, trainable=False, name="Boundary_point") self.factor = tf.Variable(np.ones((1, len(self.center))), dtype=tf.float64, name="factor") self.x = self.point_weights * self.factor with tf.name_scope("Target"): if self.use_modified_loss: x1_constant = tf.constant(self.x1.reshape((1, len(self.x1))), dtype=tf.float64) self.t = tf.placeholder(tf.float64, shape=[None, self.num_classes], name="Target_output") self.boundary_t = tf.placeholder(tf.float64, shape=[None, 2], name="Target_boundary_output") with tf.name_scope("FC_net"): flowing_x = self.x for i, _ in enumerate(self.weights[0]): with tf.name_scope("layer_{}".format(i)): W = tf.constant(self.weights[0][i], name="Weight_{}".format(i), dtype=tf.float64) b = tf.constant(self.weights[1][i], name="Bias_{}".format(i), dtype=tf.float64) flowing_x = self.function(tf.nn.xw_plus_b(flowing_x, W, b)) y = flowing_x self.out = tf.nn.softmax(y) with tf.name_scope("Binary_class_output"): self.boundary_out = list() for i in range(self.num_classes): mask = True+np.zeros(self.num_classes, dtype=np.bool) mask[i] = False x0 = self.out[:,i] x1 = tf.reduce_max(tf.boolean_mask(self.out, mask, axis=1), axis=1) s = x0+x1 out = tf.stack([x0/s, x1/s], axis=1) self.boundary_out.append(out) with tf.name_scope("Loss_functions"): self.loss = tf.reduce_mean( tf.nn.l2_loss(self.out-self.t), name="loss") with tf.name_scope("Binary_class_loss"): self.boundary_loss = list() if self.use_modified_loss: for i in range(self.num_classes): self.boundary_loss.append( tf.reduce_mean(tf.nn.l2_loss(self.boundary_out[i]-self.boundary_t)) + self.mu * tf.reduce_mean(tf.nn.l2_loss(self.x - x1_constant)) ) else: for i in range(self.num_classes): self.boundary_loss.append( tf.reduce_mean(tf.nn.l2_loss(self.boundary_out[i] - self.boundary_t)) ) def set_initial_x_in_session(self, x, session=None): if session is None: self.set_initial_x(x) else: if self.center is None: session.run([ 
self.point_weights.assign(x.reshape((1, len(x)))), self.factor.assign(np.ones((1, len(x)))) ]) else: init_factor = self.center init_factor[init_factor!=0] = x[init_factor!=0] / self.center[init_factor!=0] session.run(self.factor.assign(init_factor.reshape((1,len(init_factor))))) def build_for_eval(self): with tf.name_scope("eInput"): self.x_for_eval = tf.placeholder(tf.float32, shape=[None, len(self.weights[0][0])])#tf.Variable(tf.constant(self.initial_x), name="Boundary_point") with tf.name_scope("eFC_net"): flowing_x = self.x_for_eval for i, _ in enumerate(self.weights[0]): W = tf.constant(self.weights[0][i], name="eWeight_{}".format(i)) b = tf.constant(self.weights[1][i], name="eBias_{}".format(i)) flowing_x = self.function(tf.nn.xw_plus_b(flowing_x, W, b), name="elayer_{}".format(i)) y = flowing_x self.out_for_eval = tf.nn.softmax(y) with tf.name_scope("Binary_class_output"): self.boundary_out_for_eval = list() for i in range(self.num_classes): mask = True+np.zeros(self.num_classes, dtype=np.bool) mask[i] = False x0 = self.out_for_eval[:, i] x1 = tf.reduce_max(tf.boolean_mask(self.out_for_eval, mask, axis=1), axis=1) s = x0+x1 out = tf.stack([x0/s, x1/s], axis=1) self.boundary_out_for_eval.append(out) def has_modified_loss(self): return self.use_modified_loss def name(self): return "Hypinv_FC_net_{}".format("-".join([str(ls) for ls in self.layer_sizes])) class FCNetForHypinvBinary(FCNetForHypinv): """ Implementation of multi layer perceptron to be used in HypINV rule extraction algorithm The task is simplified to the binary classification base_class_index against the other classes """ def __init__(self, weights, base_class_index, function=tf.sigmoid, use_modified_loss=False, mu = 0.01): """ :param weights: saved as [list of weights for layers][0 weight, 1 bias] :param base_class_index: an index of the class which is used as the base class :param function: tf function for propagation. 
For example tf.nn.sigmoid, tf.atan :param use_modified_loss: whether the modified loss should be used :param mu: factor of the penalty terms that specify the distance between x0 and x1 and the distance of x1 from the boundary """ super(FCNetForHypinvBinary, self).__init__(weights) self.base_class_index = base_class_index self.function = function self.layer_sizes = [len(self.weights[0][0])] for bias in weights[1]: self.layer_sizes.append(len(bias)) self.num_classes = self.layer_sizes[-1] self.initial_x = np.zeros([1, self.layer_sizes[0]]) self.use_modified_loss = use_modified_loss self.mu = mu def build(self): with tf.name_scope("Input"): self.init_point = tf.Variable(self.initial_x.reshape((1, len(self.initial_x))), dtype=tf.float64, trainable=False, name="Boundary_point") self.factor = tf.Variable(np.ones((1, len(self.initial_x))), dtype=tf.float64, name="factor") self.x = self.init_point * self.factor with tf.name_scope("Target"): if self.use_modified_loss: x1_constant = tf.constant(self.x1.reshape((1, len(self.x1))), dtype=tf.float64) self.t = tf.placeholder(tf.float64, shape=[None, 2], name="Target_output") self.boundary_t = tf.placeholder(tf.float64, shape=[None, 2], name="Target_boundary_output") with tf.name_scope("FC_net"): flowing_x = self.x for i, _ in enumerate(self.weights[0]): with tf.name_scope("layer_{}".format(i)): W = tf.constant(self.weights[0][i], name="Weight_{}".format(i), dtype=tf.float64) b = tf.constant(self.weights[1][i], name="Bias_{}".format(i), dtype=tf.float64) flowing_x = self.function(tf.nn.xw_plus_b(flowing_x, W, b)) y = flowing_x full_out = tf.nn.softmax(y) with tf.name_scope("Binary_class_output"): self.boundary_out = list() mask = True+
np.zeros(self.num_classes, dtype=np.bool)
numpy.zeros
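The completion above builds the boolean mask for the HypINV "boundary output": for each class i, the softmax score of class i is paired with the strongest competing class, and the pair is renormalized to sum to one. A hedged numpy restatement of that construction (illustrative only; it mirrors the TF code above but is not part of the gtrain API), using the modern bool dtype in place of the deprecated np.bool:

import numpy as np

def boundary_pair(softmax_out, i):
    """softmax_out: (n, num_classes) array of softmax outputs; returns (n, 2) renormalized pairs."""
    mask = np.ones(softmax_out.shape[1], dtype=bool)
    mask[i] = False                        # exclude class i from the "others"
    x0 = softmax_out[:, i]                 # score of class i
    x1 = softmax_out[:, mask].max(axis=1)  # strongest competing class
    s = x0 + x1
    return np.stack([x0 / s, x1 / s], axis=1)

probs = np.array([[0.7, 0.2, 0.1], [0.1, 0.3, 0.6]])
print(boundary_pair(probs, 0))  # class 0 vs. its strongest competitor, per row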
# coding: utf-8 # Licensed under a 3-clause BSD style license - see LICENSE.rst """ Test the Logarithmic Units and Quantities """ from __future__ import (absolute_import, unicode_literals, division, print_function) from ...extern import six from ...extern.six.moves import zip import pickle import itertools import pytest import numpy as np from numpy.testing.utils import assert_allclose from ...tests.helper import assert_quantity_allclose from ... import units as u, constants as c lu_units = [u.dex, u.mag, u.decibel] lu_subclasses = [u.DexUnit, u.MagUnit, u.DecibelUnit] lq_subclasses = [u.Dex, u.Magnitude, u.Decibel] pu_sample = (u.dimensionless_unscaled, u.m, u.g/u.s**2, u.Jy) class TestLogUnitCreation(object): def test_logarithmic_units(self): """Check logarithmic units are set up correctly.""" assert u.dB.to(u.dex) == 0.1 assert u.dex.to(u.mag) == -2.5 assert u.mag.to(u.dB) == -4 @pytest.mark.parametrize('lu_unit, lu_cls', zip(lu_units, lu_subclasses)) def test_callable_units(self, lu_unit, lu_cls): assert isinstance(lu_unit, u.UnitBase) assert callable(lu_unit) assert lu_unit._function_unit_class is lu_cls @pytest.mark.parametrize('lu_unit', lu_units) def test_equality_to_normal_unit_for_dimensionless(self, lu_unit): lu = lu_unit() assert lu == lu._default_function_unit # eg, MagUnit() == u.mag assert lu._default_function_unit == lu # and u.mag == MagUnit() @pytest.mark.parametrize('lu_unit, physical_unit', itertools.product(lu_units, pu_sample)) def test_call_units(self, lu_unit, physical_unit): """Create a LogUnit subclass using the callable unit and physical unit, and do basic check that output is right.""" lu1 = lu_unit(physical_unit) assert lu1.physical_unit == physical_unit assert lu1.function_unit == lu1._default_function_unit def test_call_invalid_unit(self): with pytest.raises(TypeError): u.mag([]) with pytest.raises(ValueError): u.mag(u.mag()) @pytest.mark.parametrize('lu_cls, physical_unit', itertools.product( lu_subclasses + [u.LogUnit], pu_sample)) def test_subclass_creation(self, lu_cls, physical_unit): """Create a LogUnit subclass object for given physical unit, and do basic check that output is right.""" lu1 = lu_cls(physical_unit) assert lu1.physical_unit == physical_unit assert lu1.function_unit == lu1._default_function_unit lu2 = lu_cls(physical_unit, function_unit=2*lu1._default_function_unit) assert lu2.physical_unit == physical_unit assert lu2.function_unit == u.Unit(2*lu2._default_function_unit) with pytest.raises(ValueError): lu_cls(physical_unit, u.m) def test_predefined_magnitudes(): assert_quantity_allclose((-21.1*u.STmag).physical, 1.*u.erg/u.cm**2/u.s/u.AA) assert_quantity_allclose((-48.6*u.ABmag).physical, 1.*u.erg/u.cm**2/u.s/u.Hz) assert_quantity_allclose((0*u.M_bol).physical, c.L_bol0) assert_quantity_allclose((0*u.m_bol).physical, c.L_bol0/(4.*np.pi*(10.*c.pc)**2)) def test_predefined_reinitialisation(): assert u.mag('ST') == u.STmag assert u.mag('AB') == u.ABmag assert u.mag('Bol') == u.M_bol assert u.mag('bol') == u.m_bol def test_predefined_string_roundtrip(): """Ensure roundtripping; see #5015""" with u.magnitude_zero_points.enable(): assert u.Unit(u.STmag.to_string()) == u.STmag assert u.Unit(u.ABmag.to_string()) == u.ABmag assert u.Unit(u.M_bol.to_string()) == u.M_bol assert u.Unit(u.m_bol.to_string()) == u.m_bol def test_inequality(): """Check __ne__ works (regression for #5342).""" lu1 = u.mag(u.Jy) lu2 = u.dex(u.Jy) lu3 = u.mag(u.Jy**2) lu4 = lu3 - lu1 assert lu1 != lu2 assert lu1 != lu3 assert lu1 == lu4 class TestLogUnitStrings(object): def 
test_str(self): """Do some spot checks that str, repr, etc. work as expected.""" lu1 = u.mag(u.Jy) assert str(lu1) == 'mag(Jy)' assert repr(lu1) == 'Unit("mag(Jy)")' assert lu1.to_string('generic') == 'mag(Jy)' with pytest.raises(ValueError): lu1.to_string('fits') lu2 = u.dex() assert str(lu2) == 'dex' assert repr(lu2) == 'Unit("dex(1)")' assert lu2.to_string() == 'dex(1)' lu3 = u.MagUnit(u.Jy, function_unit=2*u.mag) assert str(lu3) == '2 mag(Jy)' assert repr(lu3) == 'MagUnit("Jy", unit="2 mag")' assert lu3.to_string() == '2 mag(Jy)' lu4 = u.mag(u.ct) assert lu4.to_string('generic') == 'mag(ct)' assert lu4.to_string('latex') == ('$\\mathrm{mag}$$\\mathrm{\\left( ' '\\mathrm{ct} \\right)}$') assert lu4._repr_latex_() == lu4.to_string('latex') class TestLogUnitConversion(object): @pytest.mark.parametrize('lu_unit, physical_unit', itertools.product(lu_units, pu_sample)) def test_physical_unit_conversion(self, lu_unit, physical_unit): """Check various LogUnit subclasses are equivalent and convertible to their non-log counterparts.""" lu1 = lu_unit(physical_unit) assert lu1.is_equivalent(physical_unit) assert lu1.to(physical_unit, 0.) == 1. assert physical_unit.is_equivalent(lu1) assert physical_unit.to(lu1, 1.) == 0. pu = u.Unit(8.*physical_unit) assert lu1.is_equivalent(physical_unit) assert lu1.to(pu, 0.) == 0.125 assert pu.is_equivalent(lu1) assert_allclose(pu.to(lu1, 0.125), 0., atol=1.e-15) # Check we round-trip. value = np.linspace(0., 10., 6) assert_allclose(pu.to(lu1, lu1.to(pu, value)), value, atol=1.e-15) # And that we're not just returning True all the time. pu2 = u.g assert not lu1.is_equivalent(pu2) with pytest.raises(u.UnitsError): lu1.to(pu2) assert not pu2.is_equivalent(lu1) with pytest.raises(u.UnitsError): pu2.to(lu1) @pytest.mark.parametrize('lu_unit', lu_units) def test_container_unit_conversion(self, lu_unit): """Check that conversion to logarithmic units (u.mag, u.dB, u.dex) is only possible when the physical unit is dimensionless.""" values = np.linspace(0., 10., 6) lu1 = lu_unit(u.dimensionless_unscaled) assert lu1.is_equivalent(lu1.function_unit) assert_allclose(lu1.to(lu1.function_unit, values), values) lu2 = lu_unit(u.Jy) assert not lu2.is_equivalent(lu2.function_unit) with pytest.raises(u.UnitsError): lu2.to(lu2.function_unit, values) @pytest.mark.parametrize( 'flu_unit, tlu_unit, physical_unit', itertools.product(lu_units, lu_units, pu_sample)) def test_subclass_conversion(self, flu_unit, tlu_unit, physical_unit): """Check various LogUnit subclasses are equivalent and convertible to each other if they correspond to equivalent physical units.""" values = np.linspace(0., 10., 6) flu = flu_unit(physical_unit) tlu = tlu_unit(physical_unit) assert flu.is_equivalent(tlu) assert_allclose(flu.to(tlu), flu.function_unit.to(tlu.function_unit)) assert_allclose(flu.to(tlu, values), values * flu.function_unit.to(tlu.function_unit)) tlu2 = tlu_unit(u.Unit(100.*physical_unit)) assert flu.is_equivalent(tlu2) # Check that we round-trip. 
assert_allclose(flu.to(tlu2, tlu2.to(flu, values)), values, atol=1.e-15) tlu3 = tlu_unit(physical_unit.to_system(u.si)[0]) assert flu.is_equivalent(tlu3) assert_allclose(flu.to(tlu3, tlu3.to(flu, values)), values, atol=1.e-15) tlu4 = tlu_unit(u.g) assert not flu.is_equivalent(tlu4) with pytest.raises(u.UnitsError): flu.to(tlu4, values) def test_unit_decomposition(self): lu = u.mag(u.Jy) assert lu.decompose() == u.mag(u.Jy.decompose()) assert lu.decompose().physical_unit.bases == [u.kg, u.s] assert lu.si == u.mag(u.Jy.si) assert lu.si.physical_unit.bases == [u.kg, u.s] assert lu.cgs == u.mag(u.Jy.cgs) assert lu.cgs.physical_unit.bases == [u.g, u.s] def test_unit_multiple_possible_equivalencies(self): lu = u.mag(u.Jy) assert lu.is_equivalent(pu_sample) class TestLogUnitArithmetic(object): def test_multiplication_division(self): """Check that multiplication/division with other units is only possible when the physical unit is dimensionless, and that this turns the unit into a normal one.""" lu1 = u.mag(u.Jy) with pytest.raises(u.UnitsError): lu1 * u.m with pytest.raises(u.UnitsError): u.m * lu1 with pytest.raises(u.UnitsError): lu1 / lu1 for unit in (u.dimensionless_unscaled, u.m, u.mag, u.dex): with pytest.raises(u.UnitsError): lu1 / unit lu2 = u.mag(u.dimensionless_unscaled) with pytest.raises(u.UnitsError): lu2 * lu1 with pytest.raises(u.UnitsError): lu2 / lu1 # But dimensionless_unscaled can be cancelled. assert lu2 / lu2 == u.dimensionless_unscaled # With dimensionless, normal units are OK, but we return a plain unit. tf = lu2 * u.m tr = u.m * lu2 for t in (tf, tr): assert not isinstance(t, type(lu2)) assert t == lu2.function_unit * u.m with u.set_enabled_equivalencies(u.logarithmic()): with pytest.raises(u.UnitsError): t.to(lu2.physical_unit) # Now we essentially have a LogUnit with a prefactor of 100, # so should be equivalent again. t = tf / u.cm with u.set_enabled_equivalencies(u.logarithmic()): assert t.is_equivalent(lu2.function_unit) assert_allclose(t.to(u.dimensionless_unscaled, np.arange(3.)/100.), lu2.to(lu2.physical_unit, np.arange(3.))) # If we effectively remove lu1, a normal unit should be returned. t2 = tf / lu2 assert not isinstance(t2, type(lu2)) assert t2 == u.m t3 = tf / lu2.function_unit assert not isinstance(t3, type(lu2)) assert t3 == u.m # For completeness, also ensure non-sensical operations fail with pytest.raises(TypeError): lu1 * object() with pytest.raises(TypeError): slice(None) * lu1 with pytest.raises(TypeError): lu1 / [] with pytest.raises(TypeError): 1 / lu1 @pytest.mark.parametrize('power', (2, 0.5, 1, 0)) def test_raise_to_power(self, power): """Check that raising LogUnits to some power is only possible when the physical unit is dimensionless, and that conversion is turned off when the resulting logarithmic unit (such as mag**2) is incompatible.""" lu1 = u.mag(u.Jy) if power == 0: assert lu1 ** power == u.dimensionless_unscaled elif power == 1: assert lu1 ** power == lu1 else: with pytest.raises(u.UnitsError): lu1 ** power # With dimensionless, though, it works, but returns a normal unit. 
lu2 = u.mag(u.dimensionless_unscaled) t = lu2**power if power == 0: assert t == u.dimensionless_unscaled elif power == 1: assert t == lu2 else: assert not isinstance(t, type(lu2)) assert t == lu2.function_unit**power # also check we roundtrip t2 = t**(1./power) assert t2 == lu2.function_unit with u.set_enabled_equivalencies(u.logarithmic()): assert_allclose(t2.to(u.dimensionless_unscaled, np.arange(3.)), lu2.to(lu2.physical_unit, np.arange(3.))) @pytest.mark.parametrize('other', pu_sample) def test_addition_subtraction_to_normal_units_fails(self, other): lu1 = u.mag(u.Jy) with pytest.raises(u.UnitsError): lu1 + other with pytest.raises(u.UnitsError): lu1 - other with pytest.raises(u.UnitsError): other - lu1 def test_addition_subtraction_to_non_units_fails(self): lu1 = u.mag(u.Jy) with pytest.raises(TypeError): lu1 + 1. with pytest.raises(TypeError): lu1 - [1., 2., 3.] @pytest.mark.parametrize( 'other', (u.mag, u.mag(), u.mag(u.Jy), u.mag(u.m), u.Unit(2*u.mag), u.MagUnit('', 2.*u.mag))) def test_addition_subtraction(self, other): """Check physical units are changed appropriately""" lu1 = u.mag(u.Jy) other_pu = getattr(other, 'physical_unit', u.dimensionless_unscaled) lu_sf = lu1 + other assert lu_sf.is_equivalent(lu1.physical_unit * other_pu) lu_sr = other + lu1 assert lu_sr.is_equivalent(lu1.physical_unit * other_pu) lu_df = lu1 - other assert lu_df.is_equivalent(lu1.physical_unit / other_pu) lu_dr = other - lu1 assert lu_dr.is_equivalent(other_pu / lu1.physical_unit) def test_complicated_addition_subtraction(self): """for fun, a more complicated example of addition and subtraction""" dm0 = u.Unit('DM', 1./(4.*np.pi*(10.*u.pc)**2)) lu_dm = u.mag(dm0) lu_absST = u.STmag - lu_dm assert lu_absST.is_equivalent(u.erg/u.s/u.AA) def test_neg_pos(self): lu1 = u.mag(u.Jy) neg_lu = -lu1 assert neg_lu != lu1 assert neg_lu.physical_unit == u.Jy**-1 assert -neg_lu == lu1 pos_lu = +lu1 assert pos_lu is not lu1 assert pos_lu == lu1 def test_pickle(): lu1 = u.dex(u.cm/u.s**2) s = pickle.dumps(lu1) lu2 = pickle.loads(s) assert lu1 == lu2 def test_hashable(): lu1 = u.dB(u.mW) lu2 = u.dB(u.m) lu3 = u.dB(u.mW) assert hash(lu1) != hash(lu2) assert hash(lu1) == hash(lu3) luset = {lu1, lu2, lu3} assert len(luset) == 2 class TestLogQuantityCreation(object): @pytest.mark.parametrize('lq, lu', zip(lq_subclasses + [u.LogQuantity], lu_subclasses + [u.LogUnit])) def test_logarithmic_quantities(self, lq, lu): """Check logarithmic quantities are all set up correctly""" assert lq._unit_class == lu assert type(lu()._quantity_class(1.)) is lq @pytest.mark.parametrize('lq_cls, physical_unit', itertools.product(lq_subclasses, pu_sample)) def test_subclass_creation(self, lq_cls, physical_unit): """Create LogQuantity subclass objects for some physical units, and basic check on transformations""" value = np.arange(1., 10.) 
log_q = lq_cls(value * physical_unit) assert log_q.unit.physical_unit == physical_unit assert log_q.unit.function_unit == log_q.unit._default_function_unit assert_allclose(log_q.physical.value, value) with pytest.raises(ValueError): lq_cls(value, physical_unit) @pytest.mark.parametrize( 'unit', (u.mag, u.mag(), u.mag(u.Jy), u.mag(u.m), u.Unit(2*u.mag), u.MagUnit('', 2.*u.mag), u.MagUnit(u.Jy, -1*u.mag), u.MagUnit(u.m, -2.*u.mag))) def test_different_units(self, unit): q = u.Magnitude(1.23, unit) assert q.unit.function_unit == getattr(unit, 'function_unit', unit) assert q.unit.physical_unit is getattr(unit, 'physical_unit', u.dimensionless_unscaled) @pytest.mark.parametrize('value, unit', ( (1.*u.mag(u.Jy), None), (1.*u.dex(u.Jy), None), (1.*u.mag(u.W/u.m**2/u.Hz), u.mag(u.Jy)), (1.*u.dex(u.W/u.m**2/u.Hz), u.mag(u.Jy)))) def test_function_values(self, value, unit): lq = u.Magnitude(value, unit) assert lq == value assert lq.unit.function_unit == u.mag assert lq.unit.physical_unit == getattr(unit, 'physical_unit', value.unit.physical_unit) @pytest.mark.parametrize( 'unit', (u.mag(), u.mag(u.Jy), u.mag(u.m), u.MagUnit('', 2.*u.mag), u.MagUnit(u.Jy, -1*u.mag), u.MagUnit(u.m, -2.*u.mag))) def test_indirect_creation(self, unit): q1 = 2.5 * unit assert isinstance(q1, u.Magnitude) assert q1.value == 2.5 assert q1.unit == unit pv = 100. * unit.physical_unit q2 = unit * pv assert q2.unit == unit assert q2.unit.physical_unit == pv.unit assert q2.to_value(unit.physical_unit) == 100. assert (q2._function_view / u.mag).to_value(1) == -5. q3 = unit / 0.4 assert q3 == q1 def test_from_view(self): # Cannot view a physical quantity as a function quantity, since the # values would change. q = [100., 1000.] * u.cm/u.s**2 with pytest.raises(TypeError): q.view(u.Dex) # But fine if we have the right magnitude. q = [2., 3.] * u.dex lq = q.view(u.Dex) assert isinstance(lq, u.Dex) assert lq.unit.physical_unit == u.dimensionless_unscaled assert np.all(q == lq) def test_using_quantity_class(self): """Check that we can use Quantity if we have subok=True""" # following issue #5851 lu = u.dex(u.AA) with pytest.raises(u.UnitTypeError): u.Quantity(1., lu) q = u.Quantity(1., lu, subok=True) assert type(q) is lu._quantity_class def test_conversion_to_and_from_physical_quantities(): """Ensures we can convert from regular quantities.""" mst = [10., 12., 14.] * u.STmag flux_lambda = mst.physical mst_roundtrip = flux_lambda.to(u.STmag) # check we return a logquantity; see #5178. assert isinstance(mst_roundtrip, u.Magnitude) assert mst_roundtrip.unit == mst.unit assert_allclose(mst_roundtrip.value, mst.value) wave = [4956.8, 4959.55, 4962.3] * u.AA flux_nu = mst.to(u.Jy, equivalencies=u.spectral_density(wave)) mst_roundtrip2 = flux_nu.to(u.STmag, u.spectral_density(wave)) assert isinstance(mst_roundtrip2, u.Magnitude) assert mst_roundtrip2.unit == mst.unit assert_allclose(mst_roundtrip2.value, mst.value) def test_quantity_decomposition(): lq = 10.*u.mag(u.Jy) assert lq.decompose() == lq assert lq.decompose().unit.physical_unit.bases == [u.kg, u.s] assert lq.si == lq assert lq.si.unit.physical_unit.bases == [u.kg, u.s] assert lq.cgs == lq assert lq.cgs.unit.physical_unit.bases == [u.g, u.s] class TestLogQuantityViews(object): def setup(self): self.lq = u.Magnitude(np.arange(10.) * u.Jy) self.lq2 = u.Magnitude(np.arange(5.)) def test_value_view(self): lq_value = self.lq.value assert type(lq_value) is np.ndarray lq_value[2] = -1. 
assert np.all(self.lq.value == lq_value) def test_function_view(self): lq_fv = self.lq._function_view assert type(lq_fv) is u.Quantity assert lq_fv.unit is self.lq.unit.function_unit lq_fv[3] = -2. * lq_fv.unit assert np.all(self.lq.value == lq_fv.value) def test_quantity_view(self): # Cannot view as Quantity, since the unit cannot be represented. with pytest.raises(TypeError): self.lq.view(u.Quantity) # But a dimensionless one is fine. q2 = self.lq2.view(u.Quantity) assert q2.unit is u.mag assert np.all(q2.value == self.lq2.value) lq3 = q2.view(u.Magnitude) assert type(lq3.unit) is u.MagUnit assert lq3.unit.physical_unit == u.dimensionless_unscaled assert np.all(lq3 == self.lq2) class TestLogQuantitySlicing(object): def test_item_get_and_set(self): lq1 = u.Magnitude(np.arange(1., 11.)*u.Jy) assert lq1[9] == u.Magnitude(10.*u.Jy) lq1[2] = 100.*u.Jy assert lq1[2] == u.Magnitude(100.*u.Jy) with pytest.raises(u.UnitsError): lq1[2] = 100.*u.m with pytest.raises(u.UnitsError): lq1[2] = 100.*u.mag with pytest.raises(u.UnitsError): lq1[2] = u.Magnitude(100.*u.m) assert lq1[2] == u.Magnitude(100.*u.Jy) def test_slice_get_and_set(self): lq1 = u.Magnitude(np.arange(1., 10.)*u.Jy) lq1[2:4] = 100.*u.Jy assert np.all(lq1[2:4] == u.Magnitude(100.*u.Jy)) with pytest.raises(u.UnitsError): lq1[2:4] = 100.*u.m with pytest.raises(u.UnitsError): lq1[2:4] = 100.*u.mag with pytest.raises(u.UnitsError): lq1[2:4] = u.Magnitude(100.*u.m) assert np.all(lq1[2] == u.Magnitude(100.*u.Jy)) class TestLogQuantityArithmetic(object): def test_multiplication_division(self): """Check that multiplication/division with other quantities is only possible when the physical unit is dimensionless, and that this turns the result into a normal quantity.""" lq = u.Magnitude(np.arange(1., 11.)*u.Jy) with pytest.raises(u.UnitsError): lq * (1.*u.m) with pytest.raises(u.UnitsError): (1.*u.m) * lq with pytest.raises(u.UnitsError): lq / lq for unit in (u.m, u.mag, u.dex): with pytest.raises(u.UnitsError): lq / unit lq2 = u.Magnitude(np.arange(1, 11.)) with pytest.raises(u.UnitsError): lq2 * lq with pytest.raises(u.UnitsError): lq2 / lq with pytest.raises(u.UnitsError): lq / lq2 # but dimensionless_unscaled can be cancelled r = lq2 / u.Magnitude(2.) assert r.unit == u.dimensionless_unscaled assert np.all(r.value == lq2.value/2.) # with dimensionless, normal units OK, but return normal quantities tf = lq2 * u.m tr = u.m * lq2 for t in (tf, tr): assert not isinstance(t, type(lq2)) assert t.unit == lq2.unit.function_unit * u.m with u.set_enabled_equivalencies(u.logarithmic()): with pytest.raises(u.UnitsError): t.to(lq2.unit.physical_unit) t = tf / (50.*u.cm) # now we essentially have the same quantity but with a prefactor of 2 assert t.unit.is_equivalent(lq2.unit.function_unit) assert_allclose(t.to(lq2.unit.function_unit), lq2._function_view*2) @pytest.mark.parametrize('power', (2, 0.5, 1, 0)) def test_raise_to_power(self, power): """Check that raising LogQuantities to some power is only possible when the physical unit is dimensionless, and that conversion is turned off when the resulting logarithmic unit (say, mag**2) is incompatible.""" lq = u.Magnitude(np.arange(1., 4.)*u.Jy) if power == 0: assert np.all(lq ** power == 1.) 
elif power == 1: assert np.all(lq ** power == lq) else: with pytest.raises(u.UnitsError): lq ** power # with dimensionless, it works, but falls back to normal quantity # (except for power=1) lq2 = u.Magnitude(np.arange(10.)) t = lq2**power if power == 0: assert t.unit is u.dimensionless_unscaled assert np.all(t.value == 1.) elif power == 1: assert np.all(t == lq2) else: assert not isinstance(t, type(lq2)) assert t.unit == lq2.unit.function_unit ** power with u.set_enabled_equivalencies(u.logarithmic()): with pytest.raises(u.UnitsError): t.to(u.dimensionless_unscaled) def test_error_on_lq_as_power(self): lq = u.Magnitude(np.arange(1., 4.)*u.Jy) with pytest.raises(TypeError): lq ** lq @pytest.mark.parametrize('other', pu_sample) def test_addition_subtraction_to_normal_units_fails(self, other): lq = u.Magnitude(np.arange(1., 10.)*u.Jy) q = 1.23 * other with pytest.raises(u.UnitsError): lq + q with pytest.raises(u.UnitsError): lq - q with pytest.raises(u.UnitsError): q - lq @pytest.mark.parametrize( 'other', (1.23 * u.mag, 2.34 * u.mag(), u.Magnitude(3.45 * u.Jy), u.Magnitude(4.56 * u.m), 5.67 * u.Unit(2*u.mag), u.Magnitude(6.78, 2.*u.mag))) def test_addition_subtraction(self, other): """Check that addition/subtraction with quantities with magnitude or MagUnit units works, and that it changes the physical units appropriately.""" lq = u.Magnitude(np.arange(1., 10.)*u.Jy) other_physical = other.to(getattr(other.unit, 'physical_unit', u.dimensionless_unscaled), equivalencies=u.logarithmic()) lq_sf = lq + other assert_allclose(lq_sf.physical, lq.physical * other_physical) lq_sr = other + lq assert_allclose(lq_sr.physical, lq.physical * other_physical) lq_df = lq - other assert_allclose(lq_df.physical, lq.physical / other_physical) lq_dr = other - lq assert_allclose(lq_dr.physical, other_physical / lq.physical) @pytest.mark.parametrize('other', pu_sample) def test_inplace_addition_subtraction_unit_checks(self, other): lu1 = u.mag(u.Jy) lq1 = u.Magnitude(np.arange(1., 10.), lu1) with pytest.raises(u.UnitsError): lq1 += other assert np.all(lq1.value == np.arange(1., 10.)) assert lq1.unit == lu1 with pytest.raises(u.UnitsError): lq1 -= other assert np.all(lq1.value == np.arange(1., 10.)) assert lq1.unit == lu1 @pytest.mark.parametrize( 'other', (1.23 * u.mag, 2.34 * u.mag(), u.Magnitude(3.45 * u.Jy), u.Magnitude(4.56 * u.m), 5.67 * u.Unit(2*u.mag), u.Magnitude(6.78, 2.*u.mag))) def test_inplace_addition_subtraction(self, other): """Check that inplace addition/subtraction with quantities with magnitude or MagUnit units works, and that it changes the physical units appropriately.""" lq = u.Magnitude(np.arange(1., 10.)*u.Jy) other_physical = other.to(getattr(other.unit, 'physical_unit', u.dimensionless_unscaled), equivalencies=u.logarithmic()) lq_sf = lq.copy() lq_sf += other assert_allclose(lq_sf.physical, lq.physical * other_physical) lq_df = lq.copy() lq_df -= other assert_allclose(lq_df.physical, lq.physical / other_physical) def test_complicated_addition_subtraction(self): """For fun, a more complicated example of addition and subtraction.""" dm0 = u.Unit('DM', 1./(4.*np.pi*(10.*u.pc)**2)) DMmag = u.mag(dm0) m_st = 10. * u.STmag dm = 5. * DMmag M_st = m_st - dm assert M_st.unit.is_equivalent(u.erg/u.s/u.AA) assert np.abs(M_st.physical / (m_st.physical*4.*np.pi*(100.*u.pc)**2) - 1.) 
< 1.e-15 class TestLogQuantityComparisons(object): def test_comparison_to_non_quantities_fails(self): lq = u.Magnitude(np.arange(1., 10.)*u.Jy) # On python2, ordering operations always succeed, given essentially # meaningless results. if not six.PY2: with pytest.raises(TypeError): lq > 'a' assert not (lq == 'a') assert lq != 'a' def test_comparison(self): lq1 = u.Magnitude(np.arange(1., 4.)*u.Jy) lq2 = u.Magnitude(2.*u.Jy) assert np.all((lq1 > lq2) == np.array([True, False, False])) assert np.all((lq1 == lq2) == np.array([False, True, False])) lq3 = u.Dex(2.*u.Jy) assert np.all((lq1 > lq3) == np.array([True, False, False])) assert np.all((lq1 == lq3) == np.array([False, True, False])) lq4 = u.Magnitude(2.*u.m) assert not (lq1 == lq4) assert lq1 != lq4 with pytest.raises(u.UnitsError): lq1 < lq4 q5 = 1.5 * u.Jy assert np.all((lq1 > q5) == np.array([True, False, False])) assert np.all((q5 < lq1) == np.array([True, False, False])) with pytest.raises(u.UnitsError): lq1 >= 2.*u.m with pytest.raises(u.UnitsError): lq1 <= lq1.value * u.mag # For physically dimensionless, we can compare with the function unit. lq6 = u.Magnitude(np.arange(1., 4.)) fv6 = lq6.value * u.mag assert
np.all(lq6 == fv6)
numpy.all
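The asserted completion, np.all(lq6 == fv6), checks that a dimensionless Magnitude equals its own function-unit view. A short usage sketch of the astropy.units API these tests exercise:

import numpy as np
import astropy.units as u

# A Magnitude wraps a physical unit; .physical converts back to the linear quantity.
lq = u.Magnitude(np.array([1., 10., 100.]) * u.Jy)  # values are -2.5*log10(flux/Jy)
assert np.all(np.isclose(lq.physical.value, [1., 10., 100.]))
assert lq.unit.physical_unit == u.Jy

# A physically dimensionless Magnitude compares directly against its function unit:
lq6 = u.Magnitude(np.arange(1., 4.))
fv6 = lq6.value * u.mag
assert np.all(lq6 == fv6)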
import os import numpy as np import pandas as pd from keras.utils import to_categorical from sklearn.model_selection import KFold, train_test_split def load_data(path): train = pd.read_json(os.path.join(path, "./train.json")) test = pd.read_json(os.path.join(path, "./test.json")) return (train, test) def preprocess(df, means=(-22.159262, -24.953745, 40.021883465782651), stds=(5.33146, 4.5463958, 4.0815391476694414)): X_band_1 = np.array([np.array(band).astype(np.float32).reshape(75, 75) for band in df["band_1"]]) X_band_2 = np.array([np.array(band).astype(np.float32).reshape(75, 75) for band in df["band_2"]]) angl = df['inc_angle'].map(lambda x:
np.cos(x * np.pi / 180)
numpy.cos
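The row above stops at the inc_angle transform. Below is a hedged sketch of how such a preprocessing step typically finishes, standardizing each channel with the supplied means/stds and stacking to shape (n, 75, 75, 3); which statistic belongs to which channel, and the broadcasting of one angle per sample, are assumptions rather than facts taken from the source:

import numpy as np

def stack_channels(X_band_1, X_band_2, angl, means, stds):
    # X_band_1, X_band_2: (n, 75, 75) arrays; angl: (n,) per-sample scalar channel.
    X_angl = np.ones_like(X_band_1) * angl[:, None, None]  # broadcast one value per sample
    channels = [
        (X_band_1 - means[0]) / stds[0],
        (X_band_2 - means[1]) / stds[1],
        (X_angl - means[2]) / stds[2],
    ]
    return np.stack(channels, axis=-1)  # resulting shape: (n, 75, 75, 3)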
from abc import ABCMeta, abstractmethod import os from vmaf.tools.misc import make_absolute_path, run_process from vmaf.tools.stats import ListStats __copyright__ = "Copyright 2016-2018, Netflix, Inc." __license__ = "Apache, Version 2.0" import re import numpy as np import ast from vmaf import ExternalProgramCaller, to_list from vmaf.config import VmafConfig, VmafExternalConfig from vmaf.core.executor import Executor from vmaf.core.result import Result from vmaf.tools.reader import YuvReader class FeatureExtractor(Executor): """ FeatureExtractor takes in a list of assets, runs feature extraction on them, and returns a list of corresponding results. A FeatureExtractor must specify a unique type and version combination (by the TYPE and VERSION attribute), so that the Result generated by it can be identified. A derived class of FeatureExtractor must: 1) Override TYPE and VERSION 2) Override _generate_result(self, asset), which calls a command-line executable and generates feature scores in a log file. 3) Override _get_feature_scores(self, asset), which reads the feature scores from the log file, and returns the scores in a dictionary format. For an example, follow VmafFeatureExtractor. """ __metaclass__ = ABCMeta @property @abstractmethod def ATOM_FEATURES(self): raise NotImplementedError def _read_result(self, asset): result = {} result.update(self._get_feature_scores(asset)) executor_id = self.executor_id return Result(asset, executor_id, result) @classmethod def get_scores_key(cls, atom_feature): return "{type}_{atom_feature}_scores".format( type=cls.TYPE, atom_feature=atom_feature) @classmethod def get_score_key(cls, atom_feature): return "{type}_{atom_feature}_score".format( type=cls.TYPE, atom_feature=atom_feature) def _get_feature_scores(self, asset): # routine to read the feature scores from the log file, and return # the scores in a dictionary format. log_file_path = self._get_log_file_path(asset) atom_feature_scores_dict = {} atom_feature_idx_dict = {} for atom_feature in self.ATOM_FEATURES: atom_feature_scores_dict[atom_feature] = [] atom_feature_idx_dict[atom_feature] = 0 with open(log_file_path, 'rt') as log_file: for line in log_file.readlines(): for atom_feature in self.ATOM_FEATURES: re_template = "{af}: ([0-9]+) ([a-zA-Z0-9.-]+)".format(af=atom_feature) mo = re.match(re_template, line) if mo: cur_idx = int(mo.group(1)) assert cur_idx == atom_feature_idx_dict[atom_feature] # parse value, allowing NaN and inf val = float(mo.group(2)) if np.isnan(val) or np.isinf(val): val = None atom_feature_scores_dict[atom_feature].append(val) atom_feature_idx_dict[atom_feature] += 1 continue len_score = len(atom_feature_scores_dict[self.ATOM_FEATURES[0]]) assert len_score != 0 for atom_feature in self.ATOM_FEATURES[1:]: assert len_score == len(atom_feature_scores_dict[atom_feature]), \ "Feature data possibly corrupt. Run cleanup script and try again." 
feature_result = {} for atom_feature in self.ATOM_FEATURES: scores_key = self.get_scores_key(atom_feature) feature_result[scores_key] = atom_feature_scores_dict[atom_feature] return feature_result class VmafFeatureExtractor(FeatureExtractor): TYPE = "VMAF_feature" # VERSION = '0.1' # vmaf_study; Anush's VIF fix # VERSION = '0.2' # expose vif_num, vif_den, adm_num, adm_den, anpsnr # VERSION = '0.2.1' # expose vif num/den of each scale # VERSION = '0.2.2' # adm abs-->fabs, corrected border handling, uniform reading with option of offset for input YUV, updated VIF corner case # VERSION = '0.2.2b' # expose adm_den/num_scalex # VERSION = '0.2.3' # AVX for VMAF convolution; update adm features by folding noise floor into per coef # VERSION = '0.2.4' # Fix a bug in adm feature passing scale into dwt_quant_step # VERSION = '0.2.4b' # Modify by adding ADM noise floor outside cube root; add derived feature motion2 VERSION = '0.2.4c' # Modify by moving motion2 to c code ATOM_FEATURES = ['vif', 'adm', 'ansnr', 'motion', 'motion2', 'vif_num', 'vif_den', 'adm_num', 'adm_den', 'anpsnr', 'vif_num_scale0', 'vif_den_scale0', 'vif_num_scale1', 'vif_den_scale1', 'vif_num_scale2', 'vif_den_scale2', 'vif_num_scale3', 'vif_den_scale3', 'adm_num_scale0', 'adm_den_scale0', 'adm_num_scale1', 'adm_den_scale1', 'adm_num_scale2', 'adm_den_scale2', 'adm_num_scale3', 'adm_den_scale3', ] DERIVED_ATOM_FEATURES = ['vif_scale0', 'vif_scale1', 'vif_scale2', 'vif_scale3', 'vif2', 'adm2', 'adm3', 'adm_scale0', 'adm_scale1', 'adm_scale2', 'adm_scale3', ] ADM2_CONSTANT = 0 ADM_SCALE_CONSTANT = 0 def _generate_result(self, asset): # routine to call the command-line executable and generate feature # scores in the log file. quality_width, quality_height = asset.quality_width_height log_file_path = self._get_log_file_path(asset) yuv_type=self._get_workfile_yuv_type(asset) ref_path=asset.ref_workfile_path dis_path=asset.dis_workfile_path w=quality_width h=quality_height logger = self.logger ExternalProgramCaller.call_vmaf_feature(yuv_type, ref_path, dis_path, w, h, log_file_path, logger) @classmethod def _post_process_result(cls, result): # override Executor._post_process_result result = super(VmafFeatureExtractor, cls)._post_process_result(result) # adm2 = # (adm_num + ADM2_CONSTANT) / (adm_den + ADM2_CONSTANT) adm2_scores_key = cls.get_scores_key('adm2') adm_num_scores_key = cls.get_scores_key('adm_num') adm_den_scores_key = cls.get_scores_key('adm_den') result.result_dict[adm2_scores_key] = list( (np.array(result.result_dict[adm_num_scores_key]) + cls.ADM2_CONSTANT) / (np.array(result.result_dict[adm_den_scores_key]) + cls.ADM2_CONSTANT) ) # vif_scalei = vif_num_scalei / vif_den_scalei, i = 0, 1, 2, 3 vif_num_scale0_scores_key = cls.get_scores_key('vif_num_scale0') vif_den_scale0_scores_key = cls.get_scores_key('vif_den_scale0') vif_num_scale1_scores_key = cls.get_scores_key('vif_num_scale1') vif_den_scale1_scores_key = cls.get_scores_key('vif_den_scale1') vif_num_scale2_scores_key = cls.get_scores_key('vif_num_scale2') vif_den_scale2_scores_key = cls.get_scores_key('vif_den_scale2') vif_num_scale3_scores_key = cls.get_scores_key('vif_num_scale3') vif_den_scale3_scores_key = cls.get_scores_key('vif_den_scale3') vif_scale0_scores_key = cls.get_scores_key('vif_scale0') vif_scale1_scores_key = cls.get_scores_key('vif_scale1') vif_scale2_scores_key = cls.get_scores_key('vif_scale2') vif_scale3_scores_key = cls.get_scores_key('vif_scale3') result.result_dict[vif_scale0_scores_key] = list( 
(np.array(result.result_dict[vif_num_scale0_scores_key]) / np.array(result.result_dict[vif_den_scale0_scores_key])) ) result.result_dict[vif_scale1_scores_key] = list( (np.array(result.result_dict[vif_num_scale1_scores_key]) / np.array(result.result_dict[vif_den_scale1_scores_key])) ) result.result_dict[vif_scale2_scores_key] = list( (np.array(result.result_dict[vif_num_scale2_scores_key]) / np.array(result.result_dict[vif_den_scale2_scores_key])) ) result.result_dict[vif_scale3_scores_key] = list( (np.array(result.result_dict[vif_num_scale3_scores_key]) / np.array(result.result_dict[vif_den_scale3_scores_key])) ) # vif2 = # ((vif_num_scale0 / vif_den_scale0) + (vif_num_scale1 / vif_den_scale1) + # (vif_num_scale2 / vif_den_scale2) + (vif_num_scale3 / vif_den_scale3)) / 4.0 vif_scores_key = cls.get_scores_key('vif2') result.result_dict[vif_scores_key] = list( ( (np.array(result.result_dict[vif_num_scale0_scores_key]) / np.array(result.result_dict[vif_den_scale0_scores_key])) + (np.array(result.result_dict[vif_num_scale1_scores_key]) / np.array(result.result_dict[vif_den_scale1_scores_key])) + (np.array(result.result_dict[vif_num_scale2_scores_key]) / np.array(result.result_dict[vif_den_scale2_scores_key])) + (np.array(result.result_dict[vif_num_scale3_scores_key]) / np.array(result.result_dict[vif_den_scale3_scores_key])) ) / 4.0 ) # adm_scalei = adm_num_scalei / adm_den_scalei, i = 0, 1, 2, 3 adm_num_scale0_scores_key = cls.get_scores_key('adm_num_scale0') adm_den_scale0_scores_key = cls.get_scores_key('adm_den_scale0') adm_num_scale1_scores_key = cls.get_scores_key('adm_num_scale1') adm_den_scale1_scores_key = cls.get_scores_key('adm_den_scale1') adm_num_scale2_scores_key = cls.get_scores_key('adm_num_scale2') adm_den_scale2_scores_key = cls.get_scores_key('adm_den_scale2') adm_num_scale3_scores_key = cls.get_scores_key('adm_num_scale3') adm_den_scale3_scores_key = cls.get_scores_key('adm_den_scale3') adm_scale0_scores_key = cls.get_scores_key('adm_scale0') adm_scale1_scores_key = cls.get_scores_key('adm_scale1') adm_scale2_scores_key = cls.get_scores_key('adm_scale2') adm_scale3_scores_key = cls.get_scores_key('adm_scale3') result.result_dict[adm_scale0_scores_key] = list( (np.array(result.result_dict[adm_num_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT) / (np.array(result.result_dict[adm_den_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT) ) result.result_dict[adm_scale1_scores_key] = list( (np.array(result.result_dict[adm_num_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT) / (np.array(result.result_dict[adm_den_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT) ) result.result_dict[adm_scale2_scores_key] = list( (np.array(result.result_dict[adm_num_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT) / (np.array(result.result_dict[adm_den_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT) ) result.result_dict[adm_scale3_scores_key] = list( (np.array(result.result_dict[adm_num_scale3_scores_key]) + cls.ADM_SCALE_CONSTANT) / (np.array(result.result_dict[adm_den_scale3_scores_key]) + cls.ADM_SCALE_CONSTANT) ) # adm3 = \ # (((adm_num_scale0 + ADM_SCALE_CONSTANT) / (adm_den_scale0 + ADM_SCALE_CONSTANT)) # + ((adm_num_scale1 + ADM_SCALE_CONSTANT) / (adm_den_scale1 + ADM_SCALE_CONSTANT)) # + ((adm_num_scale2 + ADM_SCALE_CONSTANT) / (adm_den_scale2 + ADM_SCALE_CONSTANT)) # + ((adm_num_scale3 + ADM_SCALE_CONSTANT) / (adm_den_scale3 + ADM_SCALE_CONSTANT))) / 4.0 adm3_scores_key = cls.get_scores_key('adm3') result.result_dict[adm3_scores_key] = list( ( ((np.array(result.result_dict[adm_num_scale0_scores_key]) + 
cls.ADM_SCALE_CONSTANT) / (np.array(result.result_dict[adm_den_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT)) + ((np.array(result.result_dict[adm_num_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT) / (np.array(result.result_dict[adm_den_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT)) + ((np.array(result.result_dict[adm_num_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT) / (np.array(result.result_dict[adm_den_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT)) + ((
np.array(result.result_dict[adm_num_scale3_scores_key])
numpy.array
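Every derived feature computed in _post_process_result above follows the same arithmetic: a per-frame numerator and denominator score list are combined as (num + c) / (den + c), with c = 0 for both ADM2_CONSTANT and ADM_SCALE_CONSTANT, and the multi-scale aggregates (vif2, adm3) are the mean of the four per-scale ratios. A minimal standalone sketch of that pattern, assuming plain per-frame score lists rather than the Result object used above:

import numpy as np

def ratio_feature(num_scores, den_scores, constant=0.0):
    # (num + c) / (den + c) per frame; constant=0.0 reduces to a plain
    # ratio, matching ADM2_CONSTANT and ADM_SCALE_CONSTANT above.
    num = np.asarray(num_scores, dtype=float)
    den = np.asarray(den_scores, dtype=float)
    return (num + constant) / (den + constant)

def mean_of_scales(per_scale_ratios):
    # vif2 / adm3 style aggregate: average the per-scale ratios frame by frame.
    return np.mean(np.vstack(per_scale_ratios), axis=0)

For example, mean_of_scales([ratio_feature(n, d) for n, d in scale_pairs]) reproduces the vif2 computation when scale_pairs holds the four (vif_num_scalei, vif_den_scalei) score lists.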
# ________ # / # \ / # \ / # \/ import random import textwrap import emd_mean import AdvEMDpy import emd_basis import emd_utils import numpy as np import pandas as pd import cvxpy as cvx import seaborn as sns import matplotlib.pyplot as plt from scipy.integrate import odeint from scipy.ndimage import gaussian_filter from emd_utils import time_extension, Utility from scipy.interpolate import CubicSpline from emd_hilbert import Hilbert, hilbert_spectrum from emd_preprocess import Preprocess from emd_mean import Fluctuation from AdvEMDpy import EMD # alternate packages from PyEMD import EMD as pyemd0215 import emd as emd040 sns.set(style='darkgrid') pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001) pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time) pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series) # plot 0 - addition fig = plt.figure(figsize=(9, 4)) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('First Iteration of Sifting Algorithm') plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1) plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()], c='r', label=r'$M(t_i)$', zorder=2) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4) plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()], c='c', label=r'$m(t_j)$', zorder=3) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5) plt.yticks(ticks=[-2, -1, 0, 1, 2]) plt.xticks(ticks=[0, np.pi, 2 * np.pi], labels=[r'0', r'$\pi$', r'$2\pi$']) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/pseudo_algorithm.png') plt.show() knots = np.arange(12) time = np.linspace(0, 11, 1101) basis = emd_basis.Basis(time=time, time_series=time) b_spline_basis = basis.cubic_b_spline(knots) chsi_basis = basis.chsi_basis(knots) # plot 1 plt.title('Non-Natural Cubic B-Spline Bases at Boundary') plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $') plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $') plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $') plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $') plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $') plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $']) plt.xlim(4.4, 6.6) plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.legend(loc='upper left') plt.savefig('jss_figures/boundary_bases.png') plt.show() # plot 1a - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) knots_uniform = np.linspace(0, 2 * np.pi, 51) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0] fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) 
plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Uniform Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, Linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Uniform Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Uniform Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots_uniform)): axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_uniform.png') plt.show() # plot 1b - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=1, verbose=False) fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Statically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, Linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Statically Optimised Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Statically Optimised Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots)): axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_1.png') plt.show() # plot 1c - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=2, verbose=False) fig, 
axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Dynamically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, Linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Dynamically Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Dynamically Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots[i])): axs[i].plot(knots[i][j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_2.png') plt.show() # plot 1d - addition window = 81 fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Filtering Demonstration') axs[1].set_title('Zoomed Region') preprocess_time = pseudo_alg_time.copy() np.random.seed(1) random.seed(1) preprocess_time_series = pseudo_alg_time_series + np.random.normal(0, 0.1, len(preprocess_time)) for i in random.sample(range(1000), 500): preprocess_time_series[i] += np.random.normal(0, 1) preprocess = Preprocess(time=preprocess_time, time_series=preprocess_time_series) axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[0].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[0].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12)) axs[0].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize interpolation filter', 14)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) 
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[1].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[1].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12)) axs[1].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize interpolation filter', 14)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_filter.png') plt.show() # plot 1e - addition fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Smoothing Demonstration') axs[1].set_title('Zoomed Region') axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[0].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) downsampled_and_decimated = preprocess.downsample() axs[0].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 11)) downsampled = preprocess.downsample(decimate=False) axs[0].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[1].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) 
axs[1].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 13)) axs[1].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.06, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.06, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_smooth.png') plt.show() # plot 2 fig, axs = plt.subplots(1, 2, sharey=True) axs[0].set_title('Cubic B-Spline Bases') axs[0].plot(time, b_spline_basis[2, :].T, '--', label='Basis 1') axs[0].plot(time, b_spline_basis[3, :].T, '--', label='Basis 2') axs[0].plot(time, b_spline_basis[4, :].T, '--', label='Basis 3') axs[0].plot(time, b_spline_basis[5, :].T, '--', label='Basis 4') axs[0].legend(loc='upper left') axs[0].plot(5 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].plot(6 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].set_xticks([5, 6]) axs[0].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[0].set_xlim(4.5, 6.5) axs[1].set_title('Cubic Hermite Spline Bases') axs[1].plot(time, chsi_basis[10, :].T, '--') axs[1].plot(time, chsi_basis[11, :].T, '--') axs[1].plot(time, chsi_basis[12, :].T, '--') axs[1].plot(time, chsi_basis[13, :].T, '--') axs[1].plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].set_xticks([5, 6]) axs[1].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[1].set_xlim(4.5, 6.5) plt.savefig('jss_figures/comparing_bases.png') plt.show() # plot 3 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_dash = maxima_y[-1] * np.ones_like(max_dash_time) min_dash_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_dash = minima_y[-1] * np.ones_like(min_dash_time) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) max_discard = maxima_y[-1] max_discard_time = minima_x[-1] - maxima_x[-1] + minima_x[-1] max_discard_dash_time = np.linspace(max_discard_time - width, max_discard_time + width, 101) max_discard_dash = max_discard * np.ones_like(max_discard_dash_time) dash_2_time = np.linspace(minima_x[-1], max_discard_time, 101) dash_2 = np.linspace(minima_y[-1], max_discard, 101) end_point_time = time[-1] end_point = time_series[-1] time_reflect = np.linspace((5 - a) * np.pi, (5 + a) * np.pi, 101) time_series_reflect = np.flip(np.cos(np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101)) + np.cos(5 * np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101))) time_series_anti_reflect = time_series_reflect[0] - time_series_reflect utils = emd_utils.Utility(time=time, time_series=time_series_anti_reflect) anti_max_bool = utils.max_bool_func_1st_order_fd() anti_max_point_time = time_reflect[anti_max_bool] 
anti_max_point = time_series_anti_reflect[anti_max_bool] utils = emd_utils.Utility(time=time, time_series=time_series_reflect) no_anchor_max_time = time_reflect[utils.max_bool_func_1st_order_fd()] no_anchor_max = time_series_reflect[utils.max_bool_func_1st_order_fd()] point_1 = 5.4 length_distance = np.linspace(maxima_y[-1], minima_y[-1], 101) length_distance_time = point_1 * np.pi * np.ones_like(length_distance) length_time = np.linspace(point_1 * np.pi - width, point_1 * np.pi + width, 101) length_top = maxima_y[-1] * np.ones_like(length_time) length_bottom = minima_y[-1] * np.ones_like(length_time) point_2 = 5.2 length_distance_2 = np.linspace(time_series[-1], minima_y[-1], 101) length_distance_time_2 = point_2 * np.pi * np.ones_like(length_distance_2) length_time_2 = np.linspace(point_2 * np.pi - width, point_2 * np.pi + width, 101) length_top_2 = time_series[-1] * np.ones_like(length_time_2) length_bottom_2 = minima_y[-1] * np.ones_like(length_time_2) symmetry_axis_1_time = minima_x[-1] * np.ones(101) symmetry_axis_2_time = time[-1] * np.ones(101) symmetry_axis = np.linspace(-2, 2, 101) end_time = np.linspace(time[-1] - width, time[-1] + width, 101) end_signal = time_series[-1] * np.ones_like(end_time) anti_symmetric_time = np.linspace(time[-1] - 0.5, time[-1] + 0.5, 101) anti_symmetric_signal = time_series[-1] * np.ones_like(anti_symmetric_time) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.plot(time, time_series, LineWidth=2, label='Signal') plt.title('Symmetry Edge Effects Example') plt.plot(time_reflect, time_series_reflect, 'g--', LineWidth=2, label=textwrap.fill('Symmetric signal', 10)) plt.plot(time_reflect[:51], time_series_anti_reflect[:51], '--', c='purple', LineWidth=2, label=textwrap.fill('Anti-symmetric signal', 10)) plt.plot(max_dash_time, max_dash, 'k-') plt.plot(min_dash_time, min_dash, 'k-') plt.plot(dash_1_time, dash_1, 'k--') plt.plot(dash_2_time, dash_2, 'k--') plt.plot(length_distance_time, length_distance, 'k--') plt.plot(length_distance_time_2, length_distance_2, 'k--') plt.plot(length_time, length_top, 'k-') plt.plot(length_time, length_bottom, 'k-') plt.plot(length_time_2, length_top_2, 'k-') plt.plot(length_time_2, length_bottom_2, 'k-') plt.plot(end_time, end_signal, 'k-') plt.plot(symmetry_axis_1_time, symmetry_axis, 'r--', zorder=1) plt.plot(anti_symmetric_time, anti_symmetric_signal, 'r--', zorder=1) plt.plot(symmetry_axis_2_time, symmetry_axis, 'r--', label=textwrap.fill('Axes of symmetry', 10), zorder=1) plt.text(5.1 * np.pi, -0.7, r'$\beta$L') plt.text(5.34 * np.pi, -0.05, 'L') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(max_discard_time, max_discard, c='purple', zorder=4, label=textwrap.fill('Symmetric Discard maxima', 10)) plt.scatter(end_point_time, end_point, c='orange', zorder=4, label=textwrap.fill('Symmetric Anchor maxima', 10)) plt.scatter(anti_max_point_time, anti_max_point, c='green', zorder=4, label=textwrap.fill('Anti-Symmetric maxima', 10)) plt.scatter(no_anchor_max_time, no_anchor_max, c='gray', zorder=4, label=textwrap.fill('Symmetric maxima', 10)) plt.xlim(3.9 * np.pi, 5.5 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_symmetry_anti.png') plt.show() 
# plot 4 a = 0.21 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_1 = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_dash_2 = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_dash_time_1 = maxima_x[-1] * np.ones_like(max_dash_1) max_dash_time_2 = maxima_x[-2] * np.ones_like(max_dash_1) min_dash_1 = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_dash_2 = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_dash_time_1 = minima_x[-1] * np.ones_like(min_dash_1) min_dash_time_2 = minima_x[-2] * np.ones_like(min_dash_1) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) dash_2_time = np.linspace(maxima_x[-1], minima_x[-2], 101) dash_2 = np.linspace(maxima_y[-1], minima_y[-2], 101) s1 = (minima_y[-2] - maxima_y[-1]) / (minima_x[-2] - maxima_x[-1]) slope_based_maximum_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) slope_based_maximum = minima_y[-1] + (slope_based_maximum_time - minima_x[-1]) * s1 max_dash_time_3 = slope_based_maximum_time * np.ones_like(max_dash_1) max_dash_3 = np.linspace(slope_based_maximum - width, slope_based_maximum + width, 101) dash_3_time = np.linspace(minima_x[-1], slope_based_maximum_time, 101) dash_3 = np.linspace(minima_y[-1], slope_based_maximum, 101) s2 = (minima_y[-1] - maxima_y[-1]) / (minima_x[-1] - maxima_x[-1]) slope_based_minimum_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) slope_based_minimum = slope_based_maximum - (slope_based_maximum_time - slope_based_minimum_time) * s2 min_dash_time_3 = slope_based_minimum_time * np.ones_like(min_dash_1) min_dash_3 = np.linspace(slope_based_minimum - width, slope_based_minimum + width, 101) dash_4_time = np.linspace(slope_based_maximum_time, slope_based_minimum_time) dash_4 = np.linspace(slope_based_maximum, slope_based_minimum) maxima_dash = np.linspace(2.5 - width, 2.5 + width, 101) maxima_dash_time_1 = maxima_x[-2] * np.ones_like(maxima_dash) maxima_dash_time_2 = maxima_x[-1] * np.ones_like(maxima_dash) maxima_dash_time_3 = slope_based_maximum_time * np.ones_like(maxima_dash) maxima_line_dash_time = np.linspace(maxima_x[-2], slope_based_maximum_time, 101) maxima_line_dash = 2.5 * np.ones_like(maxima_line_dash_time) minima_dash = np.linspace(-3.4 - width, -3.4 + width, 101) minima_dash_time_1 = minima_x[-2] * np.ones_like(minima_dash) minima_dash_time_2 = minima_x[-1] * np.ones_like(minima_dash) minima_dash_time_3 = slope_based_minimum_time * np.ones_like(minima_dash) minima_line_dash_time = np.linspace(minima_x[-2], slope_based_minimum_time, 101) minima_line_dash = -3.4 * np.ones_like(minima_line_dash_time) # slightly edit signal to make difference between slope-based method and improved slope-based method more clear time_series[time >= minima_x[-1]] = 1.5 * (time_series[time >= minima_x[-1]] - time_series[time == minima_x[-1]]) + \ time_series[time == minima_x[-1]] improved_slope_based_maximum_time = time[-1] improved_slope_based_maximum = time_series[-1] improved_slope_based_minimum_time = slope_based_minimum_time improved_slope_based_minimum = improved_slope_based_maximum + s2 * (improved_slope_based_minimum_time - improved_slope_based_maximum_time) 
min_dash_4 = np.linspace(improved_slope_based_minimum - width, improved_slope_based_minimum + width, 101) min_dash_time_4 = improved_slope_based_minimum_time * np.ones_like(min_dash_4) dash_final_time = np.linspace(improved_slope_based_maximum_time, improved_slope_based_minimum_time, 101) dash_final = np.linspace(improved_slope_based_maximum, improved_slope_based_minimum, 101) ax = plt.subplot(111) figure_size = plt.gcf().get_size_inches() factor = 0.9 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) plt.plot(time, time_series, LineWidth=2, label='Signal') plt.title('Slope-Based Edge Effects Example') plt.plot(max_dash_time_1, max_dash_1, 'k-') plt.plot(max_dash_time_2, max_dash_2, 'k-') plt.plot(max_dash_time_3, max_dash_3, 'k-') plt.plot(min_dash_time_1, min_dash_1, 'k-') plt.plot(min_dash_time_2, min_dash_2, 'k-') plt.plot(min_dash_time_3, min_dash_3, 'k-') plt.plot(min_dash_time_4, min_dash_4, 'k-') plt.plot(maxima_dash_time_1, maxima_dash, 'k-') plt.plot(maxima_dash_time_2, maxima_dash, 'k-') plt.plot(maxima_dash_time_3, maxima_dash, 'k-') plt.plot(minima_dash_time_1, minima_dash, 'k-') plt.plot(minima_dash_time_2, minima_dash, 'k-') plt.plot(minima_dash_time_3, minima_dash, 'k-') plt.text(4.34 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$') plt.text(4.74 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$') plt.text(4.12 * np.pi, 2, r'$\Delta{t^{max}_{M}}$') plt.text(4.50 * np.pi, 2, r'$\Delta{t^{max}_{M}}$') plt.text(4.30 * np.pi, 0.35, r'$s_1$') plt.text(4.43 * np.pi, -0.20, r'$s_2$') plt.text(4.30 * np.pi + (minima_x[-1] - minima_x[-2]), 0.35 + (minima_y[-1] - minima_y[-2]), r'$s_1$') plt.text(4.43 * np.pi + (slope_based_minimum_time - minima_x[-1]), -0.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$') plt.text(4.50 * np.pi + (slope_based_minimum_time - minima_x[-1]), 1.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$') plt.plot(minima_line_dash_time, minima_line_dash, 'k--') plt.plot(maxima_line_dash_time, maxima_line_dash, 'k--') plt.plot(dash_1_time, dash_1, 'k--') plt.plot(dash_2_time, dash_2, 'k--') plt.plot(dash_3_time, dash_3, 'k--') plt.plot(dash_4_time, dash_4, 'k--') plt.plot(dash_final_time, dash_final, 'k--') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(slope_based_maximum_time, slope_based_maximum, c='orange', zorder=4, label=textwrap.fill('Slope-based maximum', 11)) plt.scatter(slope_based_minimum_time, slope_based_minimum, c='purple', zorder=4, label=textwrap.fill('Slope-based minimum', 11)) plt.scatter(improved_slope_based_maximum_time, improved_slope_based_maximum, c='deeppink', zorder=4, label=textwrap.fill('Improved slope-based maximum', 11)) plt.scatter(improved_slope_based_minimum_time, improved_slope_based_minimum, c='dodgerblue', zorder=4, label=textwrap.fill('Improved slope-based minimum', 11)) plt.xlim(3.9 * np.pi, 5.5 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-3, -2, -1, 0, 1, 2), ('-3', '-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_slope_based.png') plt.show() # plot 5 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = 
time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] A2 = np.abs(maxima_y[-2] - minima_y[-2]) / 2 A1 = np.abs(maxima_y[-1] - minima_y[-1]) / 2 P2 = 2 * np.abs(maxima_x[-2] - minima_x[-2]) P1 = 2 * np.abs(maxima_x[-1] - minima_x[-1]) Huang_time = (P1 / P2) * (time[time >= maxima_x[-2]] - time[time == maxima_x[-2]]) + maxima_x[-1] Huang_wave = (A1 / A2) * (time_series[time >= maxima_x[-2]] - time_series[time == maxima_x[-2]]) + maxima_y[-1] Coughlin_time = Huang_time Coughlin_wave = A1 * np.cos(2 * np.pi * (1 / P1) * (Coughlin_time - Coughlin_time[0])) Average_max_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) Average_max = (maxima_y[-2] + maxima_y[-1]) / 2 Average_min_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) Average_min = (minima_y[-2] + minima_y[-1]) / 2 utils_Huang = emd_utils.Utility(time=time, time_series=Huang_wave) Huang_max_bool = utils_Huang.max_bool_func_1st_order_fd() Huang_min_bool = utils_Huang.min_bool_func_1st_order_fd() utils_Coughlin = emd_utils.Utility(time=time, time_series=Coughlin_wave) Coughlin_max_bool = utils_Coughlin.max_bool_func_1st_order_fd() Coughlin_min_bool = utils_Coughlin.min_bool_func_1st_order_fd() Huang_max_time = Huang_time[Huang_max_bool] Huang_max = Huang_wave[Huang_max_bool] Huang_min_time = Huang_time[Huang_min_bool] Huang_min = Huang_wave[Huang_min_bool] Coughlin_max_time = Coughlin_time[Coughlin_max_bool] Coughlin_max = Coughlin_wave[Coughlin_max_bool] Coughlin_min_time = Coughlin_time[Coughlin_min_bool] Coughlin_min = Coughlin_wave[Coughlin_min_bool] max_2_x_time = np.linspace(maxima_x[-2] - width, maxima_x[-2] + width, 101) max_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) max_2_x = maxima_y[-2] * np.ones_like(max_2_x_time) min_2_x_time = np.linspace(minima_x[-2] - width, minima_x[-2] + width, 101) min_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) min_2_x = minima_y[-2] * np.ones_like(min_2_x_time) dash_max_min_2_x = np.linspace(minima_y[-2], maxima_y[-2], 101) dash_max_min_2_x_time = 5.3 * np.pi * np.ones_like(dash_max_min_2_x) max_2_y = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) max_2_y_time = maxima_x[-2] * np.ones_like(max_2_y) min_2_y = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) min_2_y_time = minima_x[-2] * np.ones_like(min_2_y) dash_max_min_2_y_time = np.linspace(minima_x[-2], maxima_x[-2], 101) dash_max_min_2_y = -1.8 * np.ones_like(dash_max_min_2_y_time) max_1_x_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) max_1_x = maxima_y[-1] * np.ones_like(max_1_x_time) min_1_x_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) min_1_x = minima_y[-1] * np.ones_like(min_1_x_time) dash_max_min_1_x = np.linspace(minima_y[-1], maxima_y[-1], 101) dash_max_min_1_x_time = 5.4 * np.pi * np.ones_like(dash_max_min_1_x) max_1_y = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) max_1_y_time = maxima_x[-1] * np.ones_like(max_1_y) min_1_y = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) min_1_y_time = 
minima_x[-1] * np.ones_like(min_1_y) dash_max_min_1_y_time = np.linspace(minima_x[-1], maxima_x[-1], 101) dash_max_min_1_y = -2.1 * np.ones_like(dash_max_min_1_y_time) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Characteristic Wave Effects Example') plt.plot(time, time_series, LineWidth=2, label='Signal') plt.scatter(Huang_max_time, Huang_max, c='magenta', zorder=4, label=textwrap.fill('Huang maximum', 10)) plt.scatter(Huang_min_time, Huang_min, c='lime', zorder=4, label=textwrap.fill('Huang minimum', 10)) plt.scatter(Coughlin_max_time, Coughlin_max, c='darkorange', zorder=4, label=textwrap.fill('Coughlin maximum', 14)) plt.scatter(Coughlin_min_time, Coughlin_min, c='dodgerblue', zorder=4, label=textwrap.fill('Coughlin minimum', 14)) plt.scatter(Average_max_time, Average_max, c='orangered', zorder=4, label=textwrap.fill('Average maximum', 14)) plt.scatter(Average_min_time, Average_min, c='cyan', zorder=4, label=textwrap.fill('Average minimum', 14)) plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.plot(Huang_time, Huang_wave, '--', c='darkviolet', label=textwrap.fill('Huang Characteristic Wave', 14)) plt.plot(Coughlin_time, Coughlin_wave, '--', c='darkgreen', label=textwrap.fill('Coughlin Characteristic Wave', 14)) plt.plot(max_2_x_time, max_2_x, 'k-') plt.plot(max_2_x_time_side, max_2_x, 'k-') plt.plot(min_2_x_time, min_2_x, 'k-') plt.plot(min_2_x_time_side, min_2_x, 'k-') plt.plot(dash_max_min_2_x_time, dash_max_min_2_x, 'k--') plt.text(5.16 * np.pi, 0.85, r'$2a_2$') plt.plot(max_2_y_time, max_2_y, 'k-') plt.plot(max_2_y_time, max_2_y_side, 'k-') plt.plot(min_2_y_time, min_2_y, 'k-') plt.plot(min_2_y_time, min_2_y_side, 'k-') plt.plot(dash_max_min_2_y_time, dash_max_min_2_y, 'k--') plt.text(4.08 * np.pi, -2.2, r'$\frac{p_2}{2}$') plt.plot(max_1_x_time, max_1_x, 'k-') plt.plot(max_1_x_time_side, max_1_x, 'k-') plt.plot(min_1_x_time, min_1_x, 'k-') plt.plot(min_1_x_time_side, min_1_x, 'k-') plt.plot(dash_max_min_1_x_time, dash_max_min_1_x, 'k--') plt.text(5.42 * np.pi, -0.1, r'$2a_1$') plt.plot(max_1_y_time, max_1_y, 'k-') plt.plot(max_1_y_time, max_1_y_side, 'k-') plt.plot(min_1_y_time, min_1_y, 'k-') plt.plot(min_1_y_time, min_1_y_side, 'k-') plt.plot(dash_max_min_1_y_time, dash_max_min_1_y, 'k--') plt.text(4.48 * np.pi, -2.5, r'$\frac{p_1}{2}$') plt.xlim(3.9 * np.pi, 5.6 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_characteristic_wave.png') plt.show() # plot 6 t = np.linspace(5, 95, 100) signal_orig = np.cos(2 * np.pi * t / 50) + 0.6 * np.cos(2 * np.pi * t / 25) + 0.5 * np.sin(2 * np.pi * t / 200) util_nn = emd_utils.Utility(time=t, time_series=signal_orig) maxima = signal_orig[util_nn.max_bool_func_1st_order_fd()] minima = signal_orig[util_nn.min_bool_func_1st_order_fd()] cs_max = CubicSpline(t[util_nn.max_bool_func_1st_order_fd()], maxima) cs_min = CubicSpline(t[util_nn.min_bool_func_1st_order_fd()], minima) time = np.linspace(0, 5 * np.pi, 1001) lsq_signal = np.cos(time) + np.cos(5 * time) knots = np.linspace(0, 5 * np.pi, 101) time_extended = time_extension(time) time_series_extended = np.zeros_like(time_extended) / 0 time_series_extended[int(len(lsq_signal) - 1):int(2 * (len(lsq_signal) - 1) + 1)] 
= lsq_signal neural_network_m = 200 neural_network_k = 100 # forward -> P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[(-(neural_network_m + neural_network_k - col)):(-(neural_network_m - col))] P[-1, col] = 1 # for additive constant t = lsq_signal[-neural_network_m:] # test - top seed_weights = np.ones(neural_network_k) / neural_network_k weights = 0 * seed_weights.copy() train_input = P[:-1, :] lr = 0.01 for iterations in range(1000): output = np.matmul(weights, train_input) error = (t - output) gradients = error * (- train_input) # guess average gradients average_gradients = np.mean(gradients, axis=1) # steepest descent max_gradient_vector = average_gradients * (np.abs(average_gradients) == max(np.abs(average_gradients))) adjustment = - lr * average_gradients # adjustment = - lr * max_gradient_vector weights += adjustment # test - bottom weights_right = np.hstack((weights, 0)) max_count_right = 0 min_count_right = 0 i_right = 0 while ((max_count_right < 1) or (min_count_right < 1)) and (i_right < len(lsq_signal) - 1): time_series_extended[int(2 * (len(lsq_signal) - 1) + 1 + i_right)] = \ sum(weights_right * np.hstack((time_series_extended[ int(2 * (len(lsq_signal) - 1) + 1 - neural_network_k + i_right): int(2 * (len(lsq_signal) - 1) + 1 + i_right)], 1))) i_right += 1 if i_right > 1: emd_utils_max = \ emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)], time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)]) if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0: max_count_right += 1 emd_utils_min = \ emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)], time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)]) if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0: min_count_right += 1 # backward <- P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[int(col + 1):int(col + neural_network_k + 1)] P[-1, col] = 1 # for additive constant t = lsq_signal[:neural_network_m] vx = cvx.Variable(int(neural_network_k + 1)) objective = cvx.Minimize(cvx.norm((2 * (vx * P) + 1 - t), 2)) # linear activation function is arbitrary prob = cvx.Problem(objective) result = prob.solve(verbose=True, solver=cvx.ECOS) weights_left = np.array(vx.value) max_count_left = 0 min_count_left = 0 i_left = 0 while ((max_count_left < 1) or (min_count_left < 1)) and (i_left < len(lsq_signal) - 1): time_series_extended[int(len(lsq_signal) - 2 - i_left)] = \ 2 * sum(weights_left * np.hstack((time_series_extended[int(len(lsq_signal) - 1 - i_left): int(len(lsq_signal) - 1 - i_left + neural_network_k)], 1))) + 1 i_left += 1 if i_left > 1: emd_utils_max = \ emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))], time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))]) if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0: max_count_left += 1 emd_utils_min = \ emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))], time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))]) if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0: min_count_left += 1 lsq_utils = emd_utils.Utility(time=time, 
time_series=lsq_signal) utils_extended = emd_utils.Utility(time=time_extended, time_series=time_series_extended) maxima = lsq_signal[lsq_utils.max_bool_func_1st_order_fd()] maxima_time = time[lsq_utils.max_bool_func_1st_order_fd()] maxima_extrapolate = time_series_extended[utils_extended.max_bool_func_1st_order_fd()][-1] maxima_extrapolate_time = time_extended[utils_extended.max_bool_func_1st_order_fd()][-1] minima = lsq_signal[lsq_utils.min_bool_func_1st_order_fd()] minima_time = time[lsq_utils.min_bool_func_1st_order_fd()] minima_extrapolate = time_series_extended[utils_extended.min_bool_func_1st_order_fd()][-2:] minima_extrapolate_time = time_extended[utils_extended.min_bool_func_1st_order_fd()][-2:] ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Single Neuron Neural Network Example') plt.plot(time, lsq_signal, zorder=2, label='Signal') plt.plot(time_extended, time_series_extended, c='g', zorder=1, label=textwrap.fill('Extrapolated signal', 12)) plt.scatter(maxima_time, maxima, c='r', zorder=3, label='Maxima') plt.scatter(minima_time, minima, c='b', zorder=3, label='Minima') plt.scatter(maxima_extrapolate_time, maxima_extrapolate, c='magenta', zorder=3, label=textwrap.fill('Extrapolated maxima', 12)) plt.scatter(minima_extrapolate_time, minima_extrapolate, c='cyan', zorder=4, label=textwrap.fill('Extrapolated minima', 12)) plt.plot(((time[-302] + time[-301]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k', label=textwrap.fill('Neural network inputs', 13)) plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(((time_extended[-1001] + time_extended[-1002]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k') plt.plot(((time[-202] + time[-201]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed', label=textwrap.fill('Neural network targets', 13)) plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(((time_extended[-1001] + time_extended[-1000]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed') plt.xlim(3.4 * np.pi, 5.6 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/neural_network.png') plt.show() # plot 6a np.random.seed(0) time = np.linspace(0, 5 * 
np.pi, 1001) knots_51 = np.linspace(0, 5 * np.pi, 51) time_series = np.cos(2 * time) + np.cos(4 * time) + np.cos(8 * time) noise = np.random.normal(0, 1, len(time_series)) time_series += noise advemdpy = EMD(time=time, time_series=time_series) imfs_51, hts_51, ifs_51 = advemdpy.empirical_mode_decomposition(knots=knots_51, max_imfs=3, edge_effect='symmetric_anchor', verbose=False)[:3] knots_31 = np.linspace(0, 5 * np.pi, 31) imfs_31, hts_31, ifs_31 = advemdpy.empirical_mode_decomposition(knots=knots_31, max_imfs=2, edge_effect='symmetric_anchor', verbose=False)[:3] knots_11 = np.linspace(0, 5 * np.pi, 11) imfs_11, hts_11, ifs_11 = advemdpy.empirical_mode_decomposition(knots=knots_11, max_imfs=1, edge_effect='symmetric_anchor', verbose=False)[:3] fig, axs = plt.subplots(3, 1) plt.suptitle(textwrap.fill('Comparison of Trends Extracted with Different Knot Sequences', 40)) plt.subplots_adjust(hspace=0.1) axs[0].plot(time, time_series, label='Time series') axs[0].plot(time, imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 1, IMF 2, & IMF 3 with 51 knots', 21)) print(f'DFA fluctuation with 51 knots: {np.round(np.var(time_series - (imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :])), 3)}') for knot in knots_51: axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[0].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[0].set_xticklabels(['', '', '', '', '', '']) axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[0].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[0].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].plot(time, time_series, label='Time series') axs[1].plot(time, imfs_31[1, :] + imfs_31[2, :], label=textwrap.fill('Sum of IMF 1 and IMF 2 with 31 knots', 19)) axs[1].plot(time, imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 2 and IMF 3 with 51 knots', 19)) print(f'DFA fluctuation with 31 knots: {np.round(np.var(time_series - (imfs_31[1, :] + imfs_31[2, :])), 3)}') for knot in knots_31: axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[1].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[1].set_xticklabels(['', '', '', '', '', '']) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) axs[1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[1].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[1].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[1].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') axs[2].plot(time, time_series, label='Time series') axs[2].plot(time, imfs_11[1, :], label='IMF 1 with 11 knots') axs[2].plot(time, imfs_31[2, :], label='IMF 2 with 31 knots') axs[2].plot(time, imfs_51[3, :], label='IMF 
3 with 51 knots') print(f'DFA fluctuation with 11 knots: {np.round(np.var(time_series - imfs_51[3, :]), 3)}') for knot in knots_11: axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[2].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[2].set_xticklabels(['$0$', r'$\pi$', r'$2\pi$', r'$3\pi$', r'$4\pi$', r'$5\pi$']) box_2 = axs[2].get_position() axs[2].set_position([box_2.x0 - 0.05, box_2.y0, box_2.width * 0.85, box_2.height]) axs[2].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[2].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[2].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[2].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[2].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') plt.savefig('jss_figures/DFA_different_trends.png') plt.show() # plot 6b fig, axs = plt.subplots(3, 1) plt.suptitle(textwrap.fill('Comparison of Trends Extracted with Different Knot Sequences Zoomed Region', 40)) plt.subplots_adjust(hspace=0.1) axs[0].plot(time, time_series, label='Time series') axs[0].plot(time, imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 1, IMF 2, & IMF 3 with 51 knots', 21)) for knot in knots_51: axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[0].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[0].set_xticklabels(['', '', '', '', '', '']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[0].set_ylim(-5.5, 5.5) axs[0].set_xlim(0.95 * np.pi, 1.55 * np.pi) axs[1].plot(time, time_series, label='Time series') axs[1].plot(time, imfs_31[1, :] + imfs_31[2, :], label=textwrap.fill('Sum of IMF 1 and IMF 2 with 31 knots', 19)) axs[1].plot(time, imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 2 and IMF 3 with 51 knots', 19)) for knot in knots_31: axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[1].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[1].set_xticklabels(['', '', '', '', '', '']) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) axs[1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].set_ylim(-5.5, 5.5) axs[1].set_xlim(0.95 * np.pi, 1.55 * np.pi) axs[2].plot(time, time_series, label='Time series') axs[2].plot(time, imfs_11[1, :], label='IMF 1 with 11 knots') axs[2].plot(time, imfs_31[2, :], label='IMF 2 with 31 knots') axs[2].plot(time, imfs_51[3, :], label='IMF 3 with 51 knots') for knot in knots_11: axs[2].plot(knot *
np.ones(101)
numpy.ones
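The print statements in the knot-comparison plots above report the "DFA fluctuation" as the variance of the residual between the noisy series and a candidate trend, where the trend is the sum of selected IMF rows. A standalone restatement of that metric, assuming imfs is the 2-D array returned by empirical_mode_decomposition with one IMF per row:

import numpy as np

def trend_residual_variance(time_series, imfs, imf_rows):
    # Variance of (series - trend); the trend is the sum of the chosen
    # IMF rows, e.g. rows (1, 2, 3) for the 51-knot decomposition above.
    trend = np.sum(imfs[list(imf_rows), :], axis=0)
    return np.round(np.var(time_series - trend), 3)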
# pylint: disable=protected-access """ Test the wrappers for the C API. """ import os from contextlib import contextmanager import numpy as np import numpy.testing as npt import pandas as pd import pytest import xarray as xr from packaging.version import Version from pygmt import Figure, clib from pygmt.clib.conversion import dataarray_to_matrix from pygmt.clib.session import FAMILIES, VIAS from pygmt.exceptions import ( GMTCLibError, GMTCLibNoSessionError, GMTInvalidInput, GMTVersionError, ) from pygmt.helpers import GMTTempFile TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data") with clib.Session() as _lib: gmt_version = Version(_lib.info["version"]) @contextmanager def mock(session, func, returns=None, mock_func=None): """ Mock a GMT C API function to make it always return a given value. Used to test that exceptions are raised when API functions fail by producing a NULL pointer as output or non-zero status codes. Needed because it's not easy to get some API functions to fail without inducing a Segmentation Fault (which is a good thing because libgmt usually only fails with errors). """ if mock_func is None: def mock_api_function(*args): # pylint: disable=unused-argument """ A mock GMT API function that always returns a given value. """ return returns mock_func = mock_api_function get_libgmt_func = session.get_libgmt_func def mock_get_libgmt_func(name, argtypes=None, restype=None): """ Return our mock function. """ if name == func: return mock_func return get_libgmt_func(name, argtypes, restype) setattr(session, "get_libgmt_func", mock_get_libgmt_func) yield setattr(session, "get_libgmt_func", get_libgmt_func) def test_getitem(): """ Test that I can get correct constants from the C lib. """ ses = clib.Session() assert ses["GMT_SESSION_EXTERNAL"] != -99999 assert ses["GMT_MODULE_CMD"] != -99999 assert ses["GMT_PAD_DEFAULT"] != -99999 assert ses["GMT_DOUBLE"] != -99999 with pytest.raises(GMTCLibError): ses["A_WHOLE_LOT_OF_JUNK"] # pylint: disable=pointless-statement def test_create_destroy_session(): """ Test that create and destroy session are called without errors. """ # Create two session and make sure they are not pointing to the same memory session1 = clib.Session() session1.create(name="test_session1") assert session1.session_pointer is not None session2 = clib.Session() session2.create(name="test_session2") assert session2.session_pointer is not None assert session2.session_pointer != session1.session_pointer session1.destroy() session2.destroy() # Create and destroy a session twice ses = clib.Session() for __ in range(2): with pytest.raises(GMTCLibNoSessionError): ses.session_pointer # pylint: disable=pointless-statement ses.create("session1") assert ses.session_pointer is not None ses.destroy() with pytest.raises(GMTCLibNoSessionError): ses.session_pointer # pylint: disable=pointless-statement def test_create_session_fails(): """ Check that an exception is raised when failing to create a session. """ ses = clib.Session() with mock(ses, "GMT_Create_Session", returns=None): with pytest.raises(GMTCLibError): ses.create("test-session-name") # Should fail if trying to create a session before destroying the old one. ses.create("test1") with pytest.raises(GMTCLibError): ses.create("test2") def test_destroy_session_fails(): """ Fail to destroy session when given bad input. 
""" ses = clib.Session() with pytest.raises(GMTCLibNoSessionError): ses.destroy() ses.create("test-session") with mock(ses, "GMT_Destroy_Session", returns=1): with pytest.raises(GMTCLibError): ses.destroy() ses.destroy() def test_call_module(): """ Run a command to see if call_module works. """ data_fname = os.path.join(TEST_DATA_DIR, "points.txt") out_fname = "test_call_module.txt" with clib.Session() as lib: with GMTTempFile() as out_fname: lib.call_module("info", "{} -C ->{}".format(data_fname, out_fname.name)) assert os.path.exists(out_fname.name) output = out_fname.read().strip() assert output == "11.5309 61.7074 -2.9289 7.8648 0.1412 0.9338" def test_call_module_invalid_arguments(): """ Fails for invalid module arguments. """ with clib.Session() as lib: with pytest.raises(GMTCLibError): lib.call_module("info", "bogus-data.bla") def test_call_module_invalid_name(): """ Fails when given bad input. """ with clib.Session() as lib: with pytest.raises(GMTCLibError): lib.call_module("meh", "") def test_call_module_error_message(): """ Check is the GMT error message was captured. """ with clib.Session() as lib: try: lib.call_module("info", "bogus-data.bla") except GMTCLibError as error: assert "Module 'info' failed with status code" in str(error) assert "gmtinfo [ERROR]: Cannot find file bogus-data.bla" in str(error) def test_method_no_session(): """ Fails when not in a session. """ # Create an instance of Session without "with" so no session is created. lib = clib.Session() with pytest.raises(GMTCLibNoSessionError): lib.call_module("gmtdefaults", "") with pytest.raises(GMTCLibNoSessionError): lib.session_pointer # pylint: disable=pointless-statement def test_parse_constant_single(): """ Parsing a single family argument correctly. """ lib = clib.Session() for family in FAMILIES: parsed = lib._parse_constant(family, valid=FAMILIES) assert parsed == lib[family] def test_parse_constant_composite(): """ Parsing a composite constant argument (separated by |) correctly. """ lib = clib.Session() test_cases = ((family, via) for family in FAMILIES for via in VIAS) for family, via in test_cases: composite = "|".join([family, via]) expected = lib[family] + lib[via] parsed = lib._parse_constant(composite, valid=FAMILIES, valid_modifiers=VIAS) assert parsed == expected def test_parse_constant_fails(): """ Check if the function fails when given bad input. """ lib = clib.Session() test_cases = [ "SOME_random_STRING", "GMT_IS_DATASET|GMT_VIA_MATRIX|GMT_VIA_VECTOR", "GMT_IS_DATASET|NOT_A_PROPER_VIA", "NOT_A_PROPER_FAMILY|GMT_VIA_MATRIX", "NOT_A_PROPER_FAMILY|ALSO_INVALID", ] for test_case in test_cases: with pytest.raises(GMTInvalidInput): lib._parse_constant(test_case, valid=FAMILIES, valid_modifiers=VIAS) # Should also fail if not given valid modifiers but is using them anyway. # This should work... lib._parse_constant( "GMT_IS_DATASET|GMT_VIA_MATRIX", valid=FAMILIES, valid_modifiers=VIAS ) # But this shouldn't. with pytest.raises(GMTInvalidInput): lib._parse_constant( "GMT_IS_DATASET|GMT_VIA_MATRIX", valid=FAMILIES, valid_modifiers=None ) def test_create_data_dataset(): """ Run the function to make sure it doesn't fail badly. 
""" with clib.Session() as lib: # Dataset from vectors data_vector = lib.create_data( family="GMT_IS_DATASET|GMT_VIA_VECTOR", geometry="GMT_IS_POINT", mode="GMT_CONTAINER_ONLY", dim=[10, 20, 1, 0], # columns, rows, layers, dtype ) # Dataset from matrices data_matrix = lib.create_data( family="GMT_IS_DATASET|GMT_VIA_MATRIX", geometry="GMT_IS_POINT", mode="GMT_CONTAINER_ONLY", dim=[10, 20, 1, 0], ) assert data_vector != data_matrix def test_create_data_grid_dim(): """ Create a grid ignoring range and inc. """ with clib.Session() as lib: # Grids from matrices using dim lib.create_data( family="GMT_IS_GRID|GMT_VIA_MATRIX", geometry="GMT_IS_SURFACE", mode="GMT_CONTAINER_ONLY", dim=[10, 20, 1, 0], ) def test_create_data_grid_range(): """ Create a grid specifying range and inc instead of dim. """ with clib.Session() as lib: # Grids from matrices using range and int lib.create_data( family="GMT_IS_GRID|GMT_VIA_MATRIX", geometry="GMT_IS_SURFACE", mode="GMT_CONTAINER_ONLY", ranges=[150.0, 250.0, -20.0, 20.0], inc=[0.1, 0.2], ) def test_create_data_fails(): """ Check that create_data raises exceptions for invalid input and output. """ # Passing in invalid mode with pytest.raises(GMTInvalidInput): with clib.Session() as lib: lib.create_data( family="GMT_IS_DATASET", geometry="GMT_IS_SURFACE", mode="Not_a_valid_mode", dim=[0, 0, 1, 0], ranges=[150.0, 250.0, -20.0, 20.0], inc=[0.1, 0.2], ) # Passing in invalid geometry with pytest.raises(GMTInvalidInput): with clib.Session() as lib: lib.create_data( family="GMT_IS_GRID", geometry="Not_a_valid_geometry", mode="GMT_CONTAINER_ONLY", dim=[0, 0, 1, 0], ranges=[150.0, 250.0, -20.0, 20.0], inc=[0.1, 0.2], ) # If the data pointer returned is None (NULL pointer) with pytest.raises(GMTCLibError): with clib.Session() as lib: with mock(lib, "GMT_Create_Data", returns=None): lib.create_data( family="GMT_IS_DATASET", geometry="GMT_IS_SURFACE", mode="GMT_CONTAINER_ONLY", dim=[11, 10, 2, 0], ) def test_virtual_file(): """ Test passing in data via a virtual file with a Dataset. """ dtypes = "float32 float64 int32 int64 uint32 uint64".split() shape = (5, 3) for dtype in dtypes: with clib.Session() as lib: family = "GMT_IS_DATASET|GMT_VIA_MATRIX" geometry = "GMT_IS_POINT" dataset = lib.create_data( family=family, geometry=geometry, mode="GMT_CONTAINER_ONLY", dim=[shape[1], shape[0], 1, 0], # columns, rows, layers, dtype ) data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape) lib.put_matrix(dataset, matrix=data) # Add the dataset to a virtual file and pass it along to gmt info vfargs = (family, geometry, "GMT_IN|GMT_IS_REFERENCE", dataset) with lib.open_virtual_file(*vfargs) as vfile: with GMTTempFile() as outfile: lib.call_module("info", "{} ->{}".format(vfile, outfile.name)) output = outfile.read(keep_tabs=True) bounds = "\t".join( ["<{:.0f}/{:.0f}>".format(col.min(), col.max()) for col in data.T] ) expected = "<matrix memory>: N = {}\t{}\n".format(shape[0], bounds) assert output == expected def test_virtual_file_fails(): """ Check that opening and closing virtual files raises an exception for non- zero return codes. """ vfargs = ( "GMT_IS_DATASET|GMT_VIA_MATRIX", "GMT_IS_POINT", "GMT_IN|GMT_IS_REFERENCE", None, ) # Mock Open_VirtualFile to test the status check when entering the context. # If the exception is raised, the code won't get to the closing of the # virtual file. 
with clib.Session() as lib, mock(lib, "GMT_Open_VirtualFile", returns=1): with pytest.raises(GMTCLibError): with lib.open_virtual_file(*vfargs): print("Should not get to this code") # Test the status check when closing the virtual file # Mock the opening to return 0 (success) so that we don't open a file that # we won't close later. with clib.Session() as lib, mock(lib, "GMT_Open_VirtualFile", returns=0), mock( lib, "GMT_Close_VirtualFile", returns=1 ): with pytest.raises(GMTCLibError): with lib.open_virtual_file(*vfargs): pass print("Shouldn't get to this code either") def test_virtual_file_bad_direction(): """ Test passing an invalid direction argument. """ with clib.Session() as lib: vfargs = ( "GMT_IS_DATASET|GMT_VIA_MATRIX", "GMT_IS_POINT", "GMT_IS_GRID", # The invalid direction argument 0, ) with pytest.raises(GMTInvalidInput): with lib.open_virtual_file(*vfargs): print("This should have failed") def test_virtualfile_from_vectors(): """ Test the automation for transforming vectors to virtual file dataset. """ dtypes = "float32 float64 int32 int64 uint32 uint64".split() size = 10 for dtype in dtypes: x = np.arange(size, dtype=dtype) y = np.arange(size, size * 2, 1, dtype=dtype) z =
np.arange(size * 2, size * 3, 1, dtype=dtype)
numpy.arange
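The completion above is the third of three equally sized, equally typed vectors that the test feeds to virtualfile_from_vectors. A minimal standalone sketch (plain NumPy only, no GMT session assumed) of that construction:

import numpy as np

size = 10
for dtype in "float32 float64 int32 int64 uint32 uint64".split():
    x = np.arange(size, dtype=dtype)                   # 0 .. 9
    y = np.arange(size, size * 2, 1, dtype=dtype)      # 10 .. 19
    z = np.arange(size * 2, size * 3, 1, dtype=dtype)  # 20 .. 29
    assert x.shape == y.shape == z.shape == (size,)
    assert x.dtype == y.dtype == z.dtype == np.dtype(dtype)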
import numpy as np import tensorflow as tf H = 2 N = 2 M = 3 BS = 10 def my_softmax(arr): max_elements = np.reshape(np.max(arr, axis = 2), (BS, N, 1)) arr = arr - max_elements exp_array = np.exp(arr) print (exp_array) sum_array = np.reshape(np.sum(exp_array, axis=2), (BS, N, 1)) return exp_array /sum_array def masked_softmax(logits, mask, dim): """ Takes masked softmax over given dimension of logits. Inputs: logits: Numpy array. We want to take softmax over dimension dim. mask: Numpy array of same shape as logits. Has 1s where there's real data in logits, 0 where there's padding dim: int. dimension over which to take softmax Returns: masked_logits: Numpy array same shape as logits. This is the same as logits, but with 1e30 subtracted (i.e. very large negative number) in the padding locations. prob_dist: Numpy array same shape as logits. The result of taking softmax over masked_logits in given dimension. Should be 0 in padding locations. Should sum to 1 over given dimension. """ exp_mask = (1 - tf.cast(mask, 'float64')) * (-1e30) # -large where there's padding, 0 elsewhere print (exp_mask) masked_logits = tf.add(logits, exp_mask) # where there's padding, set logits to -large prob_dist = tf.nn.softmax(masked_logits, dim) return masked_logits, prob_dist def test_build_similarity(contexts, questions): w_sim_1 = tf.get_variable('w_sim_1', initializer=w_1) # 2 * H w_sim_2 = tf.get_variable('w_sim_2', initializer=w_2) # 2 * self.hidden_size w_sim_3 = tf.get_variable('w_sim_3', initializer=w_3) # 2 * self.hidden_size q_tile = tf.tile(tf.expand_dims(questions, 0), [N, 1, 1, 1]) # N x BS x M x 2H q_tile = tf.transpose(q_tile, (1, 0, 3, 2)) # BS x N x 2H x M contexts = tf.expand_dims(contexts, -1) # BS x N x 2H x 1 result = (contexts * q_tile) # BS x N x 2H x M tf.assert_equal(tf.shape(result), [BS, N, 2 * H, M]) result = tf.transpose(result, (0, 1, 3, 2)) # BS x N x M x 2H result = tf.reshape(result, (-1, N * M, 2 * H)) # BS x (NxM) x 2H tf.assert_equal(tf.shape(result), [BS, N*M, 2*H]) # w_sim_1 = tf.tile(tf.expand_dims(w_sim_1, 0), [BS, 1]) # w_sim_2 = tf.tile(tf.expand_dims(w_sim_2, 0), [BS, 1]) # w_sim_3 = tf.tile(tf.expand_dims(w_sim_3, 0), [BS, 1]) term1 = tf.matmul(tf.reshape(contexts, (BS * N, 2*H)), tf.expand_dims(w_sim_1, -1)) # BS x N term1 = tf.reshape(term1, (-1, N)) term2 = tf.matmul(tf.reshape(questions, (BS * M, 2*H)), tf.expand_dims(w_sim_2, -1)) # BS x M term2 = tf.reshape(term2, (-1, M)) term3 = tf.matmul(tf.reshape(result, (BS * N * M, 2* H)), tf.expand_dims(w_sim_3, -1)) term3 = tf.reshape(term3, (-1, N, M)) # BS x N x M S = tf.reshape(term1,(-1, N, 1)) + term3 + tf.reshape(term2, (-1, 1, M)) return S def test_build_sim_mask(): context_mask = np.array([True, True]) # BS x N question_mask = np.array([True, True, False]) # BS x M context_mask = np.tile(context_mask, [BS, 1]) question_mask = np.tile(question_mask, [BS, 1]) context_mask = tf.get_variable('context_mask', initializer=context_mask) question_mask = tf.get_variable('question_mask', initializer=question_mask) context_mask = tf.expand_dims(context_mask, -1) # BS x N x 1 question_mask = tf.expand_dims(question_mask, -1) # BS x M x 1 question_mask = tf.transpose(question_mask, (0, 2, 1)) # BS x 1 x M sim_mask = tf.matmul(tf.cast(context_mask, dtype=tf.int32), tf.cast(question_mask, dtype=tf.int32)) # BS x N x M return sim_mask def test_build_c2q(S, S_mask, questions): _, alpha = masked_softmax(S, mask, 2) # BS x N x M return tf.matmul(alpha, questions) def test_build_q2c(S, S_mask, contexts): # S = BS x N x M # contexts = BS 
x N x 2H m = tf.reduce_max(S * tf.cast(S_mask, dtype=tf.float64), axis=2) # BS x N beta = tf.expand_dims(tf.nn.softmax(m), -1) # BS x N x 1 beta = tf.transpose(beta, (0, 2, 1)) q2c = tf.matmul(beta, contexts) return m, beta, q2c def test_concatenation(c2q, q2c): q2c = tf.tile(q2c, (1, N, 1)) output = tf.concat([c2q, q2c], axis=2) tf.assert_equal(tf.shape(output), [BS, N, 4*H]) return output if __name__ == "__main__": w_1 = np.array([1., 2., 3., 4.]) w_2 = np.array([5., 6., 7., 8.]) w_3 = np.array([13., 12., 11., 10.]) c =
np.array([[[1., 2., 3., 4.], [5., 6., 7., 8.]]])
numpy.array
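The prompt above implements masking by adding -1e30 to padded logits before the softmax. A NumPy-only restatement of that trick (a hedged sketch; masked_softmax_np is a hypothetical helper, not part of the original test code) that makes the behaviour easy to check by hand:

import numpy as np

def masked_softmax_np(logits, mask, axis):
    exp_mask = (1 - mask.astype("float64")) * (-1e30)  # -large at padding, 0 elsewhere
    masked_logits = logits + exp_mask
    shifted = masked_logits - masked_logits.max(axis=axis, keepdims=True)
    exps = np.exp(shifted)
    return masked_logits, exps / exps.sum(axis=axis, keepdims=True)

logits = np.array([[1.0, 2.0, 3.0]])
mask = np.array([[1, 1, 0]])  # last position is padding
_, probs = masked_softmax_np(logits, mask, axis=1)
assert np.allclose(probs.sum(axis=1), 1.0)
assert probs[0, 2] < 1e-12  # padding gets ~zero probability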
"""Test the search module""" from collections.abc import Iterable, Sized from io import StringIO from itertools import chain, product from functools import partial import pickle import sys from types import GeneratorType import re import numpy as np import scipy.sparse as sp import pytest from sklearn.utils.fixes import sp_version from sklearn.utils._testing import assert_raises from sklearn.utils._testing import assert_warns from sklearn.utils._testing import assert_warns_message from sklearn.utils._testing import assert_raise_message from sklearn.utils._testing import assert_array_equal from sklearn.utils._testing import assert_array_almost_equal from sklearn.utils._testing import assert_allclose from sklearn.utils._testing import assert_almost_equal from sklearn.utils._testing import ignore_warnings from sklearn.utils._mocking import CheckingClassifier, MockDataFrame from scipy.stats import bernoulli, expon, uniform from sklearn.base import BaseEstimator, ClassifierMixin from sklearn.base import clone from sklearn.exceptions import NotFittedError from sklearn.datasets import make_classification from sklearn.datasets import make_blobs from sklearn.datasets import make_multilabel_classification from sklearn.model_selection import fit_grid_point from sklearn.model_selection import train_test_split from sklearn.model_selection import KFold from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import StratifiedShuffleSplit from sklearn.model_selection import LeaveOneGroupOut from sklearn.model_selection import LeavePGroupsOut from sklearn.model_selection import GroupKFold from sklearn.model_selection import GroupShuffleSplit from sklearn.model_selection import GridSearchCV from sklearn.model_selection import RandomizedSearchCV from sklearn.model_selection import ParameterGrid from sklearn.model_selection import ParameterSampler from sklearn.model_selection._search import BaseSearchCV from sklearn.model_selection._validation import FitFailedWarning from sklearn.svm import LinearSVC, SVC from sklearn.tree import DecisionTreeRegressor from sklearn.tree import DecisionTreeClassifier from sklearn.cluster import KMeans from sklearn.neighbors import KernelDensity from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import f1_score from sklearn.metrics import recall_score from sklearn.metrics import accuracy_score from sklearn.metrics import make_scorer from sklearn.metrics import roc_auc_score from sklearn.metrics.pairwise import euclidean_distances from sklearn.impute import SimpleImputer from sklearn.pipeline import Pipeline from sklearn.linear_model import Ridge, SGDClassifier, LinearRegression from sklearn.experimental import enable_hist_gradient_boosting # noqa from sklearn.ensemble import HistGradientBoostingClassifier from sklearn.model_selection.tests.common import OneTimeSplitter # Neither of the following two estimators inherit from BaseEstimator, # to test hyperparameter search on user-defined classifiers. class MockClassifier: """Dummy classifier to test the parameter search algorithms""" def __init__(self, foo_param=0): self.foo_param = foo_param def fit(self, X, Y): assert len(X) == len(Y) self.classes_ = np.unique(Y) return self def predict(self, T): return T.shape[0] def transform(self, X): return X + self.foo_param def inverse_transform(self, X): return X - self.foo_param predict_proba = predict predict_log_proba = predict decision_function = predict def score(self, X=None, Y=None): if self.foo_param > 1: score = 1. else: score = 0. 
return score def get_params(self, deep=False): return {'foo_param': self.foo_param} def set_params(self, **params): self.foo_param = params['foo_param'] return self class LinearSVCNoScore(LinearSVC): """An LinearSVC classifier that has no score method.""" @property def score(self): raise AttributeError X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) y = np.array([1, 1, 2, 2]) def assert_grid_iter_equals_getitem(grid): assert list(grid) == [grid[i] for i in range(len(grid))] @pytest.mark.parametrize("klass", [ParameterGrid, partial(ParameterSampler, n_iter=10)]) @pytest.mark.parametrize( "input, error_type, error_message", [(0, TypeError, r'Parameter .* is not a dict or a list \(0\)'), ([{'foo': [0]}, 0], TypeError, r'Parameter .* is not a dict \(0\)'), ({'foo': 0}, TypeError, "Parameter.* value is not iterable .*" r"\(key='foo', value=0\)")] ) def test_validate_parameter_input(klass, input, error_type, error_message): with pytest.raises(error_type, match=error_message): klass(input) def test_parameter_grid(): # Test basic properties of ParameterGrid. params1 = {"foo": [1, 2, 3]} grid1 = ParameterGrid(params1) assert isinstance(grid1, Iterable) assert isinstance(grid1, Sized) assert len(grid1) == 3 assert_grid_iter_equals_getitem(grid1) params2 = {"foo": [4, 2], "bar": ["ham", "spam", "eggs"]} grid2 = ParameterGrid(params2) assert len(grid2) == 6 # loop to assert we can iterate over the grid multiple times for i in range(2): # tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2) points = set(tuple(chain(*(sorted(p.items())))) for p in grid2) assert (points == set(("bar", x, "foo", y) for x, y in product(params2["bar"], params2["foo"]))) assert_grid_iter_equals_getitem(grid2) # Special case: empty grid (useful to get default estimator settings) empty = ParameterGrid({}) assert len(empty) == 1 assert list(empty) == [{}] assert_grid_iter_equals_getitem(empty) assert_raises(IndexError, lambda: empty[1]) has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}]) assert len(has_empty) == 4 assert list(has_empty) == [{'C': 1}, {'C': 10}, {}, {'C': .5}] assert_grid_iter_equals_getitem(has_empty) def test_grid_search(): # Test that the best estimator contains the right value for foo_param clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=3, verbose=3) # make sure it selects the smallest parameter in case of ties old_stdout = sys.stdout sys.stdout = StringIO() grid_search.fit(X, y) sys.stdout = old_stdout assert grid_search.best_estimator_.foo_param == 2 assert_array_equal(grid_search.cv_results_["param_foo_param"].data, [1, 2, 3]) # Smoke test the score etc: grid_search.score(X, y) grid_search.predict_proba(X) grid_search.decision_function(X) grid_search.transform(X) # Test exception handling on scoring grid_search.scoring = 'sklearn' assert_raises(ValueError, grid_search.fit, X, y) def test_grid_search_pipeline_steps(): # check that parameters that are estimators are cloned before fitting pipe = Pipeline([('regressor', LinearRegression())]) param_grid = {'regressor': [LinearRegression(), Ridge()]} grid_search = GridSearchCV(pipe, param_grid, cv=2) grid_search.fit(X, y) regressor_results = grid_search.cv_results_['param_regressor'] assert isinstance(regressor_results[0], LinearRegression) assert isinstance(regressor_results[1], Ridge) assert not hasattr(regressor_results[0], 'coef_') assert not hasattr(regressor_results[1], 'coef_') assert regressor_results[0] is not grid_search.best_estimator_ assert regressor_results[1] is not 
grid_search.best_estimator_ # check that we didn't modify the parameter grid that was passed assert not hasattr(param_grid['regressor'][0], 'coef_') assert not hasattr(param_grid['regressor'][1], 'coef_') @pytest.mark.parametrize("SearchCV", [GridSearchCV, RandomizedSearchCV]) def test_SearchCV_with_fit_params(SearchCV): X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = CheckingClassifier(expected_fit_params=['spam', 'eggs']) searcher = SearchCV( clf, {'foo_param': [1, 2, 3]}, cv=2, error_score="raise" ) # The CheckingClassifier generates an assertion error if # a parameter is missing or has length != len(X). err_msg = r"Expected fit parameter\(s\) \['eggs'\] not seen." with pytest.raises(AssertionError, match=err_msg): searcher.fit(X, y, spam=np.ones(10)) err_msg = "Fit parameter spam has length 1; expected" with pytest.raises(AssertionError, match=err_msg): searcher.fit(X, y, spam=np.ones(1), eggs=np.zeros(10)) searcher.fit(X, y, spam=np.ones(10), eggs=np.zeros(10)) @ignore_warnings def test_grid_search_no_score(): # Test grid-search on classifier that has no score function. clf = LinearSVC(random_state=0) X, y = make_blobs(random_state=0, centers=2) Cs = [.1, 1, 10] clf_no_score = LinearSVCNoScore(random_state=0) grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy') grid_search.fit(X, y) grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs}, scoring='accuracy') # smoketest grid search grid_search_no_score.fit(X, y) # check that best params are equal assert grid_search_no_score.best_params_ == grid_search.best_params_ # check that we can call score and that it gives the correct result assert grid_search.score(X, y) == grid_search_no_score.score(X, y) # giving no scoring function raises an error grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs}) assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit, [[1]]) def test_grid_search_score_method(): X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2, random_state=0) clf = LinearSVC(random_state=0) grid = {'C': [.1]} search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y) search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y) search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid, scoring='roc_auc' ).fit(X, y) search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y) # Check warning only occurs in situation where behavior changed: # estimator requires score method to compete with scoring parameter score_no_scoring = search_no_scoring.score(X, y) score_accuracy = search_accuracy.score(X, y) score_no_score_auc = search_no_score_method_auc.score(X, y) score_auc = search_auc.score(X, y) # ensure the test is sane assert score_auc < 1.0 assert score_accuracy < 1.0 assert score_auc != score_accuracy assert_almost_equal(score_accuracy, score_no_scoring) assert_almost_equal(score_auc, score_no_score_auc) def test_grid_search_groups(): # Check if ValueError (when groups is None) propagates to GridSearchCV # And also check if groups is correctly passed to the cv object rng =
np.random.RandomState(0)
numpy.random.RandomState
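The completion seeds a RandomState so that the group labels drawn later in test_grid_search_groups are reproducible across runs. A minimal sketch of that seeded-RNG pattern (standalone, outside the sklearn test):

import numpy as np

rng = np.random.RandomState(0)
groups_a = rng.randint(0, 3, size=15)
groups_b = np.random.RandomState(0).randint(0, 3, size=15)
assert (groups_a == groups_b).all()  # same seed -> identical draws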
import os import numpy as np import cv2 import albumentations from PIL import Image from torch.utils.data import Dataset from taming.data.sflckr import SegmentationBase # for examples included in repo class Examples(SegmentationBase): def __init__(self, size=256, random_crop=False, interpolation="bicubic"): super().__init__(data_csv="data/ade20k_examples.txt", data_root="data/ade20k_images", segmentation_root="data/ade20k_segmentations", size=size, random_crop=random_crop, interpolation=interpolation, n_labels=151, shift_segmentation=False) # With semantic map and scene label class ADE20kBase(Dataset): def __init__(self, config=None, size=None, random_crop=False, interpolation="bicubic", crop_size=None): self.split = self.get_split() self.n_labels = 151 # unknown + 150 self.data_csv = {"train": "data/ade20k_train.txt", "validation": "data/ade20k_test.txt"}[self.split] self.data_root = "./data/ade20k_root" with open(os.path.join(self.data_root, "sceneCategories.txt"), "r") as f: self.scene_categories = f.read().splitlines() self.scene_categories = dict(line.split() for line in self.scene_categories) with open(self.data_csv, "r") as f: self.image_paths = f.read().splitlines() self._length = len(self.image_paths) ss = self.split if ss=='train': ss='training' self.labels = { "relative_file_path_": [l for l in self.image_paths], "file_path_": [os.path.join(self.data_root, "images",ss, l) for l in self.image_paths], "relative_segmentation_path_": [l.replace(".jpg", ".png") for l in self.image_paths], "segmentation_path_": [os.path.join(self.data_root, "annotations",ss, l.replace(".jpg", ".png")) for l in self.image_paths], "scene_category": [self.scene_categories[l.replace(".jpg", "")] for l in self.image_paths], } size = None if size is not None and size<=0 else size self.size = size if crop_size is None: self.crop_size = size if size is not None else None else: self.crop_size = crop_size if self.size is not None: self.interpolation = interpolation self.interpolation = { "nearest": cv2.INTER_NEAREST, "bilinear": cv2.INTER_LINEAR, "bicubic": cv2.INTER_CUBIC, "area": cv2.INTER_AREA, "lanczos": cv2.INTER_LANCZOS4}[self.interpolation] self.image_rescaler = albumentations.SmallestMaxSize(max_size=self.size, interpolation=self.interpolation) self.segmentation_rescaler = albumentations.SmallestMaxSize(max_size=self.size, interpolation=cv2.INTER_NEAREST) if crop_size is not None: self.center_crop = not random_crop if self.center_crop: self.cropper = albumentations.CenterCrop(height=self.crop_size, width=self.crop_size) else: self.cropper = albumentations.RandomCrop(height=self.crop_size, width=self.crop_size) self.preprocessor = self.cropper def __len__(self): return self._length def __getitem__(self, i): example = dict((k, self.labels[k][i]) for k in self.labels) image = Image.open(example["file_path_"]) if not image.mode == "RGB": image = image.convert("RGB") image =
np.array(image)
numpy.array
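The completion converts a PIL image to a NumPy array inside __getitem__. A minimal sketch of that step, using an in-memory synthetic image rather than an ADE20k file:

import numpy as np
from PIL import Image

image = Image.new("RGB", (64, 48), color=(255, 0, 0))  # stand-in for a dataset file
arr = np.array(image)
assert arr.shape == (48, 64, 3)  # PIL size is (W, H); the array is (H, W, C)
assert arr.dtype == np.uint8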
import time import h5py import hdbscan import numpy as np import torch from sklearn.cluster import MeanShift from pytorch3dunet.datasets.hdf5 import SliceBuilder from pytorch3dunet.unet3d.utils import get_logger from pytorch3dunet.unet3d.utils import unpad logger = get_logger('UNet3DPredictor') class _AbstractPredictor: def __init__(self, model, loader, output_file, config, **kwargs): self.model = model self.loader = loader self.output_file = output_file self.config = config self.predictor_config = kwargs @staticmethod def _volume_shape(dataset): # TODO: support multiple internal datasets raw = dataset.raws[0] if raw.ndim == 3: return raw.shape else: return raw.shape[1:] @staticmethod def _get_output_dataset_names(number_of_datasets, prefix='predictions'): if number_of_datasets == 1: return [prefix] else: return [f'{prefix}{i}' for i in range(number_of_datasets)] def predict(self): raise NotImplementedError class StandardPredictor(_AbstractPredictor): """ Applies the model on the given dataset and saves the result in the `output_file` in the H5 format. Predictions from the network are kept in memory. If the results from the network don't fit in into RAM use `LazyPredictor` instead. The output dataset names inside the H5 is given by `des_dataset_name` config argument. If the argument is not present in the config 'predictions{n}' is used as a default dataset name, where `n` denotes the number of the output head from the network. Args: model (Unet3D): trained 3D UNet model used for prediction data_loader (torch.utils.data.DataLoader): input data loader output_file (str): path to the output H5 file config (dict): global config dict """ def __init__(self, model, loader, output_file, config, **kwargs): super().__init__(model, loader, output_file, config, **kwargs) def predict(self): out_channels = self.config['model'].get('out_channels') if out_channels is None: out_channels = self.config['model']['dt_out_channels'] prediction_channel = self.config.get('prediction_channel', None) if prediction_channel is not None: logger.info(f"Using only channel '{prediction_channel}' from the network output") device = self.config['device'] output_heads = self.config['model'].get('output_heads', 1) logger.info(f'Running prediction on {len(self.loader)} batches...') # dimensionality of the the output predictions volume_shape = self._volume_shape(self.loader.dataset) if prediction_channel is None: prediction_maps_shape = (out_channels,) + volume_shape else: # single channel prediction map prediction_maps_shape = (1,) + volume_shape logger.info(f'The shape of the output prediction maps (CDHW): {prediction_maps_shape}') avoid_block_artifacts = self.predictor_config.get('avoid_block_artifacts', True) logger.info(f'Avoid block artifacts: {avoid_block_artifacts}') # create destination H5 file h5_output_file = h5py.File(self.output_file, 'w') # allocate prediction and normalization arrays logger.info('Allocating prediction and normalization arrays...') prediction_maps, normalization_masks = self._allocate_prediction_maps(prediction_maps_shape, output_heads, h5_output_file) # Sets the module in evaluation mode explicitly (necessary for batchnorm/dropout layers if present) self.model.eval() # Set the `testing=true` flag otherwise the final Softmax/Sigmoid won't be applied! 
self.model.testing = True # Run predictions on the entire input dataset with torch.no_grad(): for batch, indices in self.loader: # send batch to device batch = batch.to(device) # forward pass predictions = self.model(batch) # wrap predictions into a list if there is only one output head from the network if output_heads == 1: predictions = [predictions] # for each output head for prediction, prediction_map, normalization_mask in zip(predictions, prediction_maps, normalization_masks): # convert to numpy array prediction = prediction.cpu().numpy() # for each batch sample for pred, index in zip(prediction, indices): # save patch index: (C,D,H,W) if prediction_channel is None: channel_slice = slice(0, out_channels) else: channel_slice = slice(0, 1) index = (channel_slice,) + index if prediction_channel is not None: # use only the 'prediction_channel' logger.info(f"Using channel '{prediction_channel}'...") pred = np.expand_dims(pred[prediction_channel], axis=0) logger.info(f'Saving predictions for slice:{index}...') if avoid_block_artifacts: # unpad in order to avoid block artifacts in the output probability maps u_prediction, u_index = unpad(pred, index, volume_shape) # accumulate probabilities into the output prediction array prediction_map[u_index] += u_prediction # count voxel visits for normalization normalization_mask[u_index] += 1 else: # accumulate probabilities into the output prediction array prediction_map[index] += pred # count voxel visits for normalization normalization_mask[index] += 1 # save results to self._save_results(prediction_maps, normalization_masks, output_heads, h5_output_file, self.loader.dataset) # close the output H5 file h5_output_file.close() def _allocate_prediction_maps(self, output_shape, output_heads, output_file): # initialize the output prediction arrays prediction_maps = [np.zeros(output_shape, dtype='float32') for _ in range(output_heads)] # initialize normalization mask in order to average out probabilities of overlapping patches normalization_masks = [np.zeros(output_shape, dtype='uint8') for _ in range(output_heads)] return prediction_maps, normalization_masks def _save_results(self, prediction_maps, normalization_masks, output_heads, output_file, dataset): # save probability maps prediction_datasets = self._get_output_dataset_names(output_heads, prefix='predictions') for prediction_map, normalization_mask, prediction_dataset in zip(prediction_maps, normalization_masks, prediction_datasets): prediction_map = prediction_map / normalization_mask if dataset.mirror_padding: pad_width = dataset.pad_width logger.info(f'Dataset loaded with mirror padding, pad_width: {pad_width}. Cropping before saving...') prediction_map = prediction_map[:, pad_width:-pad_width, pad_width:-pad_width, pad_width:-pad_width] logger.info(f'Saving predictions to: {output_file}/{prediction_dataset}...') output_file.create_dataset(prediction_dataset, data=prediction_map, compression="gzip") class LazyPredictor(StandardPredictor): """ Applies the model on the given dataset and saves the result in the `output_file` in the H5 format. Predicted patches are directly saved into the H5 and they won't be stored in memory. Since this predictor is slower than the `StandardPredictor` it should only be used when the predicted volume does not fit into RAM. The output dataset names inside the H5 is given by `des_dataset_name` config argument. 
If the argument is not present in the config 'predictions{n}' is used as a default dataset name, where `n` denotes the number of the output head from the network. Args: model (Unet3D): trained 3D UNet model used for prediction data_loader (torch.utils.data.DataLoader): input data loader output_file (str): path to the output H5 file config (dict): global config dict """ def __init__(self, model, loader, output_file, config, **kwargs): super().__init__(model, loader, output_file, config, **kwargs) def _allocate_prediction_maps(self, output_shape, output_heads, output_file): # allocate datasets for probability maps prediction_datasets = self._get_output_dataset_names(output_heads, prefix='predictions') prediction_maps = [ output_file.create_dataset(dataset_name, shape=output_shape, dtype='float32', chunks=True, compression='gzip') for dataset_name in prediction_datasets] # allocate datasets for normalization masks normalization_datasets = self._get_output_dataset_names(output_heads, prefix='normalization') normalization_masks = [ output_file.create_dataset(dataset_name, shape=output_shape, dtype='uint8', chunks=True, compression='gzip') for dataset_name in normalization_datasets] return prediction_maps, normalization_masks def _save_results(self, prediction_maps, normalization_masks, output_heads, output_file, dataset): if dataset.mirror_padding: logger.warn( f'Mirror padding unsupported in LazyPredictor. Output predictions will be padded with pad_width: {dataset.pad_width}') prediction_datasets = self._get_output_dataset_names(output_heads, prefix='predictions') normalization_datasets = self._get_output_dataset_names(output_heads, prefix='normalization') # normalize the prediction_maps inside the H5 for prediction_map, normalization_mask, prediction_dataset, normalization_dataset in zip(prediction_maps, normalization_masks, prediction_datasets, normalization_datasets): # split the volume into 4 parts and load each into the memory separately logger.info(f'Normalizing {prediction_dataset}...') z, y, x = prediction_map.shape[1:] # take slices which are 1/27 of the original volume patch_shape = (z // 3, y // 3, x // 3) for index in SliceBuilder._build_slices(prediction_map, patch_shape=patch_shape, stride_shape=patch_shape): logger.info(f'Normalizing slice: {index}') prediction_map[index] /= normalization_mask[index] # make sure to reset the slice that has been visited already in order to avoid 'double' normalization # when the patches overlap with each other normalization_mask[index] = 1 logger.info(f'Deleting {normalization_dataset}...') del output_file[normalization_dataset] class EmbeddingsPredictor(_AbstractPredictor): """ Applies the embedding model on the given dataset and saves the result in the `output_file` in the H5 format. The resulting volume is the segmentation itself (not the embedding vectors) obtained by clustering embeddings with HDBSCAN or MeanShift algorithm patch by patch and then stitching the patches together. 
""" def __init__(self, model, loader, output_file, config, clustering, iou_threshold=0.7, noise_label=-1, **kwargs): super().__init__(model, loader, output_file, config, **kwargs) self.iou_threshold = iou_threshold self.noise_label = noise_label self.clustering = clustering assert clustering in ['hdbscan', 'meanshift'], 'Only HDBSCAN and MeanShift are supported' logger.info(f'IoU threshold: {iou_threshold}') self.clustering_name = clustering self.clustering = self._get_clustering(clustering, kwargs) def predict(self): device = self.config['device'] output_heads = self.config['model'].get('output_heads', 1) logger.info(f'Running prediction on {len(self.loader)} patches...') # dimensionality of the the output segmentation volume_shape = self._volume_shape(self.loader.dataset) logger.info(f'The shape of the output segmentation (DHW): {volume_shape}') logger.info('Allocating segmentation array...') # initialize the output prediction arrays output_segmentations = [np.zeros(volume_shape, dtype='int32') for _ in range(output_heads)] # initialize visited_voxels arrays visited_voxels_arrays = [np.zeros(volume_shape, dtype='uint8') for _ in range(output_heads)] # Sets the module in evaluation mode explicitly self.model.eval() self.model.testing = True # Run predictions on the entire input dataset with torch.no_grad(): for batch, indices in self.loader: # logger.info(f'Predicting embeddings for slice:{index}') # send batch to device batch = batch.to(device) # forward pass embeddings = self.model(batch) # wrap predictions into a list if there is only one output head from the network if output_heads == 1: embeddings = [embeddings] for prediction, output_segmentation, visited_voxels_array in zip(embeddings, output_segmentations, visited_voxels_arrays): # convert to numpy array prediction = prediction.cpu().numpy() # iterate sequentially because of the current simple stitching that we're using for pred, index in zip(prediction, indices): # convert embeddings to segmentation with hdbscan clustering segmentation = self._embeddings_to_segmentation(pred) # stitch patches self._merge_segmentation(segmentation, index, output_segmentation, visited_voxels_array) # save results with h5py.File(self.output_file, 'w') as output_file: prediction_datasets = self._get_output_dataset_names(output_heads, prefix=f'segmentation/{self.clustering_name}') for output_segmentation, prediction_dataset in zip(output_segmentations, prediction_datasets): logger.info(f'Saving predictions to: {output_file}/{prediction_dataset}...') output_file.create_dataset(prediction_dataset, data=output_segmentation, compression="gzip") def _embeddings_to_segmentation(self, embeddings): """ Cluster embeddings vectors with HDBSCAN and return the segmented volume. Args: embeddings (ndarray): 4D (CDHW) embeddings tensor Returns: 3D (DHW) segmentation """ # shape of the output segmentation output_shape = embeddings.shape[1:] # reshape (C, D, H, W) -> (C, D * H * W) and transpose -> (D * H * W, C) flattened_embeddings = embeddings.reshape(embeddings.shape[0], -1).transpose() logger.info('Clustering embeddings...') # perform clustering and reshape in order to get the segmentation volume start = time.time() clusters = self.clustering.fit_predict(flattened_embeddings).reshape(output_shape) logger.info( f'Number of clusters found by {self.clustering}: {np.max(clusters)}. 
Duration: {time.time() - start} sec.') return clusters def _merge_segmentation(self, segmentation, index, output_segmentation, visited_voxels_array): """ Given the `segmentation` patch, its `index` in the `output_segmentation` array and the array visited voxels merge the segmented patch (`segmentation`) into the `output_segmentation` Args: segmentation (ndarray): segmented patch index (tuple): position of the patch inside `output_segmentation` volume output_segmentation (ndarray): current state of the output segmentation visited_voxels_array (ndarray): array of voxels visited so far (same size as `output_segmentation`); visited voxels will be marked by a number greater than 0 """ index = tuple(index) # get new unassigned label max_label = np.max(output_segmentation) + 1 # make sure there are no clashes between current segmentation patch and the output_segmentation # but keep the noise label noise_mask = segmentation == self.noise_label segmentation += int(max_label) segmentation[noise_mask] = self.noise_label # get the overlap mask in the current patch overlap_mask = visited_voxels_array[index] > 0 # get the new labels inside the overlap_mask new_labels = np.unique(segmentation[overlap_mask]) merged_labels = self._merge_labels(output_segmentation[index], new_labels, segmentation) # relabel new segmentation with the merged labels for current_label, new_label in merged_labels: segmentation[segmentation == new_label] = current_label # update the output_segmentation output_segmentation[index] = segmentation # visit the patch visited_voxels_array[index] += 1 def _merge_labels(self, current_segmentation, new_labels, new_segmentation): def _most_frequent_label(labels): unique, counts = np.unique(labels, return_counts=True) ind =
np.argmax(counts)
numpy.argmax
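The completion finishes the _most_frequent_label helper inside _merge_labels: np.unique returns the sorted labels together with their counts, and argmax picks the mode. A standalone sketch of the same idiom:

import numpy as np

def most_frequent_label(labels):
    # np.unique returns the sorted labels and their counts; argmax picks the mode
    unique, counts = np.unique(labels, return_counts=True)
    return unique[np.argmax(counts)]

assert most_frequent_label(np.array([3, 1, 3, 2, 3, 1])) == 3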
# ________ # / # \ / # \ / # \/ import random import textwrap import emd_mean import AdvEMDpy import emd_basis import emd_utils import numpy as np import pandas as pd import cvxpy as cvx import seaborn as sns import matplotlib.pyplot as plt from scipy.integrate import odeint from scipy.ndimage import gaussian_filter from emd_utils import time_extension, Utility from scipy.interpolate import CubicSpline from emd_hilbert import Hilbert, hilbert_spectrum from emd_preprocess import Preprocess from emd_mean import Fluctuation from AdvEMDpy import EMD # alternate packages from PyEMD import EMD as pyemd0215 import emd as emd040 sns.set(style='darkgrid') pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001) pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time) pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series) # plot 0 - addition fig = plt.figure(figsize=(9, 4)) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('First Iteration of Sifting Algorithm') plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1) plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()], c='r', label=r'$M(t_i)$', zorder=2) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4) plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()], c='c', label=r'$m(t_j)$', zorder=3) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5) plt.yticks(ticks=[-2, -1, 0, 1, 2]) plt.xticks(ticks=[0, np.pi, 2 * np.pi], labels=[r'0', r'$\pi$', r'$2\pi$']) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/pseudo_algorithm.png') plt.show() knots = np.arange(12) time = np.linspace(0, 11, 1101) basis = emd_basis.Basis(time=time, time_series=time) b_spline_basis = basis.cubic_b_spline(knots) chsi_basis = basis.chsi_basis(knots) # plot 1 plt.title('Non-Natural Cubic B-Spline Bases at Boundary') plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $') plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $') plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $') plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $') plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $') plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $']) plt.xlim(4.4, 6.6) plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.legend(loc='upper left') plt.savefig('jss_figures/boundary_bases.png') plt.show() # plot 1a - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) knots_uniform = np.linspace(0, 2 * np.pi, 51) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0] fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) 
plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Uniform Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, Linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Uniform Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Uniform Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots_uniform)): axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_uniform.png') plt.show() # plot 1b - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=1, verbose=False) fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Statically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, Linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Statically Optimised Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Statically Optimised Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots)): axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_1.png') plt.show() # plot 1c - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=2, verbose=False) fig, 
axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Dynamically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, Linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Dynamically Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Dynamically Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots[i])): axs[i].plot(knots[i][j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_2.png') plt.show() # plot 1d - addition window = 81 fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Filtering Demonstration') axs[1].set_title('Zoomed Region') preprocess_time = pseudo_alg_time.copy() np.random.seed(1) random.seed(1) preprocess_time_series = pseudo_alg_time_series + np.random.normal(0, 0.1, len(preprocess_time)) for i in random.sample(range(1000), 500): preprocess_time_series[i] += np.random.normal(0, 1) preprocess = Preprocess(time=preprocess_time, time_series=preprocess_time_series) axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[0].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[0].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12)) axs[0].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize interpolation filter', 14)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) 
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[1].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[1].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12)) axs[1].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize interpolation filter', 14)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_filter.png') plt.show() # plot 1e - addition fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Smoothing Demonstration') axs[1].set_title('Zoomed Region') axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[0].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) downsampled_and_decimated = preprocess.downsample() axs[0].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 11)) downsampled = preprocess.downsample(decimate=False) axs[0].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[1].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) 
axs[1].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 13)) axs[1].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.06, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.06, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_smooth.png') plt.show() # plot 2 fig, axs = plt.subplots(1, 2, sharey=True) axs[0].set_title('Cubic B-Spline Bases') axs[0].plot(time, b_spline_basis[2, :].T, '--', label='Basis 1') axs[0].plot(time, b_spline_basis[3, :].T, '--', label='Basis 2') axs[0].plot(time, b_spline_basis[4, :].T, '--', label='Basis 3') axs[0].plot(time, b_spline_basis[5, :].T, '--', label='Basis 4') axs[0].legend(loc='upper left') axs[0].plot(5 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].plot(6 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].set_xticks([5, 6]) axs[0].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[0].set_xlim(4.5, 6.5) axs[1].set_title('Cubic Hermite Spline Bases') axs[1].plot(time, chsi_basis[10, :].T, '--') axs[1].plot(time, chsi_basis[11, :].T, '--') axs[1].plot(time, chsi_basis[12, :].T, '--') axs[1].plot(time, chsi_basis[13, :].T, '--') axs[1].plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].set_xticks([5, 6]) axs[1].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[1].set_xlim(4.5, 6.5) plt.savefig('jss_figures/comparing_bases.png') plt.show() # plot 3 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_dash = maxima_y[-1] * np.ones_like(max_dash_time) min_dash_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_dash = minima_y[-1] * np.ones_like(min_dash_time) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) max_discard = maxima_y[-1] max_discard_time = minima_x[-1] - maxima_x[-1] + minima_x[-1] max_discard_dash_time = np.linspace(max_discard_time - width, max_discard_time + width, 101) max_discard_dash = max_discard * np.ones_like(max_discard_dash_time) dash_2_time = np.linspace(minima_x[-1], max_discard_time, 101) dash_2 = np.linspace(minima_y[-1], max_discard, 101) end_point_time = time[-1] end_point = time_series[-1] time_reflect = np.linspace((5 - a) * np.pi, (5 + a) * np.pi, 101) time_series_reflect = np.flip(np.cos(np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101)) + np.cos(5 * np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101))) time_series_anti_reflect = time_series_reflect[0] - time_series_reflect utils = emd_utils.Utility(time=time, time_series=time_series_anti_reflect) anti_max_bool = utils.max_bool_func_1st_order_fd() anti_max_point_time = time_reflect[anti_max_bool] 
anti_max_point = time_series_anti_reflect[anti_max_bool] utils = emd_utils.Utility(time=time, time_series=time_series_reflect) no_anchor_max_time = time_reflect[utils.max_bool_func_1st_order_fd()] no_anchor_max = time_series_reflect[utils.max_bool_func_1st_order_fd()] point_1 = 5.4 length_distance = np.linspace(maxima_y[-1], minima_y[-1], 101) length_distance_time = point_1 * np.pi * np.ones_like(length_distance) length_time = np.linspace(point_1 * np.pi - width, point_1 * np.pi + width, 101) length_top = maxima_y[-1] * np.ones_like(length_time) length_bottom = minima_y[-1] * np.ones_like(length_time) point_2 = 5.2 length_distance_2 = np.linspace(time_series[-1], minima_y[-1], 101) length_distance_time_2 = point_2 * np.pi * np.ones_like(length_distance_2) length_time_2 = np.linspace(point_2 * np.pi - width, point_2 * np.pi + width, 101) length_top_2 = time_series[-1] * np.ones_like(length_time_2) length_bottom_2 = minima_y[-1] * np.ones_like(length_time_2) symmetry_axis_1_time = minima_x[-1] * np.ones(101) symmetry_axis_2_time = time[-1] * np.ones(101) symmetry_axis = np.linspace(-2, 2, 101) end_time = np.linspace(time[-1] - width, time[-1] + width, 101) end_signal = time_series[-1] * np.ones_like(end_time) anti_symmetric_time = np.linspace(time[-1] - 0.5, time[-1] + 0.5, 101) anti_symmetric_signal = time_series[-1] * np.ones_like(anti_symmetric_time) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.plot(time, time_series, LineWidth=2, label='Signal') plt.title('Symmetry Edge Effects Example') plt.plot(time_reflect, time_series_reflect, 'g--', LineWidth=2, label=textwrap.fill('Symmetric signal', 10)) plt.plot(time_reflect[:51], time_series_anti_reflect[:51], '--', c='purple', LineWidth=2, label=textwrap.fill('Anti-symmetric signal', 10)) plt.plot(max_dash_time, max_dash, 'k-') plt.plot(min_dash_time, min_dash, 'k-') plt.plot(dash_1_time, dash_1, 'k--') plt.plot(dash_2_time, dash_2, 'k--') plt.plot(length_distance_time, length_distance, 'k--') plt.plot(length_distance_time_2, length_distance_2, 'k--') plt.plot(length_time, length_top, 'k-') plt.plot(length_time, length_bottom, 'k-') plt.plot(length_time_2, length_top_2, 'k-') plt.plot(length_time_2, length_bottom_2, 'k-') plt.plot(end_time, end_signal, 'k-') plt.plot(symmetry_axis_1_time, symmetry_axis, 'r--', zorder=1) plt.plot(anti_symmetric_time, anti_symmetric_signal, 'r--', zorder=1) plt.plot(symmetry_axis_2_time, symmetry_axis, 'r--', label=textwrap.fill('Axes of symmetry', 10), zorder=1) plt.text(5.1 * np.pi, -0.7, r'$\beta$L') plt.text(5.34 * np.pi, -0.05, 'L') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(max_discard_time, max_discard, c='purple', zorder=4, label=textwrap.fill('Symmetric Discard maxima', 10)) plt.scatter(end_point_time, end_point, c='orange', zorder=4, label=textwrap.fill('Symmetric Anchor maxima', 10)) plt.scatter(anti_max_point_time, anti_max_point, c='green', zorder=4, label=textwrap.fill('Anti-Symmetric maxima', 10)) plt.scatter(no_anchor_max_time, no_anchor_max, c='gray', zorder=4, label=textwrap.fill('Symmetric maxima', 10)) plt.xlim(3.9 * np.pi, 5.5 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_symmetry_anti.png') plt.show() 
# plot 4 a = 0.21 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_1 = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_dash_2 = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_dash_time_1 = maxima_x[-1] * np.ones_like(max_dash_1) max_dash_time_2 = maxima_x[-2] * np.ones_like(max_dash_1) min_dash_1 = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_dash_2 = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_dash_time_1 = minima_x[-1] * np.ones_like(min_dash_1) min_dash_time_2 = minima_x[-2] * np.ones_like(min_dash_1) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) dash_2_time = np.linspace(maxima_x[-1], minima_x[-2], 101) dash_2 = np.linspace(maxima_y[-1], minima_y[-2], 101) s1 = (minima_y[-2] - maxima_y[-1]) / (minima_x[-2] - maxima_x[-1]) slope_based_maximum_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) slope_based_maximum = minima_y[-1] + (slope_based_maximum_time - minima_x[-1]) * s1 max_dash_time_3 = slope_based_maximum_time * np.ones_like(max_dash_1) max_dash_3 = np.linspace(slope_based_maximum - width, slope_based_maximum + width, 101) dash_3_time = np.linspace(minima_x[-1], slope_based_maximum_time, 101) dash_3 = np.linspace(minima_y[-1], slope_based_maximum, 101) s2 = (minima_y[-1] - maxima_y[-1]) / (minima_x[-1] - maxima_x[-1]) slope_based_minimum_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) slope_based_minimum = slope_based_maximum - (slope_based_maximum_time - slope_based_minimum_time) * s2 min_dash_time_3 = slope_based_minimum_time * np.ones_like(min_dash_1) min_dash_3 = np.linspace(slope_based_minimum - width, slope_based_minimum + width, 101) dash_4_time = np.linspace(slope_based_maximum_time, slope_based_minimum_time) dash_4 = np.linspace(slope_based_maximum, slope_based_minimum) maxima_dash = np.linspace(2.5 - width, 2.5 + width, 101) maxima_dash_time_1 = maxima_x[-2] * np.ones_like(maxima_dash) maxima_dash_time_2 = maxima_x[-1] * np.ones_like(maxima_dash) maxima_dash_time_3 = slope_based_maximum_time * np.ones_like(maxima_dash) maxima_line_dash_time = np.linspace(maxima_x[-2], slope_based_maximum_time, 101) maxima_line_dash = 2.5 * np.ones_like(maxima_line_dash_time) minima_dash = np.linspace(-3.4 - width, -3.4 + width, 101) minima_dash_time_1 = minima_x[-2] * np.ones_like(minima_dash) minima_dash_time_2 = minima_x[-1] * np.ones_like(minima_dash) minima_dash_time_3 = slope_based_minimum_time *
np.ones_like(minima_dash)
numpy.ones_like
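The completion above, np.ones_like(minima_dash), is the idiom this plotting script uses everywhere: pair a varying coordinate array with a constant one of identical shape and dtype. A minimal sketch of the pattern (variable names here are illustrative, not taken from the script):

import numpy as np

x_position = 2.0                             # where the vertical dash sits
dash_y = np.linspace(-0.2, 0.2, 101)         # the dash itself
dash_x = x_position * np.ones_like(dash_y)   # constant x, same shape as dash_y
assert dash_x.shape == dash_y.shape

Plotting dash_x against dash_y with a 'k--' style then draws a short vertical marker, exactly as the max_dash/min_dash pairs are drawn above.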
# ________ # / # \ / # \ / # \/ import random import textwrap import emd_mean import AdvEMDpy import emd_basis import emd_utils import numpy as np import pandas as pd import cvxpy as cvx import seaborn as sns import matplotlib.pyplot as plt from scipy.integrate import odeint from scipy.ndimage import gaussian_filter from emd_utils import time_extension, Utility from scipy.interpolate import CubicSpline from emd_hilbert import Hilbert, hilbert_spectrum from emd_preprocess import Preprocess from emd_mean import Fluctuation from AdvEMDpy import EMD # alternate packages from PyEMD import EMD as pyemd0215 import emd as emd040 sns.set(style='darkgrid') pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001) pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time) pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series) # plot 0 - addition fig = plt.figure(figsize=(9, 4)) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('First Iteration of Sifting Algorithm') plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1) plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()], c='r', label=r'$M(t_i)$', zorder=2) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4) plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()], c='c', label=r'$m(t_j)$', zorder=3) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5) plt.yticks(ticks=[-2, -1, 0, 1, 2]) plt.xticks(ticks=[0, np.pi, 2 * np.pi], labels=[r'0', r'$\pi$', r'$2\pi$']) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/pseudo_algorithm.png') plt.show() knots = np.arange(12) time = np.linspace(0, 11, 1101) basis = emd_basis.Basis(time=time, time_series=time) b_spline_basis = basis.cubic_b_spline(knots) chsi_basis = basis.chsi_basis(knots) # plot 1 plt.title('Non-Natural Cubic B-Spline Bases at Boundary') plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $') plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $') plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $') plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $') plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $') plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $']) plt.xlim(4.4, 6.6) plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.legend(loc='upper left') plt.savefig('jss_figures/boundary_bases.png') plt.show() # plot 1a - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) knots_uniform = np.linspace(0, 2 * np.pi, 51) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0] fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) 
plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Uniform Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Uniform Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Uniform Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots_uniform)): axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_uniform.png') plt.show() # plot 1b - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=1, verbose=False) fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Statically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Statically Optimised Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Statically Optimised Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots)): axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_1.png') plt.show() # plot 1c - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=2, verbose=False) fig,
axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Dynamically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Dynamically Optimised Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Dynamically Optimised Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots[i])): axs[i].plot(knots[i][j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_2.png') plt.show() # plot 1d - addition window = 81 fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Filtering Demonstration') axs[1].set_title('Zoomed Region') preprocess_time = pseudo_alg_time.copy() np.random.seed(1) random.seed(1) preprocess_time_series = pseudo_alg_time_series + np.random.normal(0, 0.1, len(preprocess_time)) for i in random.sample(range(1000), 500): preprocess_time_series[i] += np.random.normal(0, 1) preprocess = Preprocess(time=preprocess_time, time_series=preprocess_time_series) axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[0].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[0].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize filter', 12)) axs[0].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize interpolation filter', 14)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[1].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[1].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize filter', 12)) axs[1].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize interpolation filter', 14)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_filter.png') plt.show() # plot 1e - addition fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Smoothing Demonstration') axs[1].set_title('Zoomed Region') axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[0].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) downsampled_and_decimated = preprocess.downsample() axs[0].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 11)) downsampled = preprocess.downsample(decimate=False) axs[0].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[1].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13))
axs[1].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 13)) axs[1].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.06, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.06, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_smooth.png') plt.show() # plot 2 fig, axs = plt.subplots(1, 2, sharey=True) axs[0].set_title('Cubic B-Spline Bases') axs[0].plot(time, b_spline_basis[2, :].T, '--', label='Basis 1') axs[0].plot(time, b_spline_basis[3, :].T, '--', label='Basis 2') axs[0].plot(time, b_spline_basis[4, :].T, '--', label='Basis 3') axs[0].plot(time, b_spline_basis[5, :].T, '--', label='Basis 4') axs[0].legend(loc='upper left') axs[0].plot(5 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].plot(6 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].set_xticks([5, 6]) axs[0].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[0].set_xlim(4.5, 6.5) axs[1].set_title('Cubic Hermite Spline Bases') axs[1].plot(time, chsi_basis[10, :].T, '--') axs[1].plot(time, chsi_basis[11, :].T, '--') axs[1].plot(time, chsi_basis[12, :].T, '--') axs[1].plot(time, chsi_basis[13, :].T, '--') axs[1].plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].set_xticks([5, 6]) axs[1].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[1].set_xlim(4.5, 6.5) plt.savefig('jss_figures/comparing_bases.png') plt.show() # plot 3 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_dash = maxima_y[-1] * np.ones_like(max_dash_time) min_dash_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_dash = minima_y[-1] * np.ones_like(min_dash_time) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) max_discard = maxima_y[-1] max_discard_time = minima_x[-1] - maxima_x[-1] + minima_x[-1] max_discard_dash_time = np.linspace(max_discard_time - width, max_discard_time + width, 101) max_discard_dash = max_discard * np.ones_like(max_discard_dash_time) dash_2_time = np.linspace(minima_x[-1], max_discard_time, 101) dash_2 = np.linspace(minima_y[-1], max_discard, 101) end_point_time = time[-1] end_point = time_series[-1] time_reflect = np.linspace((5 - a) * np.pi, (5 + a) * np.pi, 101) time_series_reflect = np.flip(np.cos(np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101)) + np.cos(5 * np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101))) time_series_anti_reflect = time_series_reflect[0] - time_series_reflect utils = emd_utils.Utility(time=time, time_series=time_series_anti_reflect) anti_max_bool = utils.max_bool_func_1st_order_fd() anti_max_point_time = time_reflect[anti_max_bool] 
anti_max_point = time_series_anti_reflect[anti_max_bool] utils = emd_utils.Utility(time=time, time_series=time_series_reflect) no_anchor_max_time = time_reflect[utils.max_bool_func_1st_order_fd()] no_anchor_max = time_series_reflect[utils.max_bool_func_1st_order_fd()] point_1 = 5.4 length_distance = np.linspace(maxima_y[-1], minima_y[-1], 101) length_distance_time = point_1 * np.pi * np.ones_like(length_distance) length_time = np.linspace(point_1 * np.pi - width, point_1 * np.pi + width, 101) length_top = maxima_y[-1] * np.ones_like(length_time) length_bottom = minima_y[-1] * np.ones_like(length_time) point_2 = 5.2 length_distance_2 = np.linspace(time_series[-1], minima_y[-1], 101) length_distance_time_2 = point_2 * np.pi * np.ones_like(length_distance_2) length_time_2 = np.linspace(point_2 * np.pi - width, point_2 * np.pi + width, 101) length_top_2 = time_series[-1] * np.ones_like(length_time_2) length_bottom_2 = minima_y[-1] * np.ones_like(length_time_2) symmetry_axis_1_time = minima_x[-1] * np.ones(101) symmetry_axis_2_time = time[-1] * np.ones(101) symmetry_axis = np.linspace(-2, 2, 101) end_time = np.linspace(time[-1] - width, time[-1] + width, 101) end_signal = time_series[-1] * np.ones_like(end_time) anti_symmetric_time = np.linspace(time[-1] - 0.5, time[-1] + 0.5, 101) anti_symmetric_signal = time_series[-1] * np.ones_like(anti_symmetric_time) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.plot(time, time_series, linewidth=2, label='Signal') plt.title('Symmetry Edge Effects Example') plt.plot(time_reflect, time_series_reflect, 'g--', linewidth=2, label=textwrap.fill('Symmetric signal', 10)) plt.plot(time_reflect[:51], time_series_anti_reflect[:51], '--', c='purple', linewidth=2, label=textwrap.fill('Anti-symmetric signal', 10)) plt.plot(max_dash_time, max_dash, 'k-') plt.plot(min_dash_time, min_dash, 'k-') plt.plot(dash_1_time, dash_1, 'k--') plt.plot(dash_2_time, dash_2, 'k--') plt.plot(length_distance_time, length_distance, 'k--') plt.plot(length_distance_time_2, length_distance_2, 'k--') plt.plot(length_time, length_top, 'k-') plt.plot(length_time, length_bottom, 'k-') plt.plot(length_time_2, length_top_2, 'k-') plt.plot(length_time_2, length_bottom_2, 'k-') plt.plot(end_time, end_signal, 'k-') plt.plot(symmetry_axis_1_time, symmetry_axis, 'r--', zorder=1) plt.plot(anti_symmetric_time, anti_symmetric_signal, 'r--', zorder=1) plt.plot(symmetry_axis_2_time, symmetry_axis, 'r--', label=textwrap.fill('Axes of symmetry', 10), zorder=1) plt.text(5.1 * np.pi, -0.7, r'$\beta$L') plt.text(5.34 * np.pi, -0.05, 'L') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(max_discard_time, max_discard, c='purple', zorder=4, label=textwrap.fill('Symmetric Discard maxima', 10)) plt.scatter(end_point_time, end_point, c='orange', zorder=4, label=textwrap.fill('Symmetric Anchor maxima', 10)) plt.scatter(anti_max_point_time, anti_max_point, c='green', zorder=4, label=textwrap.fill('Anti-Symmetric maxima', 10)) plt.scatter(no_anchor_max_time, no_anchor_max, c='gray', zorder=4, label=textwrap.fill('Symmetric maxima', 10)) plt.xlim(3.9 * np.pi, 5.5 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_symmetry_anti.png') plt.show()
# plot 4 a = 0.21 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series =
np.cos(time)
numpy.cos
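The completed line rebuilds the two-component test signal np.cos(time) + np.cos(5 * time) that the figures in this script analyse. The extrema are then located with emd_utils.Utility.max_bool_func_1st_order_fd; a plausible stand-in based on first-order finite-difference sign changes is sketched below (an assumption for illustration, not the package's actual implementation):

import numpy as np

a = 0.21
time = np.linspace(0, (5 - a) * np.pi, 1001)
time_series = np.cos(time) + np.cos(5 * time)

d = np.diff(time_series)                                     # first-order differences
max_bool = np.r_[False, (d[:-1] > 0) & (d[1:] < 0), False]   # rising then falling
maxima_x, maxima_y = time[max_bool], time_series[max_bool]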
# pvtrace is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # pvtrace is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import numpy as np from external.transformations import translation_matrix, rotation_matrix import external.transformations as tf from Trace import Photon from Geometry import Box, Cylinder, FinitePlane, transform_point, transform_direction, rotation_matrix_from_vector_alignment, norm from Materials import Spectrum def random_spherical_vector(): # This method of calculating isotropic vectors is taken from GNU Scientific Library LOOP = True while LOOP: x = -1. + 2. * np.random.uniform() y = -1. + 2. * np.random.uniform() s = x**2 + y**2 if s <= 1.0: LOOP = False z = -1. + 2. * s a = 2 * np.sqrt(1 - s) x = a * x y = a * y return np.array([x,y,z]) class SimpleSource(object): """A light source that will generate photons of a single colour, direction and position.""" def __init__(self, position=[0,0,0], direction=[0,0,1], wavelength=555, use_random_polarisation=False): super(SimpleSource, self).__init__() self.position = position self.direction = direction self.wavelength = wavelength self.use_random_polarisation = use_random_polarisation self.throw = 0 self.source_id = "SimpleSource_" + str(id(self)) def photon(self): photon = Photon() photon.source = self.source_id photon.position = np.array(self.position) photon.direction = np.array(self.direction) photon.active = True photon.wavelength = self.wavelength # If use_random_polarisation is set, generate a random polarisation vector for the photon if self.use_random_polarisation: # Randomise the rotation angle around the xy-plane, then transform from +z to the direction of the photon vec = random_spherical_vector() vec[2] = 0. vec = norm(vec) R = rotation_matrix_from_vector_alignment(self.direction, [0,0,1]) photon.polarisation = transform_direction(vec, R) else: photon.polarisation = None photon.id = self.throw self.throw = self.throw + 1 return photon class Laser(object): """A light source that will generate photons of a single colour, direction and position.""" def __init__(self, position=[0,0,0], direction=[0,0,1], wavelength=555, polarisation=None): super(Laser, self).__init__() self.position = np.array(position) self.direction = np.array(direction) self.wavelength = wavelength assert polarisation is not None, "Polarisation of the Laser is not set."
self.polarisation = np.array(polarisation) self.throw = 0 self.source_id = "LaserSource_" + str(id(self)) def photon(self): photon = Photon() photon.source = self.source_id photon.position = np.array(self.position) photon.direction = np.array(self.direction) photon.active = True photon.wavelength = self.wavelength photon.polarisation = self.polarisation photon.id = self.throw self.throw = self.throw + 1 return photon class PlanarSource(object): """A box that emits photons from the top surface (normal), sampled from the spectrum.""" def __init__(self, spectrum=None, wavelength=555, direction=(0,0,1), length=0.05, width=0.05): super(PlanarSource, self).__init__() self.spectrum = spectrum self.wavelength = wavelength self.plane = FinitePlane(length=length, width=width) self.length = length self.width = width # direction is the direction that photons are fired out of the plane in the GLOBAL FRAME. # i.e. this is passed directly to the photon to set its direction self.direction = direction self.throw = 0 self.source_id = "PlanarSource_" + str(id(self)) def translate(self, translation): self.plane.append_transform(tf.translation_matrix(translation)) def rotate(self, angle, axis): self.plane.append_transform(tf.rotation_matrix(angle, axis)) def photon(self): photon = Photon() photon.source = self.source_id photon.id = self.throw self.throw = self.throw + 1 # Create a point which is on the surface of the finite plane in its local frame x = np.random.uniform(0., self.length) y = np.random.uniform(0., self.width) local_point = (x, y, 0.) # Transform the local point to the global frame photon.position = transform_point(local_point, self.plane.transform) photon.direction = self.direction photon.active = True if self.spectrum is not None: photon.wavelength = self.spectrum.wavelength_at_probability(np.random.uniform()) else: photon.wavelength = self.wavelength return photon class LensSource(object): """ A source where photons generated in a plane are focused on a line with space tolerance given by variable "focussize". The focus line should be perpendicular to the plane normal and aligned with the z-axis. """ def __init__(self, spectrum = None, wavelength = 555, linepoint=(0,0,0), linedirection=(0,0,1), focussize = 0, planeorigin = (-1,-1,-1), planeextent = (-1,1,1)): super(LensSource, self).__init__() self.spectrum = spectrum self.wavelength = wavelength self.planeorigin = planeorigin self.planeextent = planeextent self.linepoint = np.array(linepoint) self.linedirection = np.array(linedirection) self.focussize = focussize self.throw = 0 self.source_id = "LensSource_" + str(id(self)) def photon(self): photon = Photon() photon.source = self.source_id photon.id = self.throw self.throw = self.throw + 1 # Position x = np.random.uniform(self.planeorigin[0],self.planeextent[0]) y =
np.random.uniform(self.planeorigin[1],self.planeextent[1])
numpy.random.uniform
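The completed call samples the photon's y coordinate uniformly between the plane's origin and extent; each axis is drawn independently. A self-contained sketch of that sampling (the z draw and the bounds are assumptions about how LensSource continues, using its default plane):

import numpy as np

planeorigin = (-1.0, -1.0, -1.0)   # defaults from the constructor above
planeextent = (-1.0, 1.0, 1.0)     # x is degenerate: the plane sits at x = -1
x = np.random.uniform(planeorigin[0], planeextent[0])
y = np.random.uniform(planeorigin[1], planeextent[1])
z = np.random.uniform(planeorigin[2], planeextent[2])
position = np.array([x, y, z])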
import gym import numpy as np from itertools import product import matplotlib.pyplot as plt def print_policy(Q, env): """ This is a helper function to print a nice policy from the Q function""" moves = [u'←', u'↓',u'→', u'↑'] if not hasattr(env, 'desc'): env = env.env dims = env.desc.shape policy = np.chararray(dims, unicode=True) policy[:] = ' ' for s in range(len(Q)): idx = np.unravel_index(s, dims) policy[idx] = moves[np.argmax(Q[s])] if env.desc[idx] in ['H', 'G']: policy[idx] = u'·' print('\n'.join([''.join([u'{:2}'.format(item) for item in row]) for row in policy])) def plot_V(Q, env): """ This is a helper function to plot the state values from the Q function""" fig = plt.figure() if not hasattr(env, 'desc'): env = env.env dims = env.desc.shape V = np.zeros(dims) for s in range(len(Q)): idx = np.unravel_index(s, dims) V[idx] = np.max(Q[s]) if env.desc[idx] in ['H', 'G']: V[idx] = 0. plt.imshow(V, origin='upper', extent=[0,dims[0],0,dims[1]], vmin=.0, vmax=.6, cmap=plt.cm.RdYlGn, interpolation='none') for x, y in product(range(dims[0]), range(dims[1])): plt.text(y+0.5, dims[0]-x-0.5, '{:.3f}'.format(V[x,y]), horizontalalignment='center', verticalalignment='center') plt.xticks([]) plt.yticks([]) def plot_Q(Q, env): """ This is a helper function to plot the Q function """ from matplotlib import colors, patches fig = plt.figure() ax = fig.gca() if not hasattr(env, 'desc'): env = env.env dims = env.desc.shape up =
np.array([[0, 1], [0.5, 0.5], [1,1]])
numpy.array
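The completed array holds the three vertices of the 'up' wedge of a unit grid cell; plot_Q presumably shifts one such triangle into every cell and colours it by the corresponding Q-value. A hedged sketch of that pattern (the offset and colour are invented for illustration):

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import patches

up = np.array([[0, 1], [0.5, 0.5], [1, 1]])  # triangle in unit-square coordinates
cell_origin = np.array([2.0, 3.0])           # hypothetical grid cell offset
triangle = patches.Polygon(up + cell_origin, closed=True, facecolor='green')
ax = plt.gca()
ax.add_patch(triangle)
ax.set_xlim(0, 4)
ax.set_ylim(0, 4)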
""" Binary serialization NPY format ========== A simple format for saving numpy arrays to disk with the full information about them. The ``.npy`` format is the standard binary file format in NumPy for persisting a *single* arbitrary NumPy array on disk. The format stores all of the shape and dtype information necessary to reconstruct the array correctly even on another machine with a different architecture. The format is designed to be as simple as possible while achieving its limited goals. The ``.npz`` format is the standard format for persisting *multiple* NumPy arrays on disk. A ``.npz`` file is a zip file containing multiple ``.npy`` files, one for each array. Capabilities ------------ - Can represent all NumPy arrays including nested record arrays and object arrays. - Represents the data in its native binary form. - Supports Fortran-contiguous arrays directly. - Stores all of the necessary information to reconstruct the array including shape and dtype on a machine of a different architecture. Both little-endian and big-endian arrays are supported, and a file with little-endian numbers will yield a little-endian array on any machine reading the file. The types are described in terms of their actual sizes. For example, if a machine with a 64-bit C "long int" writes out an array with "long ints", a reading machine with 32-bit C "long ints" will yield an array with 64-bit integers. - Is straightforward to reverse engineer. Datasets often live longer than the programs that created them. A competent developer should be able to create a solution in their preferred programming language to read most ``.npy`` files that they have been given without much documentation. - Allows memory-mapping of the data. See `open_memmap`. - Can be read from a filelike stream object instead of an actual file. - Stores object arrays, i.e. arrays containing elements that are arbitrary Python objects. Files with object arrays are not to be mmapable, but can be read and written to disk. Limitations ----------- - Arbitrary subclasses of numpy.ndarray are not completely preserved. Subclasses will be accepted for writing, but only the array data will be written out. A regular numpy.ndarray object will be created upon reading the file. .. warning:: Due to limitations in the interpretation of structured dtypes, dtypes with fields with empty names will have the names replaced by 'f0', 'f1', etc. Such arrays will not round-trip through the format entirely accurately. The data is intact; only the field names will differ. We are working on a fix for this. This fix will not require a change in the file format. The arrays with such structures can still be saved and restored, and the correct dtype may be restored by using the ``loadedarray.view(correct_dtype)`` method. File extensions --------------- We recommend using the ``.npy`` and ``.npz`` extensions for files saved in this format. This is by no means a requirement; applications may wish to use these file formats but use an extension specific to the application. In the absence of an obvious alternative, however, we suggest using ``.npy`` and ``.npz``. Version numbering ----------------- The version numbering of these formats is independent of NumPy version numbering. If the format is upgraded, the code in `numpy.io` will still be able to read and write Version 1.0 files. Format Version 1.0 ------------------ The first 6 bytes are a magic string: exactly ``\\x93NUMPY``. The next 1 byte is an unsigned byte: the major version number of the file format, e.g. ``\\x01``. 
The next 1 byte is an unsigned byte: the minor version number of the file format, e.g. ``\\x00``. Note: the version of the file format is not tied to the version of the numpy package. The next 2 bytes form a little-endian unsigned short int: the length of the header data HEADER_LEN. The next HEADER_LEN bytes form the header data describing the array's format. It is an ASCII string which contains a Python literal expression of a dictionary. It is terminated by a newline (``\\n``) and padded with spaces (``\\x20``) to make the total of ``len(magic string) + 2 + len(length) + HEADER_LEN`` be evenly divisible by 64 for alignment purposes. The dictionary contains three keys: "descr" : dtype.descr An object that can be passed as an argument to the `numpy.dtype` constructor to create the array's dtype. "fortran_order" : bool Whether the array data is Fortran-contiguous or not. Since Fortran-contiguous arrays are a common form of non-C-contiguity, we allow them to be written directly to disk for efficiency. "shape" : tuple of int The shape of the array. For repeatability and readability, the dictionary keys are sorted in alphabetic order. This is for convenience only. A writer SHOULD implement this if possible. A reader MUST NOT depend on this. Following the header comes the array data. If the dtype contains Python objects (i.e. ``dtype.hasobject is True``), then the data is a Python pickle of the array. Otherwise the data is the contiguous (either C- or Fortran-, depending on ``fortran_order``) bytes of the array. Consumers can figure out the number of bytes by multiplying the number of elements given by the shape (noting that ``shape=()`` means there is 1 element) by ``dtype.itemsize``. Format Version 2.0 ------------------ The version 1.0 format only allowed the array header to have a total size of 65535 bytes. This can be exceeded by structured arrays with a large number of columns. The version 2.0 format extends the header size to 4 GiB. `numpy.save` will automatically save in 2.0 format if the data requires it, else it will always use the more compatible 1.0 format. The description of the fourth element of the header therefore has become: "The next 4 bytes form a little-endian unsigned int: the length of the header data HEADER_LEN." Format Version 3.0 ------------------ This version replaces the ASCII string (which in practice was latin1) with a utf8-encoded string, so supports structured types with any unicode field names. Notes ----- The ``.npy`` format, including motivation for creating it and a comparison of alternatives, is described in the :doc:`"npy-format" NEP <neps:nep-0001-npy-format>`, however details have evolved with time and this document is more current. 
""" import numpy import io import warnings from numpy.lib.utils import safe_eval from numpy.compat import ( isfileobj, os_fspath, pickle ) __all__ = [] EXPECTED_KEYS = {'descr', 'fortran_order', 'shape'} MAGIC_PREFIX = b'\x93NUMPY' MAGIC_LEN = len(MAGIC_PREFIX) + 2 ARRAY_ALIGN = 64 # plausible values are powers of 2 between 16 and 4096 BUFFER_SIZE = 2**18 # size of buffer for reading npz files in bytes # difference between version 1.0 and 2.0 is a 4 byte (I) header length # instead of 2 bytes (H) allowing storage of large structured arrays _header_size_info = { (1, 0): ('<H', 'latin1'), (2, 0): ('<I', 'latin1'), (3, 0): ('<I', 'utf8'), } def _check_version(version): if version not in [(1, 0), (2, 0), (3, 0), None]: msg = "we only support format version (1,0), (2,0), and (3,0), not %s" raise ValueError(msg % (version,)) def magic(major, minor): """ Return the magic string for the given file format version. Parameters ---------- major : int in [0, 255] minor : int in [0, 255] Returns ------- magic : str Raises ------ ValueError if the version cannot be formatted. """ if major < 0 or major > 255: raise ValueError("major version must be 0 <= major < 256") if minor < 0 or minor > 255: raise ValueError("minor version must be 0 <= minor < 256") return MAGIC_PREFIX + bytes([major, minor]) def read_magic(fp): """ Read the magic string to get the version of the file format. Parameters ---------- fp : filelike object Returns ------- major : int minor : int """ magic_str = _read_bytes(fp, MAGIC_LEN, "magic string") if magic_str[:-2] != MAGIC_PREFIX: msg = "the magic string is not correct; expected %r, got %r" raise ValueError(msg % (MAGIC_PREFIX, magic_str[:-2])) major, minor = magic_str[-2:] return major, minor def _has_metadata(dt): if dt.metadata is not None: return True elif dt.names is not None: return any(_has_metadata(dt[k]) for k in dt.names) elif dt.subdtype is not None: return _has_metadata(dt.base) else: return False def dtype_to_descr(dtype): """ Get a serializable descriptor from the dtype. The .descr attribute of a dtype object cannot be round-tripped through the dtype() constructor. Simple types, like dtype('float32'), have a descr which looks like a record array with one field with '' as a name. The dtype() constructor interprets this as a request to give a default name. Instead, we construct descriptor that can be passed to dtype(). Parameters ---------- dtype : dtype The dtype of the array that will be written to disk. Returns ------- descr : object An object that can be passed to `numpy.dtype()` in order to replicate the input dtype. """ if _has_metadata(dtype): warnings.warn("metadata on a dtype may be saved or ignored, but will " "raise if saved when read. Use another form of storage.", UserWarning, stacklevel=2) if dtype.names is not None: # This is a record array. The .descr is fine. XXX: parts of the # record array with an empty name, like padding bytes, still get # fiddled with. This needs to be fixed in the C implementation of # dtype(). return dtype.descr else: return dtype.str def descr_to_dtype(descr): """ Returns a dtype based off the given description. This is essentially the reverse of `dtype_to_descr()`. It will remove the valueless padding fields created by, i.e. simple fields like dtype('float32'), and then convert the description to its corresponding dtype. Parameters ---------- descr : object The object retreived by dtype.descr. Can be passed to `numpy.dtype()` in order to replicate the input dtype. 
Returns ------- dtype : dtype The dtype constructed by the description. """ if isinstance(descr, str): # No padding removal needed return
numpy.dtype(descr)
numpy.dtype
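The completion hands the descriptor straight to numpy.dtype, which is the contract dtype_to_descr and descr_to_dtype must honour: the pair round-trips. A quick check of both the simple-string and the record case, using only public numpy:

import numpy as np

dt = np.dtype('float32')
descr = dt.str                     # '<f4' when written on a little-endian machine
assert np.dtype(descr) == dt       # simple strings reconstruct directly

rec = np.dtype([('a', '<i4'), ('b', '<f8')])
assert np.dtype(rec.descr) == rec  # record dtypes round-trip via .descr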
''' ------------------------------------------------------------------------------------------------- This code accompanies the paper titled "Human injury-based safety decision of automated vehicles" Author: <NAME>, <NAME>, <NAME>, <NAME> Corresponding author: <NAME> (<EMAIL>) ------------------------------------------------------------------------------------------------- ''' import torch import numpy as np from torch import nn from torch.nn.utils import weight_norm __author__ = "<NAME>" def Collision_cond(veh_striking_list, V1_v, V2_v, delta_angle, veh_param): ''' Estimate the collision condition. ''' (veh_l, veh_w, veh_cgf, veh_cgs, veh_k, veh_m) = veh_param delta_angle_2 = np.arccos(np.abs(np.cos(delta_angle))) if -1e-6 < delta_angle_2 < 1e-6: delta_angle_2 = 1e-6 delta_v1_list = [] delta_v2_list = [] # Estimate the collision condition (delta-v) according to the principal impact direction. for veh_striking in veh_striking_list: if veh_striking[0] == 1: veh_ca = np.arctan(veh_cgf[0] / veh_cgs[0]) veh_a2 = np.abs(veh_cgs[1] - veh_striking[3]) veh_RDS = np.abs(V1_v * np.cos(delta_angle) - V2_v) veh_a1 = np.abs(np.sqrt(veh_cgf[0] ** 2 + veh_cgs[0] ** 2) * np.cos(veh_ca + delta_angle_2)) if (veh_striking[1]+1) in [16, 1, 2, 3, 17, 20, 21] and (veh_striking[2]+1) in [16, 1, 2, 3, 17, 20, 21]: veh_e = 2 / veh_RDS else: veh_e = 0.5 / veh_RDS elif veh_striking[0] == 2: veh_ca = np.arctan(veh_cgf[0] / veh_cgs[0]) veh_a2 = np.abs(veh_cgf[1] - veh_striking[3]) veh_a1 = np.abs(np.sqrt(veh_cgf[0] ** 2 + veh_cgs[0] ** 2) * np.cos(delta_angle_2 - veh_ca + np.pi / 2)) veh_RDS = V1_v * np.sin(delta_angle_2) veh_e = 1.5 / veh_RDS elif veh_striking[0] == 3: veh_ca = np.arctan(veh_cgf[1] / veh_cgs[1]) veh_a1 =
np.abs(veh_cgs[0] - veh_striking[3])
numpy.abs
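The completed expression mirrors the earlier branches: the lever arm veh_a1 is the absolute offset between a centre-of-gravity coordinate and the striking location carried in veh_striking[3]. A toy sketch of that pattern with placeholder numbers (values and units are assumptions, not taken from the paper):

import numpy as np

cg_to_side = 1.3      # hypothetical CG-to-side distance (m)
impact_coord = 0.6    # hypothetical impact position along that side (m)
veh_a1 = np.abs(cg_to_side - impact_coord)  # a moment arm is a magnitude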
"""Routines for numerical differentiation.""" from __future__ import division import numpy as np from numpy.linalg import norm from scipy.sparse.linalg import LinearOperator from ..sparse import issparse, csc_matrix, csr_matrix, coo_matrix, find from ._group_columns import group_dense, group_sparse EPS = np.finfo(np.float64).eps def _adjust_scheme_to_bounds(x0, h, num_steps, scheme, lb, ub): """Adjust final difference scheme to the presence of bounds. Parameters ---------- x0 : ndarray, shape (n,) Point at which we wish to estimate derivative. h : ndarray, shape (n,) Desired finite difference steps. num_steps : int Number of `h` steps in one direction required to implement finite difference scheme. For example, 2 means that we need to evaluate f(x0 + 2 * h) or f(x0 - 2 * h) scheme : {'1-sided', '2-sided'} Whether steps in one or both directions are required. In other words '1-sided' applies to forward and backward schemes, '2-sided' applies to center schemes. lb : ndarray, shape (n,) Lower bounds on independent variables. ub : ndarray, shape (n,) Upper bounds on independent variables. Returns ------- h_adjusted : ndarray, shape (n,) Adjusted step sizes. Step size decreases only if a sign flip or switching to one-sided scheme doesn't allow to take a full step. use_one_sided : ndarray of bool, shape (n,) Whether to switch to one-sided scheme. Informative only for ``scheme='2-sided'``. """ if scheme == '1-sided': use_one_sided = np.ones_like(h, dtype=bool) elif scheme == '2-sided': h = np.abs(h) use_one_sided = np.zeros_like(h, dtype=bool) else: raise ValueError("`scheme` must be '1-sided' or '2-sided'.") if np.all((lb == -np.inf) & (ub == np.inf)): return h, use_one_sided h_total = h * num_steps h_adjusted = h.copy() lower_dist = x0 - lb upper_dist = ub - x0 if scheme == '1-sided': x = x0 + h_total violated = (x < lb) | (x > ub) fitting = np.abs(h_total) <= np.maximum(lower_dist, upper_dist) h_adjusted[violated & fitting] *= -1 forward = (upper_dist >= lower_dist) & ~fitting h_adjusted[forward] = upper_dist[forward] / num_steps backward = (upper_dist < lower_dist) & ~fitting h_adjusted[backward] = -lower_dist[backward] / num_steps elif scheme == '2-sided': central = (lower_dist >= h_total) & (upper_dist >= h_total) forward = (upper_dist >= lower_dist) & ~central h_adjusted[forward] = np.minimum( h[forward], 0.5 * upper_dist[forward] / num_steps) use_one_sided[forward] = True backward = (upper_dist < lower_dist) & ~central h_adjusted[backward] = -np.minimum( h[backward], 0.5 * lower_dist[backward] / num_steps) use_one_sided[backward] = True min_dist = np.minimum(upper_dist, lower_dist) / num_steps adjusted_central = (~central & (np.abs(h_adjusted) <= min_dist)) h_adjusted[adjusted_central] = min_dist[adjusted_central] use_one_sided[adjusted_central] = False return h_adjusted, use_one_sided relative_step = {"2-point": EPS**0.5, "3-point": EPS**(1/3), "cs": EPS**0.5} def _compute_absolute_step(rel_step, x0, method): if rel_step is None: rel_step = relative_step[method] sign_x0 = (x0 >= 0).astype(float) * 2 - 1 return rel_step * sign_x0 * np.maximum(1.0, np.abs(x0)) def _prepare_bounds(bounds, x0): lb, ub = [np.asarray(b, dtype=float) for b in bounds] if lb.ndim == 0: lb = np.resize(lb, x0.shape) if ub.ndim == 0: ub = np.resize(ub, x0.shape) return lb, ub def group_columns(A, order=0): """Group columns of a 2-D matrix for sparse finite differencing [1]_. Two columns are in the same group if in each row at least one of them has zero. A greedy sequential algorithm is used to construct groups. 
Parameters ---------- A : array_like or sparse matrix, shape (m, n) Matrix of which to group columns. order : int, iterable of int with shape (n,) or None Permutation array which defines the order of columns enumeration. If int or None, a random permutation is used with `order` used as a random seed. Default is 0, that is use a random permutation but guarantee repeatability. Returns ------- groups : ndarray of int, shape (n,) Contains values from 0 to n_groups-1, where n_groups is the number of found groups. Each value ``groups[i]`` is an index of a group to which ith column assigned. The procedure was helpful only if n_groups is significantly less than n. References ---------- .. [1] <NAME>, <NAME>, and <NAME>, "On the estimation of sparse Jacobian matrices", Journal of the Institute of Mathematics and its Applications, 13 (1974), pp. 117-120. """ if issparse(A): A = csc_matrix(A) else: A = np.atleast_2d(A) A = (A != 0).astype(np.int32) if A.ndim != 2: raise ValueError("`A` must be 2-dimensional.") m, n = A.shape if order is None or np.isscalar(order): rng = np.random.RandomState(order) order = rng.permutation(n) else: order = np.asarray(order) if order.shape != (n,): raise ValueError("`order` has incorrect shape.") A = A[:, order] if issparse(A): groups = group_sparse(m, n, A.indices, A.indptr) else: groups = group_dense(m, n, A) groups[order] = groups.copy() return groups def approx_derivative(fun, x0, method='3-point', rel_step=None, f0=None, bounds=(-np.inf, np.inf), sparsity=None, as_linear_operator=False, args=(), kwargs={}): """Compute finite difference approximation of the derivatives of a vector-valued function. If a function maps from R^n to R^m, its derivatives form m-by-n matrix called the Jacobian, where an element (i, j) is a partial derivative of f[i] with respect to x[j]. Parameters ---------- fun : callable Function of which to estimate the derivatives. The argument x passed to this function is ndarray of shape (n,) (never a scalar even if n=1). It must return 1-D array_like of shape (m,) or a scalar. x0 : array_like of shape (n,) or float Point at which to estimate the derivatives. Float will be converted to a 1-D array. method : {'3-point', '2-point', 'cs'}, optional Finite difference method to use: - '2-point' - use the first order accuracy forward or backward difference. - '3-point' - use central difference in interior points and the second order accuracy forward or backward difference near the boundary. - 'cs' - use a complex-step finite difference scheme. This assumes that the user function is real-valued and can be analytically continued to the complex plane. Otherwise, produces bogus results. rel_step : None or array_like, optional Relative step size to use. The absolute step size is computed as ``h = rel_step * sign(x0) * max(1, abs(x0))``, possibly adjusted to fit into the bounds. For ``method='3-point'`` the sign of `h` is ignored. If None (default) then step is selected automatically, see Notes. f0 : None or array_like, optional If not None it is assumed to be equal to ``fun(x0)``, in this case the ``fun(x0)`` is not called. Default is None. bounds : tuple of array_like, optional Lower and upper bounds on independent variables. Defaults to no bounds. Each bound must match the size of `x0` or be a scalar, in the latter case the bound will be the same for all variables. Use it to limit the range of function evaluation. Bounds checking is not implemented when `as_linear_operator` is True. 
sparsity : {None, array_like, sparse matrix, 2-tuple}, optional Defines a sparsity structure of the Jacobian matrix. If the Jacobian matrix is known to have only few non-zero elements in each row, then it's possible to estimate its several columns by a single function evaluation [3]_. To perform such economic computations two ingredients are required: * structure : array_like or sparse matrix of shape (m, n). A zero element means that a corresponding element of the Jacobian identically equals to zero. * groups : array_like of shape (n,). A column grouping for a given sparsity structure, use `group_columns` to obtain it. A single array or a sparse matrix is interpreted as a sparsity structure, and groups are computed inside the function. A tuple is interpreted as (structure, groups). If None (default), a standard dense differencing will be used. Note, that sparse differencing makes sense only for large Jacobian matrices where each row contains few non-zero elements. as_linear_operator : bool, optional When True the function returns an `scipy.sparse.linalg.LinearOperator`. Otherwise it returns a dense array or a sparse matrix depending on `sparsity`. The linear operator provides an efficient way of computing ``J.dot(p)`` for any vector ``p`` of shape (n,), but does not allow direct access to individual elements of the matrix. By default `as_linear_operator` is False. args, kwargs : tuple and dict, optional Additional arguments passed to `fun`. Both empty by default. The calling signature is ``fun(x, *args, **kwargs)``. Returns ------- J : {ndarray, sparse matrix, LinearOperator} Finite difference approximation of the Jacobian matrix. If `as_linear_operator` is True returns a LinearOperator with shape (m, n). Otherwise it returns a dense array or sparse matrix depending on how `sparsity` is defined. If `sparsity` is None then a ndarray with shape (m, n) is returned. If `sparsity` is not None returns a csr_matrix with shape (m, n). For sparse matrices and linear operators it is always returned as a 2-D structure, for ndarrays, if m=1 it is returned as a 1-D gradient array with shape (n,). See Also -------- check_derivative : Check correctness of a function computing derivatives. Notes ----- If `rel_step` is not provided, it is assigned to ``EPS**(1/s)``, where EPS is machine epsilon for float64 numbers, s=2 for '2-point' method and s=3 for '3-point' method. Such relative step approximately minimizes a sum of truncation and round-off errors, see [1]_. A finite difference scheme for '3-point' method is selected automatically. The well-known central difference scheme is used for points sufficiently far from the boundary, and 3-point forward or backward scheme is used for points near the boundary. Both schemes have the second-order accuracy in terms of Taylor expansion. Refer to [2]_ for the formulas of 3-point forward and backward difference schemes. For dense differencing when m=1 Jacobian is returned with a shape (n,), on the other hand when n=1 Jacobian is returned with a shape (m, 1). Our motivation is the following: a) It handles a case of gradient computation (m=1) in a conventional way. b) It clearly separates these two different cases. c) In all cases np.atleast_2d can be called to get 2-D Jacobian with correct dimensions. References ---------- .. [1] W. H. Press et. al. "Numerical Recipes. The Art of Scientific Computing. 3rd edition", sec. 5.7. ..
[2] <NAME>, <NAME>, and <NAME>, "On the estimation of sparse Jacobian matrices", Journal of the Institute of Mathematics and its Applications, 13 (1974), pp. 117-120. .. [3] <NAME>, "Generation of Finite Difference Formulas on Arbitrarily Spaced Grids", Mathematics of Computation 51, 1988. Examples -------- >>> import numpy as np >>> from scipy.optimize import approx_derivative >>> >>> def f(x, c1, c2): ... return np.array([x[0] * np.sin(c1 * x[1]), ... x[0] * np.cos(c2 * x[1])]) ... >>> x0 = np.array([1.0, 0.5 * np.pi]) >>> approx_derivative(f, x0, args=(1, 2)) array([[ 1., 0.], [-1., 0.]]) Bounds can be used to limit the region of function evaluation. In the example below we compute left and right derivative at point 1.0. >>> def g(x): ... return x**2 if x >= 1 else x ... >>> x0 = 1.0 >>> approx_derivative(g, x0, bounds=(-np.inf, 1.0)) array([ 1.]) >>> approx_derivative(g, x0, bounds=(1.0, np.inf)) array([ 2.]) """ if method not in ['2-point', '3-point', 'cs']: raise ValueError("Unknown method '%s'. " % method) x0 = np.atleast_1d(x0) if x0.ndim > 1: raise ValueError("`x0` must have at most 1 dimension.") lb, ub = _prepare_bounds(bounds, x0) if lb.shape != x0.shape or ub.shape != x0.shape: raise ValueError("Inconsistent shapes between bounds and `x0`.") if as_linear_operator and not (np.all(np.isinf(lb)) and np.all(np.isinf(ub))): raise ValueError("Bounds not supported when " "`as_linear_operator` is True.") def fun_wrapped(x): f = np.atleast_1d(fun(x, *args, **kwargs)) if f.ndim > 1: raise RuntimeError("`fun` return value has " "more than 1 dimension.") return f if f0 is None: f0 = fun_wrapped(x0) else: f0 = np.atleast_1d(f0) if f0.ndim > 1: raise ValueError("`f0` passed has more than 1 dimension.") if np.any((x0 < lb) | (x0 > ub)): raise ValueError("`x0` violates bound constraints.") if as_linear_operator: if rel_step is None: rel_step = relative_step[method] return _linear_operator_difference(fun_wrapped, x0, f0, rel_step, method) else: h = _compute_absolute_step(rel_step, x0, method) if method == '2-point': h, use_one_sided = _adjust_scheme_to_bounds( x0, h, 1, '1-sided', lb, ub) elif method == '3-point': h, use_one_sided = _adjust_scheme_to_bounds( x0, h, 1, '2-sided', lb, ub) elif method == 'cs': use_one_sided = False if sparsity is None: return _dense_difference(fun_wrapped, x0, f0, h, use_one_sided, method) else: if not issparse(sparsity) and len(sparsity) == 2: structure, groups = sparsity else: structure = sparsity groups = group_columns(sparsity) if issparse(structure): structure = csc_matrix(structure) else: structure = np.atleast_2d(structure) groups = np.atleast_1d(groups) return _sparse_difference(fun_wrapped, x0, f0, h, use_one_sided, structure, groups, method) def _linear_operator_difference(fun, x0, f0, h, method): m = f0.size n = x0.size if method == '2-point': def matvec(p): if np.array_equal(p, np.zeros_like(p)): return np.zeros(m) dx = h / norm(p) x = x0 + dx*p df = fun(x) - f0 return df / dx elif method == '3-point': def matvec(p): if np.array_equal(p, np.zeros_like(p)): return np.zeros(m) dx = 2*h / norm(p) x1 = x0 - (dx/2)*p x2 = x0 + (dx/2)*p f1 = fun(x1) f2 = fun(x2) df = f2 - f1 return df / dx elif method == 'cs': def matvec(p): if np.array_equal(p, np.zeros_like(p)): return np.zeros(m) dx = h / norm(p) x = x0 + dx*p*1.j f1 = fun(x) df = f1.imag return df / dx else: raise RuntimeError("Never be here.") return LinearOperator((m, n), matvec) def _dense_difference(fun, x0, f0, h, use_one_sided, method): m = f0.size n = x0.size J_transposed = np.empty((n, m)) 
h_vecs =
np.diag(h)
numpy.diag
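The completed h_vecs = np.diag(h) builds the step matrix for dense differencing: column i of np.diag(h) perturbs only coordinate i, by h[i]. A minimal sketch of the 2-point scheme that follows from it (a simplified illustration; scipy's actual loop also handles one-sided steps and the '3-point' and 'cs' methods):

import numpy as np

def fun(x):
    return np.array([x[0] * x[1], np.sin(x[1])])

x0 = np.array([1.0, 2.0])
f0 = fun(x0)
h = 1e-8 * np.maximum(1.0, np.abs(x0))
h_vecs = np.diag(h)                  # one step vector per coordinate
J = np.empty((f0.size, x0.size))
for i in range(x0.size):
    J[:, i] = (fun(x0 + h_vecs[:, i]) - f0) / h[i]
# compare with the analytic Jacobian [[x1, x0], [0, cos(x1)]]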
import os from PIL import Image import cv2 from os import listdir from os.path import join import matplotlib.pyplot as plt import matplotlib from matplotlib.colors import LogNorm from io_utils.io_common import create_folder from viz_utils.constants import PlotMode, BackgroundType import pylab import numpy as np import cmocean import shapely import cartopy.crs as ccrs import cartopy.feature as cfeature import cartopy def select_colormap(field_name): ''' Based on the name of the field it chooses a colormap from cmocean Args: field_name: Returns: ''' if np.any([field_name.find(x) != -1 for x in ('ssh', 'srfhgt', 'adt','surf_el')]): # cmaps_fields.append(cmocean.cm.deep_r) return cmocean.cm.curl elif np.any([field_name.find(x) != -1 for x in ('temp', 'sst', 'temperature')]): return cmocean.cm.thermal elif np.any([field_name.find(x) != -1 for x in ('vorticity', 'vort')]): return cmocean.cm.curl elif np.any([field_name.find(x) != -1 for x in ('salin', 'sss', 'sal')]): return cmocean.cm.haline elif field_name.find('error') != -1: return cmocean.cm.diff elif field_name.find('binary') != -1: return cmocean.cm.oxy elif np.any([field_name.find(x) != -1 for x in ('u_', 'v_', 'u-vel.', 'v-vel.','velocity')]): return cmocean.cm.speed class EOAImageVisualizer: """This class makes plenty of plots assuming we are plotting Geospatial data (maps). It is made to read xarrays, numpy arrays, and numpy arrays in dictionaries. vizobj = EOAImageVisualizer(disp_images=True, output_folder='output', lats=[lats],lons=[lons]) """ _COLORS = ['y', 'r', 'c', 'b', 'g', 'w', 'k', 'y', 'r', 'c', 'b', 'g', 'w', 'k'] _figsize = 8 _font_size = 30 _units = '' _max_imgs_per_row = 4 _mincbar = np.nan # User can set min and max colorbar values to 'force' the same color bar on all plots _maxcbar = np.nan _flip_data = True _eoas_pyutils_path = './eoas_pyutils'# This is the path where the eoas_utils folder is stored with respect to the main project _contourf = False # When plotting non-regular grids and need precision _background = BackgroundType.BLUE_MARBLE_LR # Select the background to use _auto_colormap = True # Selects the colormap based on the name of the field _show_var_names = False # Includes the name of the field in the titles _additional_polygons = [] # MUST BE SHAPELY GEOMETRIES. In case we want to include additional polygons in the plots (all of them) # If you want to add a streamplot of a vector field. It must be a dictionary with keys x,y,u,v # and optional density, color, cmap, arrowsize, arrowstyle, minlength _vector_field = None _norm = None # Use to normalize the colormap. For example with LogNorm # vizobj = EOAImageVisualizer(disp_images=True, output_folder='output', # lats=[lats],lons=[lons]) def __init__(self, disp_images=True, output_folder='output', lats=[-90,90], lons =[-180,180], projection=ccrs.PlateCarree(), **kwargs): # All the arguments that are passed to the constructor of the class MUST have their names listed here.
self._disp_images = disp_images self._output_folder = output_folder self._projection = projection bbox = self.getExtent(lats, lons) self._extent = bbox self._lats = lats self._lons = lons self._fig_prop = (bbox[1]-bbox[0])/(bbox[3]-bbox[2]) self._contour_labels = False for arg_name, arg_value in kwargs.items(): self.__dict__["_" + arg_name] = arg_value print(self.__dict__["_" + arg_name]) def __getattr__(self, attr): '''Generic getter for all the properties of the class''' return self.__dict__["_" + attr] def __setattr__(self, attr, value): '''Generic setter for all the properties of the class''' self.__dict__["_" + attr] = value def add_colorbar(self, fig, im, ax, show_color_bar, label=""): # https://matplotlib.org/api/_as_gen/matplotlib.pyplot.colorbar.html if show_color_bar: font_size_cbar = self._font_size * .5 # TODO: make this automatic so it always works cbar = fig.colorbar(im, ax=ax, shrink=.7) cbar.ax.tick_params(labelsize=font_size_cbar) if label != "": cbar.set_label(label, fontsize=font_size_cbar*1.2) else: cbar.set_label(self._units, fontsize=font_size_cbar*1.2) def plot_slice_eoa(self, c_img, ax, cmap='gray', mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan): """ Plots a 2D img for EOA data and returns the image handle. :param c_img: 2D array :param ax: geoaxes :return: """ c_ax = ax if self._flip_data: origin = 'lower' else: origin = 'upper' if self._background == BackgroundType.CARTO_DEF: c_ax.stock_img() else: if self._background == BackgroundType.BLUE_MARBLE_LR: img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bluemarble.png')) if self._background == BackgroundType.BLUE_MARBLE_HR: img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bluemarble_5400x2700.jpg')) if self._background == BackgroundType.TOPO: img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/etopo.png')) if self._background == BackgroundType.BATHYMETRY: img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bathymetry_3600x1800.jpg')) c_ax.imshow(img, origin='upper', extent=(-180,180,-90,90), transform=ccrs.PlateCarree()) if mode == PlotMode.RASTER or mode == PlotMode.MERGED: if self._contourf: im = c_ax.contourf(self._lons, self._lats, c_img, levels=255, cmap='inferno', extent=self._extent) else: if np.isnan(mincbar): im = c_ax.imshow(c_img, extent=self._extent, origin=origin, cmap=cmap, transform=self._projection, norm=self._norm) else: im = c_ax.imshow(c_img, extent=self._extent, origin=origin, cmap=cmap, vmin=mincbar, vmax=maxcbar, transform=self._projection, norm=self._norm) if mode == PlotMode.CONTOUR or mode == PlotMode.MERGED: c_ax.set_extent(self.getExtent(list(self._lats), list(self._lons))) if mode == PlotMode.CONTOUR: im = c_ax.contour(c_img, extent=self._extent, transform=self._projection) if mode == PlotMode.MERGED: if self._contour_labels: c_ax.contour(c_img, self._contour_labels, colors='r', extent=self._extent, transform=self._projection) else: c_ax.contour(c_img, extent=self._extent, transform=self._projection) if len(self._additional_polygons) > 0: pol_lats = [] pol_lons = [] for c_polygon in self._additional_polygons: if isinstance(c_polygon, shapely.geometry.linestring.LineString): x,y = c_polygon.xy elif isinstance(c_polygon, shapely.geometry.polygon.Polygon): x, y = c_polygon.exterior.xy pol_lats += y pol_lons += x c_ax.plot(x,y, transform=self._projection, c='r') # Expands the extent by a threshold so the polygons are visible c_ax.set_extent(self.getExtent(list(self._lats) + pol_lats, list(self._lons) + pol_lons, 0.5)) if self._vector_field is not None: try: u =
self._vector_field['u'] v = self._vector_field['v'] x = self._vector_field['x'] y = self._vector_field['y'] vec_keys = self._vector_field.keys() c = 'r' density = 1 linewidth = 3 vec_cmap = cmocean.cm.solar if 'color' in vec_keys: c = self._vector_field['color'] if 'density' in vec_keys: density = self._vector_field['density'] if 'linewidth' in vec_keys: linewidth = self._vector_field['linewidth'] if 'cmap' in vec_keys: vec_cmap = self._vector_field['cmap'] c_ax.set_extent(self.getExtent(list(self._lats), list(self._lons))) c_ax.streamplot(x, y, u, v, transform=self._projection, density=density, color=c, cmap=vec_cmap, linewidth=linewidth) except Exception as e: print(F"Couldn't add vector field e:{e}") gl = c_ax.gridlines(draw_labels=True, color='grey', alpha=0.5, linestyle='--') # gl.xlabel_style = {'size': self._font_size/2, 'color': '#aaaaaa', 'weight':'bold'} font_coords = {'size': self._font_size*.6} gl.xlabel_style = font_coords gl.ylabel_style = font_coords gl.top_labels = False gl.right_labels = False return im def get_proper_size(self, rows, cols): """ Obtains the proper size for a figure. :param rows: how many rows will the figure have :param cols: how many colswill the figure have :param prop: Proportion is the proportion to use w/h :return: """ if rows == 1: return self._figsize * cols * self._fig_prop, self._figsize else: return self._figsize * cols * self._fig_prop, self._figsize * rows def _close_figure(self): """Depending on what is disp_images, the figures are displayed or just closed""" if self._disp_images: plt.show() else: plt.close() def getExtent(self, lats, lons, expand_ext=0.0): ''' Obtains the bbox of the coordinates. If included threshold then increases the bbox in all directions with that thres Args: lats: lons: inc_threshold: Returns: ''' minLat = np.amin(lats) - expand_ext maxLat = np.amax(lats) + expand_ext minLon = np.amin(lons) - expand_ext maxLon = np.amax(lons) + expand_ext bbox = (minLon, maxLon, minLat, maxLat) return bbox def xr_summary(self, ds): """ Prints a summary of the netcdf (global attributes, variables, etc) :param ds: :return: """ print("\n========== Global attributes =========") for name in ds.attrs: print(F"{name} = {getattr(ds, name)}") print("\n========== Dimensions =========") for name in ds.dims: print(F"{name}: {ds[name].shape}") print("\n========== Coordinates =========") for name in ds.coords: print(F"{name}: {ds[name].shape}") print("\n========== Variables =========") for cur_variable_name in ds.variables: cur_var = ds[cur_variable_name] print(F"{cur_variable_name}: {cur_var.dims} {cur_var.shape}") def nc_summary(self, ds): """ Prints a summary of the netcdf (global attributes, variables, etc) :param ds: :return: """ print("\n========== Global attributes =========") for name in ds.ncattrs(): print(F"{name} = {getattr(ds, name)}") print("\n========== Variables =========") netCDFvars = ds.variables for cur_variable_name in netCDFvars.keys(): cur_var = ds.variables[cur_variable_name] print(F"Dimensions for {cur_variable_name}: {cur_var.dimensions} {cur_var.shape}") def add_roads(self, ax): # Names come from: https://www.naturalearthdata.com/features/ # -- Add states roads = cfeature.NaturalEarthFeature( category='cultural', name='roads', scale='10m', facecolor='none') ax.add_feature(roads, edgecolor='black') return ax def add_states(self, ax): # Names come from: https://www.naturalearthdata.com/features/ # -- Add states states_provinces = cfeature.NaturalEarthFeature( category='cultural', name='admin_1_states_provinces_lines', 
scale='50m', facecolor='none') ax.add_feature(states_provinces, edgecolor='gray') return ax def plot_scatter_data(self, lats=None, lons=None, bbox=None, s=1, c='blue', cmap='plasma', title=''): ''' This function plots points on a map :param bbox: :return: ''' if bbox is None: bbox = (-180, 180, -90, 90) if lats is None: lats = self.lats if lons is None: lons = self.lons fig, ax = plt.subplots(1, 1, figsize=(self._figsize, self._figsize), subplot_kw={'projection': ccrs.PlateCarree()}) ax.set_extent(bbox) # If we do not set this, it will crop it to the limits of the locations ax.gridlines() im = ax.scatter(lons, lats, s=s, c=c, cmap=cmap) fig.colorbar(im, ax=ax, shrink=0.7) ax.coastlines() plt.title(title) plt.show() def plot_3d_data_npdict(self, np_variables:list, var_names:list, z_levels= [], title='', file_name_prefix='', cmap=None, z_names = [], show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan): """ Plots multiple z_levels for multiple fields. It uses rows for each depth, and columns for each variable """ create_folder(self._output_folder) orig_cmap = cmap # If the user does not request any z-level, then all are plotted if len(z_levels) == 0: z_levels = range(np_variables[var_names[0]].shape[0]) cols = np.min((self._max_imgs_per_row, len(var_names))) if cols == len(var_names): rows = len(z_levels) else: rows = int(len(z_levels) * np.ceil(len(var_names)/cols)) fig, _axs = plt.subplots(rows, cols, figsize=self.get_proper_size(rows, cols), subplot_kw={'projection': self._projection}) for c_zlevel, c_slice in enumerate(z_levels): # Iterates over the z-levels # Verify the indices of the z_levels are the original ones. if len(z_names) != 0: c_slice_txt = z_names[c_slice] else: c_slice_txt = c_slice c_mincbar = np.nan c_maxcbar = np.nan for idx_var, c_var in enumerate(var_names): # Iterate over the fields if rows*cols == 1: # Single figure ax = _axs else: ax = _axs.flatten()[c_zlevel*len(var_names) + idx_var] # Here we choose the min and max colorbars for each field if not(np.all(np.isnan(mincbar))): if type(mincbar) is list: c_mincbar = mincbar[idx_var] else: c_mincbar = mincbar if not(np.all(np.isnan(maxcbar))): if type(maxcbar) is list: c_maxcbar = maxcbar[idx_var] else: c_maxcbar = maxcbar # By default we select the colormap from the name of the variable if self._auto_colormap and orig_cmap is None: cmap = select_colormap(c_var) else: # If there is an array of colormaps we select the one for this field if type(orig_cmap) is list: cmap = orig_cmap[idx_var] else: # If it is just one cmap, then we use it for all the fields cmap = orig_cmap im = self.plot_slice_eoa(np_variables[c_var][c_slice,:,:], ax, cmap=cmap, mode=plot_mode, mincbar=c_mincbar, maxcbar=c_maxcbar) if self._show_var_names: c_title = F'{var_names[idx_var]} {title}' else: c_title = F'{title}' if len(z_levels) > 1: c_title += F" Z - level: {c_slice_txt}" ax.set_title(c_title, fontsize=self._font_size) self.add_colorbar(fig, im, ax, show_color_bar) plt.tight_layout(pad=.5) file_name = F'{file_name_prefix}' pylab.savefig(join(self._output_folder, F'{file_name}.png'), bbox_inches='tight') self._close_figure() def plot_2d_data_xr(self, np_variables:list, var_names:list, title='', file_name_prefix='', cmap='viridis', show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan): ''' Wrapper function to receive raw 2D numpy data.
It calls the 'main' function for 3D plotting. :param np_variables: :param var_names: :param title: :param file_name_prefix: :param cmap: :param show_color_bar: :param plot_mode: :param mincbar: :param maxcbar: :return: ''' npdict_3d = {} for i, field_name in enumerate(var_names): npdict_3d[field_name] = np.expand_dims(np_variables[field_name], axis=0) self.plot_3d_data_npdict(npdict_3d, var_names, z_levels=[0], title=title, file_name_prefix=file_name_prefix, cmap=cmap, z_names = [], show_color_bar=show_color_bar, plot_mode=plot_mode, mincbar=mincbar, maxcbar=maxcbar) def plot_2d_data_np(self, np_variables:list, var_names:list, title='', file_name_prefix='', cmap=None, flip_data=False, rot_90=False, show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan): ''' Wrapper function to receive raw 2D numpy data. It calls the 'main' function for 3D plotting. :param np_variables: Numpy variables. They can have shape [fields, x, y] or be a single field with shape [x, y] :param var_names: :param title: :param file_name_prefix: :param cmap: :param flip_data: :param rot_90: :param show_color_bar: :param plot_mode: :param mincbar: :param maxcbar: :return: ''' npdict_3d = {} for i, field_name in enumerate(var_names): if len(np_variables.shape) == 3: c_np_data = np_variables[i, :, :] else: c_np_data = np_variables # Single field if rot_90: c_np_data =
np.rot90(c_np_data)
numpy.rot90
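As a quick sanity check on the completion: np.rot90 rotates the plane of the first two axes 90 degrees counter-clockwise, which is what the rot_90 flag above applies to each 2D field before plotting. A small made-up example:

import numpy as np

field = np.arange(6).reshape(2, 3)
print(np.rot90(field))
# [[2 5]
#  [1 4]
#  [0 3]]  (shape (2, 3) becomes (3, 2))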
''' <NAME> set up :2020-1-9 intergrate img and label into one file -- fiducial1024_v1 ''' import argparse import sys, os import pickle import random import collections import json import numpy as np import scipy.io as io import scipy.misc as m import matplotlib.pyplot as plt import glob import math import time import threading import multiprocessing as mp from multiprocessing import Pool import re import cv2 # sys.path.append('/lustre/home/gwxie/hope/project/dewarp/datasets/') # /lustre/home/gwxie/program/project/unwarp/perturbed_imgaes/GAN import utils def getDatasets(dir): return os.listdir(dir) class perturbed(utils.BasePerturbed): def __init__(self, path, bg_path, save_path, save_suffix): self.path = path self.bg_path = bg_path self.save_path = save_path self.save_suffix = save_suffix def save_img(self, m, n, fold_curve='fold', repeat_time=4, fiducial_points = 16, relativeShift_position='relativeShift_v2'): origin_img = cv2.imread(self.path, flags=cv2.IMREAD_COLOR) save_img_shape = [512*2, 480*2] # 320 # reduce_value = np.random.choice([2**4, 2**5, 2**6, 2**7, 2**8], p=[0.01, 0.1, 0.4, 0.39, 0.1]) reduce_value = np.random.choice([2*2, 4*2, 8*2, 16*2, 24*2, 32*2, 40*2, 48*2], p=[0.02, 0.18, 0.2, 0.3, 0.1, 0.1, 0.08, 0.02]) # reduce_value = np.random.choice([8*2, 16*2, 24*2, 32*2, 40*2, 48*2], p=[0.01, 0.02, 0.2, 0.4, 0.19, 0.18]) # reduce_value = np.random.choice([16, 24, 32, 40, 48, 64], p=[0.01, 0.1, 0.2, 0.4, 0.2, 0.09]) base_img_shrink = save_img_shape[0] - reduce_value # enlarge_img_shrink = [1024, 768] # enlarge_img_shrink = [896, 672] # 420 enlarge_img_shrink = [512*4, 480*4] # 420 # enlarge_img_shrink = [896*2, 768*2] # 420 # enlarge_img_shrink = [896, 768] # 420 # enlarge_img_shrink = [768, 576] # 420 # enlarge_img_shrink = [640, 480] # 420 '''''' im_lr = origin_img.shape[0] im_ud = origin_img.shape[1] reduce_value_v2 = np.random.choice([2*2, 4*2, 8*2, 16*2, 24*2, 28*2, 32*2, 48*2], p=[0.02, 0.18, 0.2, 0.2, 0.1, 0.1, 0.1, 0.1]) # reduce_value_v2 = np.random.choice([16, 24, 28, 32, 48, 64], p=[0.01, 0.1, 0.2, 0.3, 0.25, 0.14]) if im_lr > im_ud: im_ud = min(int(im_ud / im_lr * base_img_shrink), save_img_shape[1] - reduce_value_v2) im_lr = save_img_shape[0] - reduce_value else: base_img_shrink = save_img_shape[1] - reduce_value im_lr = min(int(im_lr / im_ud * base_img_shrink), save_img_shape[0] - reduce_value_v2) im_ud = base_img_shrink if round(im_lr / im_ud, 2) < 0.5 or round(im_ud / im_lr, 2) < 0.5: repeat_time = min(repeat_time, 8) edge_padding = 3 im_lr -= im_lr % (fiducial_points-1) - (2*edge_padding) # im_lr % (fiducial_points-1) - 1 im_ud -= im_ud % (fiducial_points-1) - (2*edge_padding) # im_ud % (fiducial_points-1) - 1 im_hight = np.linspace(edge_padding, im_lr - edge_padding, fiducial_points, dtype=np.int64) im_wide = np.linspace(edge_padding, im_ud - edge_padding, fiducial_points, dtype=np.int64) # im_lr -= im_lr % (fiducial_points-1) - (1+2*edge_padding) # im_lr % (fiducial_points-1) - 1 # im_ud -= im_ud % (fiducial_points-1) - (1+2*edge_padding) # im_ud % (fiducial_points-1) - 1 # im_hight = np.linspace(edge_padding, im_lr - (1+edge_padding), fiducial_points, dtype=np.int64) # im_wide = np.linspace(edge_padding, im_ud - (1+edge_padding), fiducial_points, dtype=np.int64) im_x, im_y = np.meshgrid(im_hight, im_wide) segment_x = (im_lr) // (fiducial_points-1) segment_y = (im_ud) // (fiducial_points-1) # plt.plot(im_x, im_y, # color='limegreen', # marker='.', # linestyle='') # plt.grid(True) # plt.show() self.origin_img = cv2.resize(origin_img, (im_ud, im_lr), 
interpolation=cv2.INTER_CUBIC) perturbed_bg_ = getDatasets(self.bg_path) perturbed_bg_img_ = self.bg_path+random.choice(perturbed_bg_) perturbed_bg_img = cv2.imread(perturbed_bg_img_, flags=cv2.IMREAD_COLOR) mesh_shape = self.origin_img.shape[:2] self.synthesis_perturbed_img = np.full((enlarge_img_shrink[0], enlarge_img_shrink[1], 3), 256, dtype=np.float32)#np.zeros_like(perturbed_bg_img) # self.synthesis_perturbed_img = np.full((enlarge_img_shrink[0], enlarge_img_shrink[1], 3), 0, dtype=np.int16)#np.zeros_like(perturbed_bg_img) self.new_shape = self.synthesis_perturbed_img.shape[:2] perturbed_bg_img = cv2.resize(perturbed_bg_img, (save_img_shape[1], save_img_shape[0]), cv2.INPAINT_TELEA) origin_pixel_position = np.argwhere(np.zeros(mesh_shape, dtype=np.uint32) == 0).reshape(mesh_shape[0], mesh_shape[1], 2) pixel_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape(self.new_shape[0], self.new_shape[1], 2) self.perturbed_xy_ = np.zeros((self.new_shape[0], self.new_shape[1], 2)) # self.perturbed_xy_ = pixel_position.copy().astype(np.float32) # fiducial_points_grid = origin_pixel_position[im_x, im_y] self.synthesis_perturbed_label = np.zeros((self.new_shape[0], self.new_shape[1], 2)) x_min, y_min, x_max, y_max = self.adjust_position_v2(0, 0, mesh_shape[0], mesh_shape[1], save_img_shape) origin_pixel_position += [x_min, y_min] x_min, y_min, x_max, y_max = self.adjust_position(0, 0, mesh_shape[0], mesh_shape[1]) x_shift = random.randint(-enlarge_img_shrink[0]//16, enlarge_img_shrink[0]//16) y_shift = random.randint(-enlarge_img_shrink[1]//16, enlarge_img_shrink[1]//16) x_min += x_shift x_max += x_shift y_min += y_shift y_max += y_shift '''im_x,y''' im_x += x_min im_y += y_min self.synthesis_perturbed_img[x_min:x_max, y_min:y_max] = self.origin_img self.synthesis_perturbed_label[x_min:x_max, y_min:y_max] = origin_pixel_position synthesis_perturbed_img_map = self.synthesis_perturbed_img.copy() synthesis_perturbed_label_map = self.synthesis_perturbed_label.copy() foreORbackground_label = np.full((mesh_shape), 1, dtype=np.int16) foreORbackground_label_map = np.full((self.new_shape), 0, dtype=np.int16) foreORbackground_label_map[x_min:x_max, y_min:y_max] = foreORbackground_label # synthesis_perturbed_img_map = self.pad(self.synthesis_perturbed_img.copy(), x_min, y_min, x_max, y_max) # synthesis_perturbed_label_map = self.pad(synthesis_perturbed_label_map, x_min, y_min, x_max, y_max) '''*****************************************************************''' is_normalizationFun_mixture = self.is_perform(0.2, 0.8) # if not is_normalizationFun_mixture: normalizationFun_0_1 = False # normalizationFun_0_1 = self.is_perform(0.5, 0.5) if fold_curve == 'fold': fold_curve_random = True # is_normalizationFun_mixture = False normalizationFun_0_1 = self.is_perform(0.2, 0.8) if is_normalizationFun_mixture: alpha_perturbed = random.randint(80, 120) / 100 else: if normalizationFun_0_1 and repeat_time < 8: alpha_perturbed = random.randint(50, 70) / 100 else: alpha_perturbed = random.randint(70, 130) / 100 else: fold_curve_random = self.is_perform(0.1, 0.9) # False # self.is_perform(0.01, 0.99) alpha_perturbed = random.randint(80, 160) / 100 # is_normalizationFun_mixture = False # self.is_perform(0.01, 0.99) synthesis_perturbed_img = np.full_like(self.synthesis_perturbed_img, 256) # synthesis_perturbed_img = np.full_like(self.synthesis_perturbed_img, 0, dtype=np.int16) synthesis_perturbed_label = np.zeros_like(self.synthesis_perturbed_label) alpha_perturbed_change = self.is_perform(0.5, 0.5) 
p_pp_choice = self.is_perform(0.8, 0.2) if fold_curve == 'fold' else self.is_perform(0.1, 0.9) for repeat_i in range(repeat_time): if alpha_perturbed_change: if fold_curve == 'fold': if is_normalizationFun_mixture: alpha_perturbed = random.randint(80, 120) / 100 else: if normalizationFun_0_1 and repeat_time < 8: alpha_perturbed = random.randint(50, 70) / 100 else: alpha_perturbed = random.randint(70, 130) / 100 else: alpha_perturbed = random.randint(80, 160) / 100 '''''' linspace_x = [0, (self.new_shape[0] - im_lr) // 2 - 1, self.new_shape[0] - (self.new_shape[0] - im_lr) // 2 - 1, self.new_shape[0] - 1] linspace_y = [0, (self.new_shape[1] - im_ud) // 2 - 1, self.new_shape[1] - (self.new_shape[1] - im_ud) // 2 - 1, self.new_shape[1] - 1] linspace_x_seq = [1, 2, 3] linspace_y_seq = [1, 2, 3] r_x = random.choice(linspace_x_seq) r_y = random.choice(linspace_y_seq) perturbed_p = np.array( [random.randint(linspace_x[r_x-1] * 10, linspace_x[r_x] * 10), random.randint(linspace_y[r_y-1] * 10, linspace_y[r_y] * 10)])/10 if ((r_x == 1 or r_x == 3) and (r_y == 1 or r_y == 3)) and p_pp_choice: linspace_x_seq.remove(r_x) linspace_y_seq.remove(r_y) r_x = random.choice(linspace_x_seq) r_y = random.choice(linspace_y_seq) perturbed_pp = np.array( [random.randint(linspace_x[r_x-1] * 10, linspace_x[r_x] * 10), random.randint(linspace_y[r_y-1] * 10, linspace_y[r_y] * 10)])/10 # perturbed_p, perturbed_pp = np.array( # [random.randint(0, self.new_shape[0] * 10) / 10, # random.randint(0, self.new_shape[1] * 10) / 10]) \ # , np.array([random.randint(0, self.new_shape[0] * 10) / 10, # random.randint(0, self.new_shape[1] * 10) / 10]) # perturbed_p, perturbed_pp = np.array( # [random.randint((self.new_shape[0]-im_lr)//2*10, (self.new_shape[0]-(self.new_shape[0]-im_lr)//2) * 10) / 10, # random.randint((self.new_shape[1]-im_ud)//2*10, (self.new_shape[1]-(self.new_shape[1]-im_ud)//2) * 10) / 10]) \ # , np.array([random.randint((self.new_shape[0]-im_lr)//2*10, (self.new_shape[0]-(self.new_shape[0]-im_lr)//2) * 10) / 10, # random.randint((self.new_shape[1]-im_ud)//2*10, (self.new_shape[1]-(self.new_shape[1]-im_ud)//2) * 10) / 10]) '''''' perturbed_vp = perturbed_pp - perturbed_p perturbed_vp_norm = np.linalg.norm(perturbed_vp) perturbed_distance_vertex_and_line = np.dot((perturbed_p - pixel_position), perturbed_vp) / perturbed_vp_norm '''''' # perturbed_v = np.array([random.randint(-3000, 3000) / 100, random.randint(-3000, 3000) / 100]) # perturbed_v = np.array([random.randint(-4000, 4000) / 100, random.randint(-4000, 4000) / 100]) if fold_curve == 'fold' and self.is_perform(0.6, 0.4): # self.is_perform(0.3, 0.7): # perturbed_v = np.array([random.randint(-9000, 9000) / 100, random.randint(-9000, 9000) / 100]) perturbed_v = np.array([random.randint(-10000, 10000) / 100, random.randint(-10000, 10000) / 100]) # perturbed_v = np.array([random.randint(-11000, 11000) / 100, random.randint(-11000, 11000) / 100]) else: # perturbed_v = np.array([random.randint(-9000, 9000) / 100, random.randint(-9000, 9000) / 100]) # perturbed_v = np.array([random.randint(-16000, 16000) / 100, random.randint(-16000, 16000) / 100]) perturbed_v = np.array([random.randint(-8000, 8000) / 100, random.randint(-8000, 8000) / 100]) # perturbed_v = np.array([random.randint(-3500, 3500) / 100, random.randint(-3500, 3500) / 100]) # perturbed_v = np.array([random.randint(-600, 600) / 10, random.randint(-600, 600) / 10]) '''''' if fold_curve == 'fold': if is_normalizationFun_mixture: if self.is_perform(0.5, 0.5): perturbed_d = 
np.abs(self.get_normalize(perturbed_distance_vertex_and_line)) else: perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), random.randint(1, 2)) else: if normalizationFun_0_1: perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), 2) else: perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line)) else: if is_normalizationFun_mixture: if self.is_perform(0.5, 0.5): perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line)) else: perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), random.randint(1, 2)) else: if normalizationFun_0_1: perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), 2) else: perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line)) '''''' if fold_curve_random: # omega_perturbed = (alpha_perturbed+0.2) / (perturbed_d + alpha_perturbed) # omega_perturbed = alpha_perturbed**perturbed_d omega_perturbed = alpha_perturbed / (perturbed_d + alpha_perturbed) else: omega_perturbed = 1 - perturbed_d ** alpha_perturbed '''shadow''' if self.is_perform(0.6, 0.4): synthesis_perturbed_img_map[x_min:x_max, y_min:y_max] = np.minimum(np.maximum(synthesis_perturbed_img_map[x_min:x_max, y_min:y_max] - np.int16(np.round(omega_perturbed[x_min:x_max, y_min:y_max].repeat(3).reshape(x_max-x_min, y_max-y_min, 3) * abs(np.linalg.norm(perturbed_v//2))*np.array([0.4-random.random()*0.1, 0.4-random.random()*0.1, 0.4-random.random()*0.1]))), 0), 255) '''''' if relativeShift_position in ['position', 'relativeShift_v2']: self.perturbed_xy_ += np.array([omega_perturbed * perturbed_v[0], omega_perturbed * perturbed_v[1]]).transpose(1, 2, 0) else: print('relativeShift_position error') exit() ''' flat_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape( self.new_shape[0] * self.new_shape[1], 2) vtx, wts = self.interp_weights(self.perturbed_xy_.reshape(self.new_shape[0] * self.new_shape[1], 2), flat_position) wts_sum = np.abs(wts).sum(-1) # flat_img.reshape(flat_shape[0] * flat_shape[1], 3)[:] = interpolate(pixel, vtx, wts) wts = wts[wts_sum <= 1, :] vtx = vtx[wts_sum <= 1, :] synthesis_perturbed_img.reshape(self.new_shape[0] * self.new_shape[1], 3)[wts_sum <= 1, :] = self.interpolate(synthesis_perturbed_img_map.reshape(self.new_shape[0] * self.new_shape[1], 3), vtx, wts) synthesis_perturbed_label.reshape(self.new_shape[0] * self.new_shape[1], 2)[wts_sum <= 1, :] = self.interpolate(synthesis_perturbed_label_map.reshape(self.new_shape[0] * self.new_shape[1], 2), vtx, wts) foreORbackground_label = np.zeros(self.new_shape) foreORbackground_label.reshape(self.new_shape[0] * self.new_shape[1], 1)[wts_sum <= 1, :] = self.interpolate(foreORbackground_label_map.reshape(self.new_shape[0] * self.new_shape[1], 1), vtx, wts) foreORbackground_label[foreORbackground_label < 0.99] = 0 foreORbackground_label[foreORbackground_label >= 0.99] = 1 # synthesis_perturbed_img = np.around(synthesis_perturbed_img).astype(np.uint8) synthesis_perturbed_label[:, :, 0] *= foreORbackground_label synthesis_perturbed_label[:, :, 1] *= foreORbackground_label synthesis_perturbed_img[:, :, 0] *= foreORbackground_label synthesis_perturbed_img[:, :, 1] *= foreORbackground_label synthesis_perturbed_img[:, :, 2] *= foreORbackground_label self.synthesis_perturbed_img = synthesis_perturbed_img self.synthesis_perturbed_label = synthesis_perturbed_label ''' '''perspective''' perspective_shreshold = random.randint(26, 36)*10 # 280 x_min_per, y_min_per, x_max_per, y_max_per = 
self.adjust_position(perspective_shreshold, perspective_shreshold, self.new_shape[0]-perspective_shreshold, self.new_shape[1]-perspective_shreshold) pts1 = np.float32([[x_min_per, y_min_per], [x_max_per, y_min_per], [x_min_per, y_max_per], [x_max_per, y_max_per]]) e_1_ = x_max_per - x_min_per e_2_ = y_max_per - y_min_per e_3_ = e_2_ e_4_ = e_1_ perspective_shreshold_h = e_1_*0.02 perspective_shreshold_w = e_2_*0.02 a_min_, a_max_ = 70, 110 # if self.is_perform(1, 0): if fold_curve == 'curve' and self.is_perform(0.5, 0.5): if self.is_perform(0.5, 0.5): while True: pts2 = np.around( np.float32([[x_min_per - (random.random()) * perspective_shreshold, y_min_per + (random.random()) * perspective_shreshold], [x_max_per - (random.random()) * perspective_shreshold, y_min_per - (random.random()) * perspective_shreshold], [x_min_per + (random.random()) * perspective_shreshold, y_max_per + (random.random()) * perspective_shreshold], [x_max_per + (random.random()) * perspective_shreshold, y_max_per - (random.random()) * perspective_shreshold]])) # right e_1 = np.linalg.norm(pts2[0]-pts2[1]) e_2 = np.linalg.norm(pts2[0]-pts2[2]) e_3 = np.linalg.norm(pts2[1]-pts2[3]) e_4 = np.linalg.norm(pts2[2]-pts2[3]) if e_1_+perspective_shreshold_h > e_1 and e_2_+perspective_shreshold_w > e_2 and e_3_+perspective_shreshold_w > e_3 and e_4_+perspective_shreshold_h > e_4 and \ e_1_ - perspective_shreshold_h < e_1 and e_2_ - perspective_shreshold_w < e_2 and e_3_ - perspective_shreshold_w < e_3 and e_4_ - perspective_shreshold_h < e_4 and \ abs(e_1-e_4) < perspective_shreshold_h and abs(e_2-e_3) < perspective_shreshold_w: a0_, a1_, a2_, a3_ = self.get_angle_4(pts2) if (a0_ > a_min_ and a0_ < a_max_) or (a1_ > a_min_ and a1_ < a_max_) or (a2_ > a_min_ and a2_ < a_max_) or (a3_ > a_min_ and a3_ < a_max_): break else: while True: pts2 = np.around( np.float32([[x_min_per + (random.random()) * perspective_shreshold, y_min_per - (random.random()) * perspective_shreshold], [x_max_per + (random.random()) * perspective_shreshold, y_min_per + (random.random()) * perspective_shreshold], [x_min_per - (random.random()) * perspective_shreshold, y_max_per - (random.random()) * perspective_shreshold], [x_max_per - (random.random()) * perspective_shreshold, y_max_per + (random.random()) * perspective_shreshold]])) e_1 = np.linalg.norm(pts2[0]-pts2[1]) e_2 = np.linalg.norm(pts2[0]-pts2[2]) e_3 = np.linalg.norm(pts2[1]-pts2[3]) e_4 = np.linalg.norm(pts2[2]-pts2[3]) if e_1_+perspective_shreshold_h > e_1 and e_2_+perspective_shreshold_w > e_2 and e_3_+perspective_shreshold_w > e_3 and e_4_+perspective_shreshold_h > e_4 and \ e_1_ - perspective_shreshold_h < e_1 and e_2_ - perspective_shreshold_w < e_2 and e_3_ - perspective_shreshold_w < e_3 and e_4_ - perspective_shreshold_h < e_4 and \ abs(e_1-e_4) < perspective_shreshold_h and abs(e_2-e_3) < perspective_shreshold_w: a0_, a1_, a2_, a3_ = self.get_angle_4(pts2) if (a0_ > a_min_ and a0_ < a_max_) or (a1_ > a_min_ and a1_ < a_max_) or (a2_ > a_min_ and a2_ < a_max_) or (a3_ > a_min_ and a3_ < a_max_): break else: while True: pts2 = np.around(np.float32([[x_min_per+(random.random()-0.5)*perspective_shreshold, y_min_per+(random.random()-0.5)*perspective_shreshold], [x_max_per+(random.random()-0.5)*perspective_shreshold, y_min_per+(random.random()-0.5)*perspective_shreshold], [x_min_per+(random.random()-0.5)*perspective_shreshold, y_max_per+(random.random()-0.5)*perspective_shreshold], [x_max_per+(random.random()-0.5)*perspective_shreshold, 
y_max_per+(random.random()-0.5)*perspective_shreshold]])) e_1 = np.linalg.norm(pts2[0]-pts2[1]) e_2 = np.linalg.norm(pts2[0]-pts2[2]) e_3 = np.linalg.norm(pts2[1]-pts2[3]) e_4 = np.linalg.norm(pts2[2]-pts2[3]) if e_1_+perspective_shreshold_h > e_1 and e_2_+perspective_shreshold_w > e_2 and e_3_+perspective_shreshold_w > e_3 and e_4_+perspective_shreshold_h > e_4 and \ e_1_ - perspective_shreshold_h < e_1 and e_2_ - perspective_shreshold_w < e_2 and e_3_ - perspective_shreshold_w < e_3 and e_4_ - perspective_shreshold_h < e_4 and \ abs(e_1-e_4) < perspective_shreshold_h and abs(e_2-e_3) < perspective_shreshold_w: a0_, a1_, a2_, a3_ = self.get_angle_4(pts2) if (a0_ > a_min_ and a0_ < a_max_) or (a1_ > a_min_ and a1_ < a_max_) or (a2_ > a_min_ and a2_ < a_max_) or (a3_ > a_min_ and a3_ < a_max_): break M = cv2.getPerspectiveTransform(pts1, pts2) one = np.ones((self.new_shape[0], self.new_shape[1], 1), dtype=np.int16) matr = np.dstack((pixel_position, one)) new = np.dot(M, matr.reshape(-1, 3).T).T.reshape(self.new_shape[0], self.new_shape[1], 3) x = new[:, :, 0]/new[:, :, 2] y = new[:, :, 1]/new[:, :, 2] perturbed_xy_ = np.dstack((x, y)) # perturbed_xy_round_int = np.around(cv2.bilateralFilter(perturbed_xy_round_int, 9, 75, 75)) # perturbed_xy_round_int = np.around(cv2.blur(perturbed_xy_, (17, 17))) # perturbed_xy_round_int = cv2.blur(perturbed_xy_round_int, (17, 17)) # perturbed_xy_round_int = cv2.GaussianBlur(perturbed_xy_round_int, (7, 7), 0) perturbed_xy_ = perturbed_xy_-np.min(perturbed_xy_.T.reshape(2, -1), 1) # perturbed_xy_round_int = np.around(perturbed_xy_round_int-np.min(perturbed_xy_round_int.T.reshape(2, -1), 1)).astype(np.int16) self.perturbed_xy_ += perturbed_xy_ '''perspective end''' '''to img''' flat_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape( self.new_shape[0] * self.new_shape[1], 2) # self.perturbed_xy_ = cv2.blur(self.perturbed_xy_, (7, 7)) self.perturbed_xy_ = cv2.GaussianBlur(self.perturbed_xy_, (7, 7), 0) '''get fiducial points''' fiducial_points_coordinate = self.perturbed_xy_[im_x, im_y] vtx, wts = self.interp_weights(self.perturbed_xy_.reshape(self.new_shape[0] * self.new_shape[1], 2), flat_position) wts_sum = np.abs(wts).sum(-1) # flat_img.reshape(flat_shape[0] * flat_shape[1], 3)[:] = interpolate(pixel, vtx, wts) wts = wts[wts_sum <= 1, :] vtx = vtx[wts_sum <= 1, :] synthesis_perturbed_img.reshape(self.new_shape[0] * self.new_shape[1], 3)[wts_sum <= 1, :] = self.interpolate(synthesis_perturbed_img_map.reshape(self.new_shape[0] * self.new_shape[1], 3), vtx, wts) synthesis_perturbed_label.reshape(self.new_shape[0] * self.new_shape[1], 2)[wts_sum <= 1, :] = self.interpolate(synthesis_perturbed_label_map.reshape(self.new_shape[0] * self.new_shape[1], 2), vtx, wts) foreORbackground_label = np.zeros(self.new_shape) foreORbackground_label.reshape(self.new_shape[0] * self.new_shape[1], 1)[wts_sum <= 1, :] = self.interpolate(foreORbackground_label_map.reshape(self.new_shape[0] * self.new_shape[1], 1), vtx, wts) foreORbackground_label[foreORbackground_label < 0.99] = 0 foreORbackground_label[foreORbackground_label >= 0.99] = 1 self.synthesis_perturbed_img = synthesis_perturbed_img self.synthesis_perturbed_label = synthesis_perturbed_label self.foreORbackground_label = foreORbackground_label '''draw fiducial points stepSize = 0 fiducial_points_synthesis_perturbed_img = self.synthesis_perturbed_img.copy() for l in fiducial_points_coordinate.astype(np.int64).reshape(-1,2): cv2.circle(fiducial_points_synthesis_perturbed_img, (l[1] + 
math.ceil(stepSize / 2), l[0] + math.ceil(stepSize / 2)), 5, (0, 0, 255), -1) cv2.imwrite('/lustre/home/gwxie/program/project/unwarp/unwarp_perturbed/TPS/img/cv_TPS_large.jpg', fiducial_points_synthesis_perturbed_img) ''' '''clip''' perturbed_x_min, perturbed_y_min, perturbed_x_max, perturbed_y_max = -1, -1, self.new_shape[0], self.new_shape[1] for x in range(self.new_shape[0] // 2, perturbed_x_max): if np.sum(self.synthesis_perturbed_img[x, :]) == 768 * self.new_shape[1] and perturbed_x_max - 1 > x: perturbed_x_max = x break for x in range(self.new_shape[0] // 2, perturbed_x_min, -1): if np.sum(self.synthesis_perturbed_img[x, :]) == 768 * self.new_shape[1] and x > 0: perturbed_x_min = x break for y in range(self.new_shape[1] // 2, perturbed_y_max): if np.sum(self.synthesis_perturbed_img[:, y]) == 768 * self.new_shape[0] and perturbed_y_max - 1 > y: perturbed_y_max = y break for y in range(self.new_shape[1] // 2, perturbed_y_min, -1): if np.sum(self.synthesis_perturbed_img[:, y]) == 768 * self.new_shape[0] and y > 0: perturbed_y_min = y break if perturbed_x_min == 0 or perturbed_x_max == self.new_shape[0] or perturbed_y_min == self.new_shape[1] or perturbed_y_max == self.new_shape[1]: raise Exception('clip error') if perturbed_x_max - perturbed_x_min < im_lr//2 or perturbed_y_max - perturbed_y_min < im_ud//2: raise Exception('clip error') perfix_ = self.save_suffix+'_'+str(m)+'_'+str(n) is_shrink = False if perturbed_x_max - perturbed_x_min > save_img_shape[0] or perturbed_y_max - perturbed_y_min > save_img_shape[1]: is_shrink = True synthesis_perturbed_img = cv2.resize(self.synthesis_perturbed_img[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max, :].copy(), (im_ud, im_lr), interpolation=cv2.INTER_LINEAR) synthesis_perturbed_label = cv2.resize(self.synthesis_perturbed_label[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max, :].copy(), (im_ud, im_lr), interpolation=cv2.INTER_LINEAR) foreORbackground_label = cv2.resize(self.foreORbackground_label[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max].copy(), (im_ud, im_lr), interpolation=cv2.INTER_LINEAR) foreORbackground_label[foreORbackground_label < 0.99] = 0 foreORbackground_label[foreORbackground_label >= 0.99] = 1 '''shrink fiducial points''' center_x_l, center_y_l = perturbed_x_min + (perturbed_x_max - perturbed_x_min) // 2, perturbed_y_min + (perturbed_y_max - perturbed_y_min) // 2 fiducial_points_coordinate_copy = fiducial_points_coordinate.copy() shrink_x = im_lr/(perturbed_x_max - perturbed_x_min) shrink_y = im_ud/(perturbed_y_max - perturbed_y_min) fiducial_points_coordinate *= [shrink_x, shrink_y] center_x_l *= shrink_x center_y_l *= shrink_y # fiducial_points_coordinate[1:, 1:] *= [shrink_x, shrink_y] # fiducial_points_coordinate[1:, :1, 0] *= shrink_x # fiducial_points_coordinate[:1, 1:, 1] *= shrink_y # perturbed_x_min_copy, perturbed_y_min_copy, perturbed_x_max_copy, perturbed_y_max_copy = perturbed_x_min, perturbed_y_min, perturbed_x_max, perturbed_y_max perturbed_x_min, perturbed_y_min, perturbed_x_max, perturbed_y_max = self.adjust_position_v2(0, 0, im_lr, im_ud, self.new_shape) self.synthesis_perturbed_img = np.full_like(self.synthesis_perturbed_img, 256) self.synthesis_perturbed_label = np.zeros_like(self.synthesis_perturbed_label) self.foreORbackground_label = np.zeros_like(self.foreORbackground_label) self.synthesis_perturbed_img[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max, :] = synthesis_perturbed_img 
self.synthesis_perturbed_label[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max, :] = synthesis_perturbed_label self.foreORbackground_label[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max] = foreORbackground_label center_x, center_y = perturbed_x_min + (perturbed_x_max - perturbed_x_min) // 2, perturbed_y_min + (perturbed_y_max - perturbed_y_min) // 2 if is_shrink: fiducial_points_coordinate += [center_x-center_x_l, center_y-center_y_l] '''draw fiducial points stepSize = 0 fiducial_points_synthesis_perturbed_img = self.synthesis_perturbed_img.copy() for l in fiducial_points_coordinate.astype(np.int64).reshape(-1, 2): cv2.circle(fiducial_points_synthesis_perturbed_img, (l[1] + math.ceil(stepSize / 2), l[0] + math.ceil(stepSize / 2)), 5, (0, 0, 255), -1) cv2.imwrite('/lustre/home/gwxie/program/project/unwarp/unwarp_perturbed/TPS/img/cv_TPS_small.jpg',fiducial_points_synthesis_perturbed_img) ''' self.new_shape = save_img_shape self.synthesis_perturbed_img = self.synthesis_perturbed_img[ center_x - self.new_shape[0] // 2:center_x + self.new_shape[0] // 2, center_y - self.new_shape[1] // 2:center_y + self.new_shape[1] // 2, :].copy() self.synthesis_perturbed_label = self.synthesis_perturbed_label[ center_x - self.new_shape[0] // 2:center_x + self.new_shape[0] // 2, center_y - self.new_shape[1] // 2:center_y + self.new_shape[1] // 2, :].copy() self.foreORbackground_label = self.foreORbackground_label[ center_x - self.new_shape[0] // 2:center_x + self.new_shape[0] // 2, center_y - self.new_shape[1] // 2:center_y + self.new_shape[1] // 2].copy() perturbed_x_ = max(self.new_shape[0] - (perturbed_x_max - perturbed_x_min), 0) perturbed_x_min = perturbed_x_ // 2 perturbed_x_max = self.new_shape[0] - perturbed_x_ // 2 if perturbed_x_%2 == 0 else self.new_shape[0] - (perturbed_x_ // 2 + 1) perturbed_y_ = max(self.new_shape[1] - (perturbed_y_max - perturbed_y_min), 0) perturbed_y_min = perturbed_y_ // 2 perturbed_y_max = self.new_shape[1] - perturbed_y_ // 2 if perturbed_y_%2 == 0 else self.new_shape[1] - (perturbed_y_ // 2 + 1) '''clip perturbed_x_min, perturbed_y_min, perturbed_x_max, perturbed_y_max = -1, -1, self.new_shape[0], self.new_shape[1] for x in range(self.new_shape[0] // 2, perturbed_x_max): if np.sum(self.synthesis_perturbed_img[x, :]) == 768 * self.new_shape[1] and perturbed_x_max - 1 > x: perturbed_x_max = x break for x in range(self.new_shape[0] // 2, perturbed_x_min, -1): if np.sum(self.synthesis_perturbed_img[x, :]) == 768 * self.new_shape[1] and x > 0: perturbed_x_min = x break for y in range(self.new_shape[1] // 2, perturbed_y_max): if np.sum(self.synthesis_perturbed_img[:, y]) == 768 * self.new_shape[0] and perturbed_y_max - 1 > y: perturbed_y_max = y break for y in range(self.new_shape[1] // 2, perturbed_y_min, -1): if np.sum(self.synthesis_perturbed_img[:, y]) == 768 * self.new_shape[0] and y > 0: perturbed_y_min = y break center_x, center_y = perturbed_x_min+(perturbed_x_max - perturbed_x_min)//2, perturbed_y_min+(perturbed_y_max - perturbed_y_min)//2 perfix_ = self.save_suffix+'_'+str(m)+'_'+str(n) self.new_shape = save_img_shape perturbed_x_ = max(self.new_shape[0] - (perturbed_x_max - perturbed_x_min), 0) perturbed_x_min = perturbed_x_ // 2 perturbed_x_max = self.new_shape[0] - perturbed_x_ // 2 if perturbed_x_%2 == 0 else self.new_shape[0] - (perturbed_x_ // 2 + 1) perturbed_y_ = max(self.new_shape[1] - (perturbed_y_max - perturbed_y_min), 0) perturbed_y_min = perturbed_y_ // 2 perturbed_y_max = self.new_shape[1] - perturbed_y_ // 2 if 
perturbed_y_%2 == 0 else self.new_shape[1] - (perturbed_y_ // 2 + 1) self.synthesis_perturbed_img = self.synthesis_perturbed_img[center_x-self.new_shape[0]//2:center_x+self.new_shape[0]//2, center_y-self.new_shape[1]//2:center_y+self.new_shape[1]//2, :].copy() self.synthesis_perturbed_label = self.synthesis_perturbed_label[center_x-self.new_shape[0]//2:center_x+self.new_shape[0]//2, center_y-self.new_shape[1]//2:center_y+self.new_shape[1]//2, :].copy() self.foreORbackground_label = self.foreORbackground_label[center_x-self.new_shape[0]//2:center_x+self.new_shape[0]//2, center_y-self.new_shape[1]//2:center_y+self.new_shape[1]//2].copy() ''' '''save''' pixel_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape(self.new_shape[0], self.new_shape[1], 2) if relativeShift_position == 'relativeShift_v2': self.synthesis_perturbed_label -= pixel_position fiducial_points_coordinate -= [center_x - self.new_shape[0] // 2, center_y - self.new_shape[1] // 2] self.synthesis_perturbed_label[:, :, 0] *= self.foreORbackground_label self.synthesis_perturbed_label[:, :, 1] *= self.foreORbackground_label self.synthesis_perturbed_img[:, :, 0] *= self.foreORbackground_label self.synthesis_perturbed_img[:, :, 1] *= self.foreORbackground_label self.synthesis_perturbed_img[:, :, 2] *= self.foreORbackground_label ''' synthesis_perturbed_img_filter = self.synthesis_perturbed_img.copy() synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (3, 3), 0) # if self.is_perform(0.9, 0.1) or repeat_time > 5: # # if self.is_perform(0.1, 0.9) and repeat_time > 9: # # synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (7, 7), 0) # # else: # synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (5, 5), 0) # else: # synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (3, 3), 0) self.synthesis_perturbed_img[self.foreORbackground_label == 1] = synthesis_perturbed_img_filter[self.foreORbackground_label == 1] ''' ''' perturbed_bg_img = perturbed_bg_img.astype(np.float32) perturbed_bg_img[:, :, 0] *= 1 - self.foreORbackground_label perturbed_bg_img[:, :, 1] *= 1 - self.foreORbackground_label perturbed_bg_img[:, :, 2] *= 1 - self.foreORbackground_label self.synthesis_perturbed_img += perturbed_bg_img HSV perturbed_bg_img = perturbed_bg_img.astype(np.float32) if self.is_perform(0.1, 0.9): if self.is_perform(0.2, 0.8): synthesis_perturbed_img_clip_HSV = self.synthesis_perturbed_img.copy() synthesis_perturbed_img_clip_HSV = cv2.cvtColor(synthesis_perturbed_img_clip_HSV, cv2.COLOR_RGB2HSV) H_, S_, V_ = (random.random()-0.2)*20, (random.random()-0.2)/8, (random.random()-0.2)*20 synthesis_perturbed_img_clip_HSV[:, :, 0], synthesis_perturbed_img_clip_HSV[:, :, 1], synthesis_perturbed_img_clip_HSV[:, :, 2] = synthesis_perturbed_img_clip_HSV[:, :, 0]-H_, synthesis_perturbed_img_clip_HSV[:, :, 1]-S_, synthesis_perturbed_img_clip_HSV[:, :, 2]-V_ synthesis_perturbed_img_clip_HSV = cv2.cvtColor(synthesis_perturbed_img_clip_HSV, cv2.COLOR_HSV2RGB) perturbed_bg_img[:, :, 0] *= 1-self.foreORbackground_label perturbed_bg_img[:, :, 1] *= 1-self.foreORbackground_label perturbed_bg_img[:, :, 2] *= 1-self.foreORbackground_label synthesis_perturbed_img_clip_HSV += perturbed_bg_img self.synthesis_perturbed_img = synthesis_perturbed_img_clip_HSV else: perturbed_bg_img_HSV = perturbed_bg_img perturbed_bg_img_HSV = cv2.cvtColor(perturbed_bg_img_HSV, cv2.COLOR_RGB2HSV) H_, S_, V_ = (random.random()-0.5)*20, 
(random.random()-0.5)/8, (random.random()-0.2)*20 perturbed_bg_img_HSV[:, :, 0], perturbed_bg_img_HSV[:, :, 1], perturbed_bg_img_HSV[:, :, 2] = perturbed_bg_img_HSV[:, :, 0]-H_, perturbed_bg_img_HSV[:, :, 1]-S_, perturbed_bg_img_HSV[:, :, 2]-V_ perturbed_bg_img_HSV = cv2.cvtColor(perturbed_bg_img_HSV, cv2.COLOR_HSV2RGB) perturbed_bg_img_HSV[:, :, 0] *= 1-self.foreORbackground_label perturbed_bg_img_HSV[:, :, 1] *= 1-self.foreORbackground_label perturbed_bg_img_HSV[:, :, 2] *= 1-self.foreORbackground_label self.synthesis_perturbed_img += perturbed_bg_img_HSV # self.synthesis_perturbed_img[np.sum(self.synthesis_perturbed_img, 2) == 771] = perturbed_bg_img_HSV[np.sum(self.synthesis_perturbed_img, 2) == 771] else: synthesis_perturbed_img_clip_HSV = self.synthesis_perturbed_img.copy() perturbed_bg_img[:, :, 0] *= 1 - self.foreORbackground_label perturbed_bg_img[:, :, 1] *= 1 - self.foreORbackground_label perturbed_bg_img[:, :, 2] *= 1 - self.foreORbackground_label synthesis_perturbed_img_clip_HSV += perturbed_bg_img # synthesis_perturbed_img_clip_HSV[np.sum(self.synthesis_perturbed_img, 2) == 771] = perturbed_bg_img[np.sum(self.synthesis_perturbed_img, 2) == 771] synthesis_perturbed_img_clip_HSV = cv2.cvtColor(synthesis_perturbed_img_clip_HSV, cv2.COLOR_RGB2HSV) H_, S_, V_ = (random.random()-0.5)*20, (random.random()-0.5)/10, (random.random()-0.4)*20 synthesis_perturbed_img_clip_HSV[:, :, 0], synthesis_perturbed_img_clip_HSV[:, :, 1], synthesis_perturbed_img_clip_HSV[:, :, 2] = synthesis_perturbed_img_clip_HSV[:, :, 0]-H_, synthesis_perturbed_img_clip_HSV[:, :, 1]-S_, synthesis_perturbed_img_clip_HSV[:, :, 2]-V_ synthesis_perturbed_img_clip_HSV = cv2.cvtColor(synthesis_perturbed_img_clip_HSV, cv2.COLOR_HSV2RGB) self.synthesis_perturbed_img = synthesis_perturbed_img_clip_HSV ''' '''HSV_v2''' perturbed_bg_img = perturbed_bg_img.astype(np.float32) # if self.is_perform(1, 0): # if self.is_perform(1, 0): if self.is_perform(0.1, 0.9): if self.is_perform(0.2, 0.8): synthesis_perturbed_img_clip_HSV = self.synthesis_perturbed_img.copy() synthesis_perturbed_img_clip_HSV = self.HSV_v1(synthesis_perturbed_img_clip_HSV) perturbed_bg_img[:, :, 0] *= 1-self.foreORbackground_label perturbed_bg_img[:, :, 1] *= 1-self.foreORbackground_label perturbed_bg_img[:, :, 2] *= 1-self.foreORbackground_label synthesis_perturbed_img_clip_HSV += perturbed_bg_img self.synthesis_perturbed_img = synthesis_perturbed_img_clip_HSV else: perturbed_bg_img_HSV = perturbed_bg_img perturbed_bg_img_HSV = self.HSV_v1(perturbed_bg_img_HSV) perturbed_bg_img_HSV[:, :, 0] *= 1-self.foreORbackground_label perturbed_bg_img_HSV[:, :, 1] *= 1-self.foreORbackground_label perturbed_bg_img_HSV[:, :, 2] *= 1-self.foreORbackground_label self.synthesis_perturbed_img += perturbed_bg_img_HSV # self.synthesis_perturbed_img[np.sum(self.synthesis_perturbed_img, 2) == 771] = perturbed_bg_img_HSV[np.sum(self.synthesis_perturbed_img, 2) == 771] else: synthesis_perturbed_img_clip_HSV = self.synthesis_perturbed_img.copy() perturbed_bg_img[:, :, 0] *= 1 - self.foreORbackground_label perturbed_bg_img[:, :, 1] *= 1 - self.foreORbackground_label perturbed_bg_img[:, :, 2] *= 1 - self.foreORbackground_label synthesis_perturbed_img_clip_HSV += perturbed_bg_img synthesis_perturbed_img_clip_HSV = self.HSV_v1(synthesis_perturbed_img_clip_HSV) self.synthesis_perturbed_img = synthesis_perturbed_img_clip_HSV '''''' # cv2.imwrite(self.save_path+'clip/'+perfix_+'_'+fold_curve+str(perturbed_time)+'-'+str(repeat_time)+'.png', synthesis_perturbed_img_clip) 
self.synthesis_perturbed_img[self.synthesis_perturbed_img < 0] = 0 self.synthesis_perturbed_img[self.synthesis_perturbed_img > 255] = 255 self.synthesis_perturbed_img = np.around(self.synthesis_perturbed_img).astype(np.uint8) label = np.zeros_like(self.synthesis_perturbed_img, dtype=np.float32) label[:, :, :2] = self.synthesis_perturbed_label label[:, :, 2] = self.foreORbackground_label # grey = np.around(self.synthesis_perturbed_img[:, :, 0] * 0.2989 + self.synthesis_perturbed_img[:, :, 1] * 0.5870 + self.synthesis_perturbed_img[:, :, 0] * 0.1140).astype(np.int16) # synthesis_perturbed_grey = np.concatenate((grey.reshape(self.new_shape[0], self.new_shape[1], 1), label), axis=2) synthesis_perturbed_color = np.concatenate((self.synthesis_perturbed_img, label), axis=2) self.synthesis_perturbed_color = np.zeros_like(synthesis_perturbed_color, dtype=np.float32) # self.synthesis_perturbed_grey = np.zeros_like(synthesis_perturbed_grey, dtype=np.float32) reduce_value_x = int(round(min((random.random() / 2) * (self.new_shape[0] - (perturbed_x_max - perturbed_x_min)), min(reduce_value, reduce_value_v2)))) reduce_value_y = int(round(min((random.random() / 2) * (self.new_shape[1] - (perturbed_y_max - perturbed_y_min)), min(reduce_value, reduce_value_v2)))) perturbed_x_min = max(perturbed_x_min - reduce_value_x, 0) perturbed_x_max = min(perturbed_x_max + reduce_value_x, self.new_shape[0]) perturbed_y_min = max(perturbed_y_min - reduce_value_y, 0) perturbed_y_max = min(perturbed_y_max + reduce_value_y, self.new_shape[1]) if im_lr >= im_ud: self.synthesis_perturbed_color[:, perturbed_y_min:perturbed_y_max, :] = synthesis_perturbed_color[:, perturbed_y_min:perturbed_y_max, :] # self.synthesis_perturbed_grey[:, perturbed_y_min:perturbed_y_max, :] = synthesis_perturbed_grey[:, perturbed_y_min:perturbed_y_max, :] else: self.synthesis_perturbed_color[perturbed_x_min:perturbed_x_max, :, :] = synthesis_perturbed_color[perturbed_x_min:perturbed_x_max, :, :] # self.synthesis_perturbed_grey[perturbed_x_min:perturbed_x_max, :, :] = synthesis_perturbed_grey[perturbed_x_min:perturbed_x_max, :, :] '''blur''' if self.is_perform(0.1, 0.9): synthesis_perturbed_img_filter = self.synthesis_perturbed_color[:, :, :3].copy() if self.is_perform(0.1, 0.9): synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (5, 5), 0) else: synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (3, 3), 0) if self.is_perform(0.5, 0.5): self.synthesis_perturbed_color[:, :, :3][self.synthesis_perturbed_color[:, :, 5] == 1] = synthesis_perturbed_img_filter[self.synthesis_perturbed_color[:, :, 5] == 1] else: self.synthesis_perturbed_color[:, :, :3] = synthesis_perturbed_img_filter fiducial_points_coordinate = fiducial_points_coordinate[:, :, ::-1] '''draw fiducial points''' stepSize = 0 fiducial_points_synthesis_perturbed_img = self.synthesis_perturbed_color[:, :, :3].copy() for l in fiducial_points_coordinate.astype(np.int64).reshape(-1, 2): cv2.circle(fiducial_points_synthesis_perturbed_img, (l[0] + math.ceil(stepSize / 2), l[1] + math.ceil(stepSize / 2)), 2, (0, 0, 255), -1) cv2.imwrite(self.save_path + 'fiducial_points/' + perfix_ + '_' + fold_curve + '.png', fiducial_points_synthesis_perturbed_img) cv2.imwrite(self.save_path + 'png/' + perfix_ + '_' + fold_curve + '.png', self.synthesis_perturbed_color[:, :, :3]) '''forward-begin''' self.forward_mapping = np.full((save_img_shape[0], save_img_shape[1], 2), 0, dtype=np.float32) forward_mapping =
np.full((save_img_shape[0], save_img_shape[1], 2), 0, dtype=np.float32)
numpy.full
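The completed line follows a pattern this generator uses throughout: np.full allocates an array pre-filled with a sentinel, 256 for "background" pixels on the synthesis canvas and 0 for the two-channel forward mapping. A tiny illustration (only the shapes are taken from the snippet; the small canvas is made up):

import numpy as np

save_img_shape = [512 * 2, 480 * 2]
forward_mapping = np.full((save_img_shape[0], save_img_shape[1], 2), 0, dtype=np.float32)
canvas = np.full((4, 4, 3), 256, dtype=np.float32)  # 256 marks background pixels
print(forward_mapping.shape, canvas[0, 0])  # (1024, 960, 2) [256. 256. 256.]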
import os import sys import click import pickle import sncosmo import numpy as np from astropy.table import Table DATA_PATH = '/home/samdixon/jla_light_curves/' def modify_error(lc, error_floor=0.): """Add an error floor of `error_floor` times the maximum flux of the band to each observation """ data = sncosmo.photdata.photometric_data(lc).normalized(zp=25., zpsys='ab') new_lc = {'time': data.time, 'band': data.band, 'flux': data.flux, 'fluxerr': data.fluxerr, 'zp': data.zp, 'zpsys': data.zpsys} for band in set(data.band): band_cut = data.band==band max_flux_in_band = np.max(data.flux[band_cut]) new_lc['fluxerr'][band_cut] = np.sqrt((error_floor*max_flux_in_band)**2+data.fluxerr[band_cut]**2) new_lc = Table(new_lc, meta=lc.meta) return new_lc def fit_lc_and_save(lc, model_name, save_dir, no_mc): name = lc.meta['SN'] model = sncosmo.Model(source=model_name, effects=[sncosmo.CCM89Dust()], effect_names=['mw'], effect_frames=['obs']) if type(name) is float: name = int(name) z = lc.meta['Z_HELIO'] mwebv = lc.meta['MWEBV'] bounds = {} try: t0 = float(lc.meta['DayMax'].split()[0]) bounds['t0'] = (t0-5, t0+5) except KeyError: try: t0 = np.mean(lc['Date']) bounds['t0'] = (min(lc['Date'])-20, max(lc['Date'])) except KeyError: t0 =
np.mean(lc['time'])
numpy.mean
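The completed expression is the script's second fallback for the fit's t0 guess: when no DayMax is recorded, the mean observation date serves as the starting value. A short sketch with hypothetical MJDs (the numbers below are made up):

import numpy as np

dates = np.array([55060.3, 55071.1, 55082.9, 55095.4])  # hypothetical MJDs
t0 = np.mean(dates)  # initial guess, as in the fallback above
bounds_t0 = (dates.min() - 20, dates.max())  # same bound construction as the snippet
print(t0, bounds_t0)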
# ________ # / # \ / # \ / # \/ import random import textwrap import emd_mean import AdvEMDpy import emd_basis import emd_utils import numpy as np import pandas as pd import cvxpy as cvx import seaborn as sns import matplotlib.pyplot as plt from scipy.integrate import odeint from scipy.ndimage import gaussian_filter from emd_utils import time_extension, Utility from scipy.interpolate import CubicSpline from emd_hilbert import Hilbert, hilbert_spectrum from emd_preprocess import Preprocess from emd_mean import Fluctuation from AdvEMDpy import EMD # alternate packages from PyEMD import EMD as pyemd0215 import emd as emd040 sns.set(style='darkgrid') pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001) pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time) pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series) # plot 0 - addition fig = plt.figure(figsize=(9, 4)) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('First Iteration of Sifting Algorithm') plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1) plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()], c='r', label=r'$M(t_i)$', zorder=2) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4) plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()], c='c', label=r'$m(t_j)$', zorder=3) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5) plt.yticks(ticks=[-2, -1, 0, 1, 2]) plt.xticks(ticks=[0, np.pi, 2 * np.pi], labels=[r'0', r'$\pi$', r'$2\pi$']) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/pseudo_algorithm.png') plt.show() knots = np.arange(12) time = np.linspace(0, 11, 1101) basis = emd_basis.Basis(time=time, time_series=time) b_spline_basis = basis.cubic_b_spline(knots) chsi_basis = basis.chsi_basis(knots) # plot 1 plt.title('Non-Natural Cubic B-Spline Bases at Boundary') plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $') plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $') plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $') plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $') plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $') plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $']) plt.xlim(4.4, 6.6) plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.legend(loc='upper left') plt.savefig('jss_figures/boundary_bases.png') plt.show() # plot 1a - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) knots_uniform = np.linspace(0, 2 * np.pi, 51) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0] fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) 
plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Uniform Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Uniform Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Uniform Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots_uniform)): axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_uniform.png') plt.show() # plot 1b - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=1, verbose=False) fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Statically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Statically Optimised Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Statically Optimised Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots)): axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_1.png') plt.show() # plot 1c - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=2, verbose=False) fig,
axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Dynamically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, Linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Dynamically Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Dynamically Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots[i])): axs[i].plot(knots[i][j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_2.png') plt.show() # plot 1d - addition window = 81 fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Filtering Demonstration') axs[1].set_title('Zoomed Region') preprocess_time = pseudo_alg_time.copy() np.random.seed(1) random.seed(1) preprocess_time_series = pseudo_alg_time_series + np.random.normal(0, 0.1, len(preprocess_time)) for i in random.sample(range(1000), 500): preprocess_time_series[i] += np.random.normal(0, 1) preprocess = Preprocess(time=preprocess_time, time_series=preprocess_time_series) axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[0].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[0].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12)) axs[0].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize interpolation filter', 14)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) 
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[1].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[1].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12)) axs[1].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize interpolation filter', 14)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_filter.png') plt.show() # plot 1e - addition fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Smoothing Demonstration') axs[1].set_title('Zoomed Region') axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[0].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) downsampled_and_decimated = preprocess.downsample() axs[0].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 11)) downsampled = preprocess.downsample(decimate=False) axs[0].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[1].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) 
axs[1].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 13)) axs[1].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.06, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.06, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_smooth.png') plt.show() # plot 2 fig, axs = plt.subplots(1, 2, sharey=True) axs[0].set_title('Cubic B-Spline Bases') axs[0].plot(time, b_spline_basis[2, :].T, '--', label='Basis 1') axs[0].plot(time, b_spline_basis[3, :].T, '--', label='Basis 2') axs[0].plot(time, b_spline_basis[4, :].T, '--', label='Basis 3') axs[0].plot(time, b_spline_basis[5, :].T, '--', label='Basis 4') axs[0].legend(loc='upper left') axs[0].plot(5 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].plot(6 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].set_xticks([5, 6]) axs[0].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[0].set_xlim(4.5, 6.5) axs[1].set_title('Cubic Hermite Spline Bases') axs[1].plot(time, chsi_basis[10, :].T, '--') axs[1].plot(time, chsi_basis[11, :].T, '--') axs[1].plot(time, chsi_basis[12, :].T, '--') axs[1].plot(time, chsi_basis[13, :].T, '--') axs[1].plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].set_xticks([5, 6]) axs[1].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[1].set_xlim(4.5, 6.5) plt.savefig('jss_figures/comparing_bases.png') plt.show() # plot 3 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_dash = maxima_y[-1] * np.ones_like(max_dash_time) min_dash_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_dash = minima_y[-1] * np.ones_like(min_dash_time) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) max_discard = maxima_y[-1] max_discard_time = minima_x[-1] - maxima_x[-1] + minima_x[-1] max_discard_dash_time = np.linspace(max_discard_time - width, max_discard_time + width, 101) max_discard_dash = max_discard * np.ones_like(max_discard_dash_time) dash_2_time = np.linspace(minima_x[-1], max_discard_time, 101) dash_2 = np.linspace(minima_y[-1], max_discard, 101) end_point_time = time[-1] end_point = time_series[-1] time_reflect = np.linspace((5 - a) * np.pi, (5 + a) * np.pi, 101) time_series_reflect = np.flip(np.cos(np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101)) + np.cos(5 * np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101))) time_series_anti_reflect = time_series_reflect[0] - time_series_reflect utils = emd_utils.Utility(time=time, time_series=time_series_anti_reflect) anti_max_bool = utils.max_bool_func_1st_order_fd() anti_max_point_time = time_reflect[anti_max_bool] 
anti_max_point = time_series_anti_reflect[anti_max_bool] utils = emd_utils.Utility(time=time, time_series=time_series_reflect) no_anchor_max_time = time_reflect[utils.max_bool_func_1st_order_fd()] no_anchor_max = time_series_reflect[utils.max_bool_func_1st_order_fd()] point_1 = 5.4 length_distance = np.linspace(maxima_y[-1], minima_y[-1], 101) length_distance_time = point_1 * np.pi * np.ones_like(length_distance) length_time = np.linspace(point_1 * np.pi - width, point_1 * np.pi + width, 101) length_top = maxima_y[-1] * np.ones_like(length_time) length_bottom = minima_y[-1] * np.ones_like(length_time) point_2 = 5.2 length_distance_2 = np.linspace(time_series[-1], minima_y[-1], 101) length_distance_time_2 = point_2 * np.pi * np.ones_like(length_distance_2) length_time_2 = np.linspace(point_2 * np.pi - width, point_2 * np.pi + width, 101) length_top_2 = time_series[-1] * np.ones_like(length_time_2) length_bottom_2 = minima_y[-1] * np.ones_like(length_time_2) symmetry_axis_1_time = minima_x[-1] * np.ones(101) symmetry_axis_2_time = time[-1] * np.ones(101) symmetry_axis = np.linspace(-2, 2, 101) end_time = np.linspace(time[-1] - width, time[-1] + width, 101) end_signal = time_series[-1] * np.ones_like(end_time) anti_symmetric_time = np.linspace(time[-1] - 0.5, time[-1] + 0.5, 101) anti_symmetric_signal = time_series[-1] * np.ones_like(anti_symmetric_time) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.plot(time, time_series, LineWidth=2, label='Signal') plt.title('Symmetry Edge Effects Example') plt.plot(time_reflect, time_series_reflect, 'g--', LineWidth=2, label=textwrap.fill('Symmetric signal', 10)) plt.plot(time_reflect[:51], time_series_anti_reflect[:51], '--', c='purple', LineWidth=2, label=textwrap.fill('Anti-symmetric signal', 10)) plt.plot(max_dash_time, max_dash, 'k-') plt.plot(min_dash_time, min_dash, 'k-') plt.plot(dash_1_time, dash_1, 'k--') plt.plot(dash_2_time, dash_2, 'k--') plt.plot(length_distance_time, length_distance, 'k--') plt.plot(length_distance_time_2, length_distance_2, 'k--') plt.plot(length_time, length_top, 'k-') plt.plot(length_time, length_bottom, 'k-') plt.plot(length_time_2, length_top_2, 'k-') plt.plot(length_time_2, length_bottom_2, 'k-') plt.plot(end_time, end_signal, 'k-') plt.plot(symmetry_axis_1_time, symmetry_axis, 'r--', zorder=1) plt.plot(anti_symmetric_time, anti_symmetric_signal, 'r--', zorder=1) plt.plot(symmetry_axis_2_time, symmetry_axis, 'r--', label=textwrap.fill('Axes of symmetry', 10), zorder=1) plt.text(5.1 * np.pi, -0.7, r'$\beta$L') plt.text(5.34 * np.pi, -0.05, 'L') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(max_discard_time, max_discard, c='purple', zorder=4, label=textwrap.fill('Symmetric Discard maxima', 10)) plt.scatter(end_point_time, end_point, c='orange', zorder=4, label=textwrap.fill('Symmetric Anchor maxima', 10)) plt.scatter(anti_max_point_time, anti_max_point, c='green', zorder=4, label=textwrap.fill('Anti-Symmetric maxima', 10)) plt.scatter(no_anchor_max_time, no_anchor_max, c='gray', zorder=4, label=textwrap.fill('Symmetric maxima', 10)) plt.xlim(3.9 * np.pi, 5.5 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_symmetry_anti.png') plt.show() 
# plot 4 a = 0.21 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_1 = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_dash_2 = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_dash_time_1 = maxima_x[-1] * np.ones_like(max_dash_1) max_dash_time_2 = maxima_x[-2] * np.ones_like(max_dash_1) min_dash_1 = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_dash_2 = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_dash_time_1 = minima_x[-1] * np.ones_like(min_dash_1) min_dash_time_2 = minima_x[-2] * np.ones_like(min_dash_1) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) dash_2_time = np.linspace(maxima_x[-1], minima_x[-2], 101) dash_2 = np.linspace(maxima_y[-1], minima_y[-2], 101) s1 = (minima_y[-2] - maxima_y[-1]) / (minima_x[-2] - maxima_x[-1]) slope_based_maximum_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) slope_based_maximum = minima_y[-1] + (slope_based_maximum_time - minima_x[-1]) * s1 max_dash_time_3 = slope_based_maximum_time * np.ones_like(max_dash_1) max_dash_3 = np.linspace(slope_based_maximum - width, slope_based_maximum + width, 101) dash_3_time = np.linspace(minima_x[-1], slope_based_maximum_time, 101) dash_3 = np.linspace(minima_y[-1], slope_based_maximum, 101) s2 = (minima_y[-1] - maxima_y[-1]) / (minima_x[-1] - maxima_x[-1]) slope_based_minimum_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) slope_based_minimum = slope_based_maximum - (slope_based_maximum_time - slope_based_minimum_time) * s2 min_dash_time_3 = slope_based_minimum_time * np.ones_like(min_dash_1) min_dash_3 = np.linspace(slope_based_minimum - width, slope_based_minimum + width, 101) dash_4_time = np.linspace(slope_based_maximum_time, slope_based_minimum_time) dash_4 = np.linspace(slope_based_maximum, slope_based_minimum) maxima_dash = np.linspace(2.5 - width, 2.5 + width, 101) maxima_dash_time_1 = maxima_x[-2] * np.ones_like(maxima_dash) maxima_dash_time_2 = maxima_x[-1] * np.ones_like(maxima_dash) maxima_dash_time_3 = slope_based_maximum_time * np.ones_like(maxima_dash) maxima_line_dash_time = np.linspace(maxima_x[-2], slope_based_maximum_time, 101) maxima_line_dash = 2.5 * np.ones_like(maxima_line_dash_time) minima_dash = np.linspace(-3.4 - width, -3.4 + width, 101) minima_dash_time_1 = minima_x[-2] * np.ones_like(minima_dash) minima_dash_time_2 = minima_x[-1] * np.ones_like(minima_dash) minima_dash_time_3 = slope_based_minimum_time * np.ones_like(minima_dash) minima_line_dash_time = np.linspace(minima_x[-2], slope_based_minimum_time, 101) minima_line_dash = -3.4 * np.ones_like(minima_line_dash_time) # slightly edit signal to make difference between slope-based method and improved slope-based method more clear time_series[time >= minima_x[-1]] = 1.5 * (time_series[time >= minima_x[-1]] - time_series[time == minima_x[-1]]) + \ time_series[time == minima_x[-1]] improved_slope_based_maximum_time = time[-1] improved_slope_based_maximum = time_series[-1] improved_slope_based_minimum_time = slope_based_minimum_time improved_slope_based_minimum = improved_slope_based_maximum + s2 * (improved_slope_based_minimum_time - improved_slope_based_maximum_time) 
min_dash_4 = np.linspace(improved_slope_based_minimum - width, improved_slope_based_minimum + width, 101) min_dash_time_4 = improved_slope_based_minimum_time * np.ones_like(min_dash_4) dash_final_time = np.linspace(improved_slope_based_maximum_time, improved_slope_based_minimum_time, 101) dash_final = np.linspace(improved_slope_based_maximum, improved_slope_based_minimum, 101) ax = plt.subplot(111) figure_size = plt.gcf().get_size_inches() factor = 0.9 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) plt.plot(time, time_series, LineWidth=2, label='Signal') plt.title('Slope-Based Edge Effects Example') plt.plot(max_dash_time_1, max_dash_1, 'k-') plt.plot(max_dash_time_2, max_dash_2, 'k-') plt.plot(max_dash_time_3, max_dash_3, 'k-') plt.plot(min_dash_time_1, min_dash_1, 'k-') plt.plot(min_dash_time_2, min_dash_2, 'k-') plt.plot(min_dash_time_3, min_dash_3, 'k-') plt.plot(min_dash_time_4, min_dash_4, 'k-') plt.plot(maxima_dash_time_1, maxima_dash, 'k-') plt.plot(maxima_dash_time_2, maxima_dash, 'k-') plt.plot(maxima_dash_time_3, maxima_dash, 'k-') plt.plot(minima_dash_time_1, minima_dash, 'k-') plt.plot(minima_dash_time_2, minima_dash, 'k-') plt.plot(minima_dash_time_3, minima_dash, 'k-') plt.text(4.34 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$') plt.text(4.74 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$') plt.text(4.12 * np.pi, 2, r'$\Delta{t^{max}_{M}}$') plt.text(4.50 * np.pi, 2, r'$\Delta{t^{max}_{M}}$') plt.text(4.30 * np.pi, 0.35, r'$s_1$') plt.text(4.43 * np.pi, -0.20, r'$s_2$') plt.text(4.30 * np.pi + (minima_x[-1] - minima_x[-2]), 0.35 + (minima_y[-1] - minima_y[-2]), r'$s_1$') plt.text(4.43 * np.pi + (slope_based_minimum_time - minima_x[-1]), -0.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$') plt.text(4.50 * np.pi + (slope_based_minimum_time - minima_x[-1]), 1.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$') plt.plot(minima_line_dash_time, minima_line_dash, 'k--') plt.plot(maxima_line_dash_time, maxima_line_dash, 'k--') plt.plot(dash_1_time, dash_1, 'k--') plt.plot(dash_2_time, dash_2, 'k--') plt.plot(dash_3_time, dash_3, 'k--') plt.plot(dash_4_time, dash_4, 'k--') plt.plot(dash_final_time, dash_final, 'k--') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(slope_based_maximum_time, slope_based_maximum, c='orange', zorder=4, label=textwrap.fill('Slope-based maximum', 11)) plt.scatter(slope_based_minimum_time, slope_based_minimum, c='purple', zorder=4, label=textwrap.fill('Slope-based minimum', 11)) plt.scatter(improved_slope_based_maximum_time, improved_slope_based_maximum, c='deeppink', zorder=4, label=textwrap.fill('Improved slope-based maximum', 11)) plt.scatter(improved_slope_based_minimum_time, improved_slope_based_minimum, c='dodgerblue', zorder=4, label=textwrap.fill('Improved slope-based minimum', 11)) plt.xlim(3.9 * np.pi, 5.5 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-3, -2, -1, 0, 1, 2), ('-3', '-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_slope_based.png') plt.show() # plot 5 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = 
time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] A2 = np.abs(maxima_y[-2] - minima_y[-2]) / 2 A1 = np.abs(maxima_y[-1] - minima_y[-1]) / 2 P2 = 2 * np.abs(maxima_x[-2] - minima_x[-2]) P1 = 2 * np.abs(maxima_x[-1] - minima_x[-1]) Huang_time = (P1 / P2) * (time[time >= maxima_x[-2]] - time[time == maxima_x[-2]]) + maxima_x[-1] Huang_wave = (A1 / A2) * (time_series[time >= maxima_x[-2]] - time_series[time == maxima_x[-2]]) + maxima_y[-1] Coughlin_time = Huang_time Coughlin_wave = A1 * np.cos(2 * np.pi * (1 / P1) * (Coughlin_time - Coughlin_time[0])) Average_max_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) Average_max = (maxima_y[-2] + maxima_y[-1]) / 2 Average_min_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) Average_min = (minima_y[-2] + minima_y[-1]) / 2 utils_Huang = emd_utils.Utility(time=time, time_series=Huang_wave) Huang_max_bool = utils_Huang.max_bool_func_1st_order_fd() Huang_min_bool = utils_Huang.min_bool_func_1st_order_fd() utils_Coughlin = emd_utils.Utility(time=time, time_series=Coughlin_wave) Coughlin_max_bool = utils_Coughlin.max_bool_func_1st_order_fd() Coughlin_min_bool = utils_Coughlin.min_bool_func_1st_order_fd() Huang_max_time = Huang_time[Huang_max_bool] Huang_max = Huang_wave[Huang_max_bool] Huang_min_time = Huang_time[Huang_min_bool] Huang_min = Huang_wave[Huang_min_bool] Coughlin_max_time = Coughlin_time[Coughlin_max_bool] Coughlin_max = Coughlin_wave[Coughlin_max_bool] Coughlin_min_time = Coughlin_time[Coughlin_min_bool] Coughlin_min = Coughlin_wave[Coughlin_min_bool] max_2_x_time = np.linspace(maxima_x[-2] - width, maxima_x[-2] + width, 101) max_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) max_2_x = maxima_y[-2] * np.ones_like(max_2_x_time) min_2_x_time = np.linspace(minima_x[-2] - width, minima_x[-2] + width, 101) min_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) min_2_x = minima_y[-2] * np.ones_like(min_2_x_time) dash_max_min_2_x = np.linspace(minima_y[-2], maxima_y[-2], 101) dash_max_min_2_x_time = 5.3 * np.pi * np.ones_like(dash_max_min_2_x) max_2_y = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) max_2_y_time = maxima_x[-2] * np.ones_like(max_2_y) min_2_y = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) min_2_y_time = minima_x[-2] * np.ones_like(min_2_y) dash_max_min_2_y_time = np.linspace(minima_x[-2], maxima_x[-2], 101) dash_max_min_2_y = -1.8 * np.ones_like(dash_max_min_2_y_time) max_1_x_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) max_1_x = maxima_y[-1] * np.ones_like(max_1_x_time) min_1_x_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) min_1_x = minima_y[-1] * np.ones_like(min_1_x_time) dash_max_min_1_x = np.linspace(minima_y[-1], maxima_y[-1], 101) dash_max_min_1_x_time = 5.4 * np.pi * np.ones_like(dash_max_min_1_x) max_1_y = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) max_1_y_time = maxima_x[-1] * np.ones_like(max_1_y) min_1_y = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) min_1_y_time = 
minima_x[-1] *
np.ones_like(min_1_y)
numpy.ones_like
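A minimal, self-contained sketch (not part of the dataset row above; variable names are illustrative) of the target API numpy.ones_like, which the prompt uses repeatedly to build constant coordinate arrays for dashed marker lines:

# numpy.ones_like allocates an array of ones with the same shape and dtype as
# its argument; scaling it gives a constant array matched to another series.
import numpy as np

y = np.linspace(-2.1 - 0.2, -2.1 + 0.2, 101)   # a short vertical segment of y-values
x = 4.7 * np.ones_like(y)                      # constant x-coordinate for that segment
assert x.shape == y.shape and x.dtype == y.dtype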
import numpy as np
import pytest

import theano
import theano.tensor as tt

# Don't import test classes otherwise they get tested as part of the file
from tests import unittest_tools as utt
from tests.gpuarray.config import mode_with_gpu, mode_without_gpu, test_ctx_name
from tests.tensor.test_basic import (
    TestAlloc,
    TestComparison,
    TestJoinAndSplit,
    TestReshape,
)
from tests.tensor.utils import rand, safe_make_node
from theano.gpuarray.basic_ops import (
    GpuAlloc,
    GpuAllocEmpty,
    GpuContiguous,
    GpuEye,
    GpuFromHost,
    GpuJoin,
    GpuReshape,
    GpuSplit,
    GpuToGpu,
    GpuTri,
    HostFromGpu,
    gpu_contiguous,
    gpu_join,
    host_from_gpu,
)
from theano.gpuarray.elemwise import GpuDimShuffle, GpuElemwise
from theano.gpuarray.subtensor import GpuSubtensor
from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor
from theano.tensor import TensorType
from theano.tensor.basic import alloc

pygpu = pytest.importorskip("pygpu")
gpuarray = pygpu.gpuarray

utt.seed_rng()
rng = np.random.RandomState(seed=utt.fetch_seed())


def inplace_func(
    inputs,
    outputs,
    mode=None,
    allow_input_downcast=False,
    on_unused_input="raise",
    name=None,
):
    if mode is None:
        mode = mode_with_gpu
    return theano.function(
        inputs,
        outputs,
        mode=mode,
        allow_input_downcast=allow_input_downcast,
        accept_inplace=True,
        on_unused_input=on_unused_input,
        name=name,
    )


def fake_shared(value, name=None, strict=False, allow_downcast=None, **kwargs):
    from theano.tensor.sharedvar import scalar_constructor, tensor_constructor

    for c in (gpuarray_shared_constructor, tensor_constructor, scalar_constructor):
        try:
            return c(
                value, name=name, strict=strict, allow_downcast=allow_downcast, **kwargs
            )
        except TypeError:
            continue


def rand_gpuarray(*shape, **kwargs):
    r = rng.rand(*shape) * 2 - 1
    dtype = kwargs.pop("dtype", theano.config.floatX)
    cls = kwargs.pop("cls", None)
    if len(kwargs) != 0:
        raise TypeError("Unexpected argument %s" % list(kwargs.keys())[0])
    return gpuarray.array(r, dtype=dtype, cls=cls, context=get_context(test_ctx_name))


def makeTester(
    name,
    op,
    gpu_op,
    cases,
    checks=None,
    mode_gpu=mode_with_gpu,
    mode_nogpu=mode_without_gpu,
    skip=False,
    eps=1e-10,
):
    if checks is None:
        checks = {}
    _op = op
    _gpu_op = gpu_op
    _cases = cases
    _skip = skip
    _checks = checks

    class Checker(utt.OptimizationTestMixin):
        op = staticmethod(_op)
        gpu_op = staticmethod(_gpu_op)
        cases = _cases
        skip = _skip
        checks = _checks

        def setup_method(self):
            eval(self.__class__.__module__ + "." + self.__class__.__name__)

        def test_all(self):
            if skip:
                pytest.skip(skip)

            for testname, inputs in cases.items():
                for _ in range(len(inputs)):
                    if type(inputs[_]) is float:
                        inputs[_] = np.asarray(inputs[_], dtype=theano.config.floatX)
                self.run_case(testname, inputs)

        def run_case(self, testname, inputs):
            inputs_ref = [theano.shared(inp) for inp in inputs]
            inputs_tst = [theano.shared(inp) for inp in inputs]

            try:
                node_ref = safe_make_node(self.op, *inputs_ref)
                node_tst = safe_make_node(self.op, *inputs_tst)
            except Exception as exc:
                err_msg = (
                    "Test %s::%s: Error occurred while making "
                    "a node with inputs %s"
                ) % (self.gpu_op, testname, inputs)
                exc.args += (err_msg,)
                raise

            try:
                f_ref = inplace_func([], node_ref.outputs, mode=mode_nogpu)
                f_tst = inplace_func([], node_tst.outputs, mode=mode_gpu)
            except Exception as exc:
                err_msg = (
                    "Test %s::%s: Error occurred while trying to make a Function"
                ) % (self.gpu_op, testname)
                exc.args += (err_msg,)
                raise

            self.assertFunctionContains1(f_tst, self.gpu_op)

            ref_e = None
            try:
                expecteds = f_ref()
            except Exception as exc:
                ref_e = exc

            try:
                variables = f_tst()
            except Exception as exc:
                if ref_e is None:
                    err_msg = (
                        "Test %s::%s: exception when calling the Function"
                    ) % (self.gpu_op, testname)
                    exc.args += (err_msg,)
                    raise
                else:
                    # if we raised an exception of the same type we're good.
                    if isinstance(exc, type(ref_e)):
                        return
                    else:
                        err_msg = (
                            "Test %s::%s: exception raised during test "
                            "call was not the same as the reference "
                            "call (got: %s, expected %s)"
                            % (self.gpu_op, testname, type(exc), type(ref_e))
                        )
                        exc.args += (err_msg,)
                        raise

            for i, (variable, expected) in enumerate(zip(variables, expecteds)):
                condition = (
                    variable.dtype != expected.dtype
                    or variable.shape != expected.shape
                    or not TensorType.values_eq_approx(variable, expected)
                )
                assert not condition, (
                    "Test %s::%s: Output %s gave the wrong "
                    "value. With inputs %s, expected %s "
                    "(dtype %s), got %s (dtype %s)."
                    % (
                        self.op,
                        testname,
                        i,
                        inputs,
                        expected,
                        expected.dtype,
                        variable,
                        variable.dtype,
                    )
                )

            for description, check in self.checks.items():
                assert check(inputs, variables), (
                    "Test %s::%s: Failed check: %s "
                    "(inputs were %s, outputs were %s)"
                ) % (self.op, testname, description, inputs, variables)

    Checker.__name__ = name
    if hasattr(Checker, "__qualname__"):
        Checker.__qualname__ = name
    return Checker


def test_transfer_cpu_gpu():
    a = tt.fmatrix("a")
    g = GpuArrayType(dtype="float32", broadcastable=(False, False))("g")

    av = np.asarray(rng.rand(5, 4), dtype="float32")
    gv = gpuarray.array(av, context=get_context(test_ctx_name))

    f = theano.function([a], GpuFromHost(test_ctx_name)(a))
    fv = f(av)
    assert GpuArrayType.values_eq(fv, gv)

    f = theano.function([g], host_from_gpu(g))
    fv = f(gv)
    assert np.all(fv == av)


def test_transfer_gpu_gpu():
    g = GpuArrayType(
        dtype="float32", broadcastable=(False, False), context_name=test_ctx_name
    )()

    av = np.asarray(rng.rand(5, 4), dtype="float32")
    gv = gpuarray.array(av, context=get_context(test_ctx_name))
    mode = mode_with_gpu.excluding(
        "cut_gpua_host_transfers", "local_cut_gpua_host_gpua"
    )
    f = theano.function([g], GpuToGpu(test_ctx_name)(g), mode=mode)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 1
    assert isinstance(topo[0].op, GpuToGpu)
    fv = f(gv)
    assert GpuArrayType.values_eq(fv, gv)


def test_transfer_strided():
    # This is just to ensure that it works in theano.
    # libgpuarray has a much more comprehensive suite of tests to
    # ensure correctness.
    a = tt.fmatrix("a")
    g = GpuArrayType(dtype="float32", broadcastable=(False, False))("g")

    av = np.asarray(rng.rand(5, 8), dtype="float32")
    gv = gpuarray.array(av, context=get_context(test_ctx_name))

    av = av[:, ::2]
    gv = gv[:, ::2]

    f = theano.function([a], GpuFromHost(test_ctx_name)(a))
    fv = f(av)
    assert GpuArrayType.values_eq(fv, gv)

    f = theano.function([g], host_from_gpu(g))
    fv = f(gv)
    assert np.all(fv == av)


def gpu_alloc_expected(x, *shp):
    g = gpuarray.empty(shp, dtype=x.dtype, context=get_context(test_ctx_name))
    g[:] = x
    return g


TestGpuAlloc = makeTester(
    name="GpuAllocTester",
    # The +1 is there to allow the lift to the GPU.
    op=lambda *args: alloc(*args) + 1,
    gpu_op=GpuAlloc(test_ctx_name),
    cases=dict(
        correct01=(rand(), np.int32(7)),
        # just gives a DeepCopyOp with possibly wrong results on the CPU
        # correct01_bcast=(rand(1), np.int32(7)),
        correct02=(rand(), np.int32(4), np.int32(7)),
        correct12=(rand(7), np.int32(4), np.int32(7)),
        correct13=(rand(7), np.int32(2), np.int32(4), np.int32(7)),
        correct23=(rand(4, 7), np.int32(2), np.int32(4), np.int32(7)),
        bad_shape12=(rand(7), np.int32(7), np.int32(5)),
    ),
)


class TestGPUAlloc(TestAlloc):
    dtype = "float32"
    mode = mode_with_gpu
    shared = staticmethod(gpuarray_shared_constructor)
    allocs = [GpuAlloc(test_ctx_name), GpuAlloc(test_ctx_name), tt.Alloc()]


def test_alloc_empty():
    for dt in ["float32", "int8"]:
        f = theano.function([], GpuAllocEmpty(dt, context_name=test_ctx_name)(2, 3))
        assert len(f.maker.fgraph.apply_nodes) == 1
        out = f()
        assert out.shape == (2, 3)
        assert out.dtype == dt

    f = theano.function(
        [],
        [
            GpuAllocEmpty("uint64", test_ctx_name)(3, 2),
            GpuAllocEmpty("uint64", test_ctx_name)(3, 2),
        ],
    )
    out = f()
    assert out[0].shape == (3, 2)
    assert out[0].dtype == "uint64"
    assert out[1].shape == (3, 2)
    assert out[1].dtype == "uint64"
    assert (
        len(
            [
                node
                for node in f.maker.fgraph.apply_nodes
                if isinstance(node.op, GpuAllocEmpty)
            ]
        )
        == 1
    )


def test_shape():
    x = GpuArrayType(dtype="float32", broadcastable=[False, False, False])()
    v = gpuarray.zeros((3, 4, 5), dtype="float32", context=get_context(test_ctx_name))
    f = theano.function([x], x.shape)
    topo = f.maker.fgraph.toposort()
    assert np.all(f(v) == (3, 4, 5))
    if theano.config.mode != "FAST_COMPILE":
        assert len(topo) == 4
        assert isinstance(topo[0].op, tt.opt.Shape_i)
        assert isinstance(topo[1].op, tt.opt.Shape_i)
        assert isinstance(topo[2].op, tt.opt.Shape_i)
        assert isinstance(topo[3].op, tt.opt.MakeVector)
    mode = mode_with_gpu.excluding("local_shape_to_shape_i")
    f = theano.function([x], x.shape, mode=mode)
    topo = f.maker.fgraph.toposort()
    assert np.all(f(v) == (3, 4, 5))
    assert len(topo) == 1
    assert isinstance(topo[0].op, tt.Shape)


def test_gpu_contiguous():
    a = tt.fmatrix("a")
    i = tt.iscalar("i")
    a_val = np.asarray(np.random.rand(4, 5), dtype="float32")
    # The reshape is needed otherwise we make the subtensor on the CPU
    # to transfer less data.
    f = theano.function(
        [a, i], gpu_contiguous(a.reshape((5, 4))[::i]), mode=mode_with_gpu
    )
    topo = f.maker.fgraph.toposort()
    assert any([isinstance(node.op, GpuSubtensor) for node in topo])
    assert any([isinstance(node.op, GpuContiguous) for node in topo])
    assert f(a_val, 1).flags.c_contiguous
    assert f(a_val, 2).flags.c_contiguous


class TestGPUReshape(TestReshape):
    def setup_method(self):
        self.shared = gpuarray_shared_constructor
        self.op = GpuReshape
        self.mode = mode_with_gpu
        self.ignore_topo = (
            HostFromGpu,
            GpuFromHost,
            theano.compile.DeepCopyOp,
            GpuDimShuffle,
            GpuElemwise,
            tt.opt.Shape_i,
            tt.opt.MakeVector,
        )
        assert self.op == GpuReshape


class TestGPUComparison(TestComparison):
    def setup_method(self):
        utt.seed_rng()
        self.mode = mode_with_gpu
        self.shared = gpuarray_shared_constructor
        self.dtypes = ["float64", "float32"]


class TestGPUJoinAndSplit(TestJoinAndSplit):
    def setup_method(self):
        self.mode = mode_with_gpu.excluding("constant_folding")
        self.join_op = GpuJoin()
        self.split_op_class = GpuSplit
        # Use join instead of MakeVector since there is no MakeVector on GPU
        self.make_vector_op = GpuJoin()
        # this is to avoid errors with limited devices
        self.floatX = "float32"
        self.hide_error = theano.config.mode not in ["DebugMode", "DEBUG_MODE"]

        def shared(x, **kwargs):
            return gpuarray_shared_constructor(x, target=test_ctx_name, **kwargs)

        self.shared = shared

    def test_gpusplit_opt(self):
        # Test that we move the node to the GPU
        # Also test float16 computation at the same time.
        rng = np.random.RandomState(seed=utt.fetch_seed())
        m = self.shared(rng.rand(4, 6).astype("float16"))
        o = tt.Split(2)(m, 0, [2, 2])
        assert o[0].dtype == "float16"
        f = theano.function([], o, mode=self.mode)
        assert any(
            [
                isinstance(node.op, self.split_op_class)
                for node in f.maker.fgraph.toposort()
            ]
        )
        o1, o2 = f()
        assert np.allclose(o1, m.get_value(borrow=True)[:2])
        assert np.allclose(o2, m.get_value(borrow=True)[2:])


def test_gpujoin_gpualloc():
    a = tt.fmatrix("a")
    a_val = np.asarray(np.random.rand(4, 5), dtype="float32")
    b = tt.fmatrix("b")
    b_val = np.asarray(np.random.rand(3, 5), dtype="float32")

    f = theano.function(
        [a, b], tt.join(0, tt.zeros_like(a), tt.ones_like(b)) + 4, mode=mode_without_gpu
    )
    f_gpu = theano.function(
        [a, b], tt.join(0, tt.zeros_like(a), tt.ones_like(b)), mode=mode_with_gpu
    )
    f_gpu2 = theano.function(
        [a, b], tt.join(0, tt.zeros_like(a), tt.ones_like(b)) + 4, mode=mode_with_gpu
    )
    assert sum([node.op == tt.alloc for node in f.maker.fgraph.toposort()]) == 2
    assert sum([node.op == tt.join_ for node in f.maker.fgraph.toposort()]) == 1
    assert (
        sum([isinstance(node.op, GpuAlloc) for node in f_gpu.maker.fgraph.toposort()])
        == 2
    )
    assert sum([node.op == gpu_join for node in f_gpu.maker.fgraph.toposort()]) == 1
    assert (
        sum([isinstance(node.op, GpuAlloc) for node in f_gpu2.maker.fgraph.toposort()])
        == 2
    )
    assert sum([node.op == gpu_join for node in f_gpu2.maker.fgraph.toposort()]) == 1
    assert np.allclose(f(a_val, b_val), f_gpu2(a_val, b_val))


def test_gpueye():
    def check(dtype, N, M_=None, k=0):
        # Theano does not accept None as a tensor.
        # So we must use a real value.
        M = M_
        # Currently DebugMode does not support None as inputs even if this is
        # allowed.
        if M is None:
            M = N
        N_symb = tt.iscalar()
        M_symb = tt.iscalar()
        k_symb = tt.iscalar()
        out = tt.eye(N_symb, M_symb, k_symb, dtype=dtype) +
np.array(1)
numpy.array
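A minimal sketch (independent of the Theano test above; pure NumPy) of the target API numpy.array as used in the completion: wrapping a Python scalar yields a 0-d ndarray that broadcasts against array expressions, which is how `+ np.array(1)` shifts every element of the eye matrix by one.

# numpy.array(1) is a 0-d array; adding it broadcasts over the whole matrix.
import numpy as np

eye = np.eye(3, 4, k=1, dtype="float32")   # ones on the first superdiagonal
shifted = eye + np.array(1)                # every element incremented by 1
assert shifted.shape == (3, 4) and shifted[0, 1] == 2.0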
import copy
import functools
import itertools
import numbers
import warnings
from collections import defaultdict
from datetime import timedelta
from distutils.version import LooseVersion
from typing import (
    Any,
    Dict,
    Hashable,
    Mapping,
    Optional,
    Sequence,
    Tuple,
    TypeVar,
    Union,
)

import numpy as np
import pandas as pd

import xarray as xr  # only for Dataset and DataArray

from . import arithmetic, common, dtypes, duck_array_ops, indexing, nputils, ops, utils
from .indexing import (
    BasicIndexer,
    OuterIndexer,
    PandasIndexAdapter,
    VectorizedIndexer,
    as_indexable,
)
from .npcompat import IS_NEP18_ACTIVE
from .options import _get_keep_attrs
from .pycompat import (
    cupy_array_type,
    dask_array_type,
    integer_types,
    is_duck_dask_array,
)
from .utils import (
    OrderedSet,
    _default,
    decode_numpy_dict_values,
    drop_dims_from_indexers,
    either_dict_or_kwargs,
    ensure_us_time_resolution,
    infix_dims,
    is_duck_array,
)

NON_NUMPY_SUPPORTED_ARRAY_TYPES = (
    (
        indexing.ExplicitlyIndexed,
        pd.Index,
    )
    + dask_array_type
    + cupy_array_type
)
# https://github.com/python/mypy/issues/224
BASIC_INDEXING_TYPES = integer_types + (slice,)  # type: ignore

VariableType = TypeVar("VariableType", bound="Variable")
"""Type annotation to be used when methods of Variable return self or a copy of self.
When called from an instance of a subclass, e.g. IndexVariable, mypy identifies the
output as an instance of the subclass.

Usage::

   class Variable:
       def f(self: VariableType, ...) -> VariableType:
           ...
"""


class MissingDimensionsError(ValueError):
    """Error class used when we can't safely guess a dimension name."""

    # inherits from ValueError for backward compatibility
    # TODO: move this to an xarray.exceptions module?


def as_variable(obj, name=None) -> "Union[Variable, IndexVariable]":
    """Convert an object into a Variable.

    Parameters
    ----------
    obj : object
        Object to convert into a Variable.

        - If the object is already a Variable, return a shallow copy.
        - Otherwise, if the object has 'dims' and 'data' attributes, convert
          it into a new Variable.
        - If all else fails, attempt to convert the object into a Variable by
          unpacking it into the arguments for creating a new Variable.
    name : str, optional
        If provided:

        - `obj` can be a 1D array, which is assumed to label coordinate values
          along a dimension of this given name.
        - Variables with name matching one of their dimensions are converted
          into `IndexVariable` objects.

    Returns
    -------
    var : Variable
        The newly created variable.

    """
    from .dataarray import DataArray

    # TODO: consider extending this method to automatically handle Iris and
    if isinstance(obj, DataArray):
        # extract the primary Variable from DataArrays
        obj = obj.variable

    if isinstance(obj, Variable):
        obj = obj.copy(deep=False)
    elif isinstance(obj, tuple):
        try:
            obj = Variable(*obj)
        except (TypeError, ValueError) as error:
            # use .format() instead of % because it handles tuples consistently
            raise error.__class__(
                "Could not convert tuple of form "
                "(dims, data[, attrs, encoding]): "
                "{} to Variable.".format(obj)
            )
    elif utils.is_scalar(obj):
        obj = Variable([], obj)
    elif isinstance(obj, (pd.Index, IndexVariable)) and obj.name is not None:
        obj = Variable(obj.name, obj)
    elif isinstance(obj, (set, dict)):
        raise TypeError("variable {!r} has invalid type {!r}".format(name, type(obj)))
    elif name is not None:
        data = as_compatible_data(obj)
        if data.ndim != 1:
            raise MissingDimensionsError(
                "cannot set variable %r with %r-dimensional data "
                "without explicit dimension names. Pass a tuple of "
                "(dims, data) instead." % (name, data.ndim)
            )
        obj = Variable(name, data, fastpath=True)
    else:
        raise TypeError(
            "unable to convert object into a variable without an "
            "explicit list of dimensions: %r" % obj
        )

    if name is not None and name in obj.dims:
        # convert the Variable into an Index
        if obj.ndim != 1:
            raise MissingDimensionsError(
                "%r has more than 1-dimension and the same name as one of its "
                "dimensions %r. xarray disallows such variables because they "
                "conflict with the coordinates used to label "
                "dimensions." % (name, obj.dims)
            )
        obj = obj.to_index_variable()

    return obj


def _maybe_wrap_data(data):
    """
    Put pandas.Index and numpy.ndarray arguments in adapter objects to ensure
    they can be indexed properly.

    NumpyArrayAdapter, PandasIndexAdapter and LazilyOuterIndexedArray should
    all pass through unmodified.
    """
    if isinstance(data, pd.Index):
        return PandasIndexAdapter(data)
    return data


def _possibly_convert_objects(values):
    """Convert arrays of datetime.datetime and datetime.timedelta objects into
    datetime64 and timedelta64, according to the pandas convention. Also used for
    validating that datetime64 and timedelta64 objects are within the valid date
    range for ns precision, as pandas will raise an error if they are not.
    """
    return np.asarray(pd.Series(values.ravel())).reshape(values.shape)


def as_compatible_data(data, fastpath=False):
    """Prepare and wrap data to put in a Variable.

    - If data does not have the necessary attributes, convert it to ndarray.
    - If data has dtype=datetime64, ensure that it has ns precision. If it's a
      pandas.Timestamp, convert it to datetime64.
    - If data is already a pandas or xarray object (other than an Index), just
      use the values.

    Finally, wrap it up with an adapter if necessary.
    """
    if fastpath and getattr(data, "ndim", 0) > 0:
        # can't use fastpath (yet) for scalars
        return _maybe_wrap_data(data)

    if isinstance(data, Variable):
        return data.data

    if isinstance(data, NON_NUMPY_SUPPORTED_ARRAY_TYPES):
        return _maybe_wrap_data(data)

    if isinstance(data, tuple):
        data = utils.to_0d_object_array(data)

    if isinstance(data, pd.Timestamp):
        # TODO: convert, handle datetime objects, too
        data = np.datetime64(data.value, "ns")

    if isinstance(data, timedelta):
        data = np.timedelta64(getattr(data, "value", data), "ns")

    # we don't want nested self-described arrays
    data = getattr(data, "values", data)

    if isinstance(data, np.ma.MaskedArray):
        mask = np.ma.getmaskarray(data)
        if mask.any():
            dtype, fill_value = dtypes.maybe_promote(data.dtype)
            data = np.asarray(data, dtype=dtype)
            data[mask] = fill_value
        else:
            data =
np.asarray(data)
numpy.asarray
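A minimal sketch (plain NumPy, no xarray required; names are illustrative) of the target API numpy.asarray, showing the behaviour the fallback branch of as_compatible_data relies on: array-likes are converted to ndarray, while an ndarray that already has the right dtype is returned unchanged, without a copy.

# numpy.asarray converts only when needed; an existing ndarray passes through.
import numpy as np

lst = [[1, 2], [3, 4]]
arr = np.asarray(lst)      # list of lists -> new 2x2 ndarray
same = np.asarray(arr)     # ndarray in, the very same object out (no copy)
assert same is arr and arr.shape == (2, 2)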
# ________
# /
# \ /
# \ /
# \/

import random
import textwrap

import emd_mean
import AdvEMDpy
import emd_basis
import emd_utils
import numpy as np
import pandas as pd
import cvxpy as cvx
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from scipy.ndimage import gaussian_filter
from emd_utils import time_extension, Utility
from scipy.interpolate import CubicSpline
from emd_hilbert import Hilbert, hilbert_spectrum
from emd_preprocess import Preprocess
from emd_mean import Fluctuation
from AdvEMDpy import EMD

# alternate packages
from PyEMD import EMD as pyemd0215
import emd as emd040

sns.set(style='darkgrid')
pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001)
pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time)
pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series)

# plot 0 - addition
fig = plt.figure(figsize=(9, 4))
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.title('First Iteration of Sifting Algorithm')
plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1)
plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()],
            pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()],
            c='r', label=r'$M(t_i)$', zorder=2)
plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4)
plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()],
            pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()],
            c='c', label=r'$m(t_j)$', zorder=3)
plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5)
plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5)
plt.yticks(ticks=[-2, -1, 0, 1, 2])
plt.xticks(ticks=[0, np.pi, 2 * np.pi], labels=[r'0', r'$\pi$', r'$2\pi$'])
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/pseudo_algorithm.png')
plt.show()

knots = np.arange(12)
time = np.linspace(0, 11, 1101)
basis = emd_basis.Basis(time=time, time_series=time)
b_spline_basis = basis.cubic_b_spline(knots)
chsi_basis = basis.chsi_basis(knots)

# plot 1
plt.title('Non-Natural Cubic B-Spline Bases at Boundary')
plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $')
plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $')
plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $')
plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $')
plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $')
plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $'])
plt.xlim(4.4, 6.6)
plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-')
plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-')
plt.legend(loc='upper left')
plt.savefig('jss_figures/boundary_bases.png')
plt.show()

# plot 1a - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
knots_uniform = np.linspace(0, 2 * np.pi, 51)
emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series)
imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0]
fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Uniform Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Uniform Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Uniform Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
    for j in range(1, len(knots_uniform)):
        axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_uniform.png')
plt.show()

# plot 1b - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series)
imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=1, verbose=False)
fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Statically Optimised Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Statically Optimised Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Statically Optimised Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
    for j in range(1, len(knots)):
        axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_1.png')
plt.show()

# plot 1c - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series)
imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=2, verbose=False)
fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Dynamically Optimised Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Dynamically Optimised Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Dynamically Optimised Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
    for j in range(1, len(knots[i])):
        axs[i].plot(knots[i][j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_2.png')
plt.show()

# plot 1d - addition
window = 81
fig, axs = plt.subplots(2, 1)
fig.subplots_adjust(hspace=0.4)
figure_size = plt.gcf().get_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Preprocess Filtering Demonstration')
axs[1].set_title('Zoomed Region')
preprocess_time = pseudo_alg_time.copy()
np.random.seed(1)
random.seed(1)
preprocess_time_series = pseudo_alg_time_series + np.random.normal(0, 0.1, len(preprocess_time))
for i in random.sample(range(1000), 500):
    preprocess_time_series[i] += np.random.normal(0, 1)
preprocess = Preprocess(time=preprocess_time, time_series=preprocess_time_series)
axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12))
axs[0].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12))
axs[0].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13))
axs[0].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize filter', 12))
axs[0].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize interpolation filter', 14))
axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12))
axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey')
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black')
axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12))
axs[1].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12))
axs[1].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13))
axs[1].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize filter', 12))
axs[1].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize interpolation filter', 14))
axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12))
axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey')
axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi)
axs[1].set_ylim(-3, 3)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[np.pi])
axs[1].set_xticklabels(labels=[r'$\pi$'])
box_0 = axs[0].get_position()
axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15))
box_1 = axs[1].get_position()
axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height])
plt.savefig('jss_figures/preprocess_filter.png')
plt.show()

# plot 1e - addition
fig, axs = plt.subplots(2, 1)
fig.subplots_adjust(hspace=0.4)
figure_size = plt.gcf().get_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Preprocess Smoothing Demonstration')
axs[1].set_title('Zoomed Region')
axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12))
axs[0].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12))
axs[0].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13))
downsampled_and_decimated = preprocess.downsample()
axs[0].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 11))
downsampled = preprocess.downsample(decimate=False)
axs[0].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black')
axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12))
axs[1].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12))
axs[1].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13))
axs[1].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 13))
axs[1].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13))
axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi)
axs[1].set_ylim(-3, 3)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[np.pi])
axs[1].set_xticklabels(labels=[r'$\pi$'])
box_0 = axs[0].get_position()
axs[0].set_position([box_0.x0 - 0.06, box_0.y0, box_0.width * 0.85, box_0.height])
axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15))
box_1 = axs[1].get_position()
axs[1].set_position([box_1.x0 - 0.06, box_1.y0, box_1.width * 0.85, box_1.height])
plt.savefig('jss_figures/preprocess_smooth.png')
plt.show()

# plot 2
fig, axs = plt.subplots(1, 2, sharey=True)
axs[0].set_title('Cubic B-Spline Bases')
axs[0].plot(time, b_spline_basis[2, :].T, '--', label='Basis 1')
axs[0].plot(time, b_spline_basis[3, :].T, '--', label='Basis 2')
axs[0].plot(time, b_spline_basis[4, :].T, '--', label='Basis 3')
axs[0].plot(time, b_spline_basis[5, :].T, '--', label='Basis 4')
axs[0].legend(loc='upper left')
axs[0].plot(5 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-')
axs[0].plot(6 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-')
axs[0].set_xticks([5, 6])
axs[0].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $'])
axs[0].set_xlim(4.5, 6.5)
axs[1].set_title('Cubic Hermite Spline Bases')
axs[1].plot(time, chsi_basis[10, :].T, '--')
axs[1].plot(time, chsi_basis[11, :].T, '--')
axs[1].plot(time, chsi_basis[12, :].T, '--')
axs[1].plot(time, chsi_basis[13, :].T, '--')
axs[1].plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-')
axs[1].plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-')
axs[1].set_xticks([5, 6])
axs[1].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $'])
axs[1].set_xlim(4.5, 6.5)
plt.savefig('jss_figures/comparing_bases.png')
plt.show()

# plot 3
a = 0.25
width = 0.2
time = np.linspace(0, (5 - a) * np.pi, 1001)
time_series = np.cos(time) + np.cos(5 * time)
utils = emd_utils.Utility(time=time, time_series=time_series)
max_bool = utils.max_bool_func_1st_order_fd()
maxima_x = time[max_bool]
maxima_y = time_series[max_bool]
min_bool = utils.min_bool_func_1st_order_fd()
minima_x = time[min_bool]
minima_y = time_series[min_bool]
max_dash_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101)
max_dash = maxima_y[-1] * np.ones_like(max_dash_time)
min_dash_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101)
min_dash = minima_y[-1] * np.ones_like(min_dash_time)
dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101)
dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101)
max_discard = maxima_y[-1]
max_discard_time = minima_x[-1] - maxima_x[-1] + minima_x[-1]
max_discard_dash_time = np.linspace(max_discard_time - width, max_discard_time + width, 101)
max_discard_dash = max_discard * np.ones_like(max_discard_dash_time)
dash_2_time = np.linspace(minima_x[-1], max_discard_time, 101)
dash_2 = np.linspace(minima_y[-1], max_discard, 101)
end_point_time = time[-1]
end_point = time_series[-1]
time_reflect = np.linspace((5 - a) * np.pi, (5 + a) * np.pi, 101)
time_series_reflect = np.flip(np.cos(np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101)) +
                              np.cos(5 * np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101)))
time_series_anti_reflect = time_series_reflect[0] - time_series_reflect
utils = emd_utils.Utility(time=time, time_series=time_series_anti_reflect)
anti_max_bool = utils.max_bool_func_1st_order_fd()
anti_max_point_time = time_reflect[anti_max_bool]
anti_max_point = time_series_anti_reflect[anti_max_bool] utils = emd_utils.Utility(time=time, time_series=time_series_reflect) no_anchor_max_time = time_reflect[utils.max_bool_func_1st_order_fd()] no_anchor_max = time_series_reflect[utils.max_bool_func_1st_order_fd()] point_1 = 5.4 length_distance = np.linspace(maxima_y[-1], minima_y[-1], 101) length_distance_time = point_1 * np.pi * np.ones_like(length_distance) length_time = np.linspace(point_1 * np.pi - width, point_1 * np.pi + width, 101) length_top = maxima_y[-1] * np.ones_like(length_time) length_bottom = minima_y[-1] * np.ones_like(length_time) point_2 = 5.2 length_distance_2 = np.linspace(time_series[-1], minima_y[-1], 101) length_distance_time_2 = point_2 * np.pi * np.ones_like(length_distance_2) length_time_2 = np.linspace(point_2 * np.pi - width, point_2 * np.pi + width, 101) length_top_2 = time_series[-1] * np.ones_like(length_time_2) length_bottom_2 = minima_y[-1] * np.ones_like(length_time_2) symmetry_axis_1_time = minima_x[-1] * np.ones(101) symmetry_axis_2_time = time[-1] * np.ones(101) symmetry_axis = np.linspace(-2, 2, 101) end_time = np.linspace(time[-1] - width, time[-1] + width, 101) end_signal = time_series[-1] * np.ones_like(end_time) anti_symmetric_time = np.linspace(time[-1] - 0.5, time[-1] + 0.5, 101) anti_symmetric_signal = time_series[-1] * np.ones_like(anti_symmetric_time) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.plot(time, time_series, linewidth=2, label='Signal') plt.title('Symmetry Edge Effects Example') plt.plot(time_reflect, time_series_reflect, 'g--', linewidth=2, label=textwrap.fill('Symmetric signal', 10)) plt.plot(time_reflect[:51], time_series_anti_reflect[:51], '--', c='purple', linewidth=2, label=textwrap.fill('Anti-symmetric signal', 10)) plt.plot(max_dash_time, max_dash, 'k-') plt.plot(min_dash_time, min_dash, 'k-') plt.plot(dash_1_time, dash_1, 'k--') plt.plot(dash_2_time, dash_2, 'k--') plt.plot(length_distance_time, length_distance, 'k--') plt.plot(length_distance_time_2, length_distance_2, 'k--') plt.plot(length_time, length_top, 'k-') plt.plot(length_time, length_bottom, 'k-') plt.plot(length_time_2, length_top_2, 'k-') plt.plot(length_time_2, length_bottom_2, 'k-') plt.plot(end_time, end_signal, 'k-') plt.plot(symmetry_axis_1_time, symmetry_axis, 'r--', zorder=1) plt.plot(anti_symmetric_time, anti_symmetric_signal, 'r--', zorder=1) plt.plot(symmetry_axis_2_time, symmetry_axis, 'r--', label=textwrap.fill('Axes of symmetry', 10), zorder=1) plt.text(5.1 * np.pi, -0.7, r'$\beta$L') plt.text(5.34 * np.pi, -0.05, 'L') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(max_discard_time, max_discard, c='purple', zorder=4, label=textwrap.fill('Symmetric Discard maxima', 10)) plt.scatter(end_point_time, end_point, c='orange', zorder=4, label=textwrap.fill('Symmetric Anchor maxima', 10)) plt.scatter(anti_max_point_time, anti_max_point, c='green', zorder=4, label=textwrap.fill('Anti-Symmetric maxima', 10)) plt.scatter(no_anchor_max_time, no_anchor_max, c='gray', zorder=4, label=textwrap.fill('Symmetric maxima', 10)) plt.xlim(3.9 * np.pi, 5.5 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_symmetry_anti.png') plt.show()
# plot 4 a = 0.21 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_1 = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_dash_2 = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_dash_time_1 = maxima_x[-1] * np.ones_like(max_dash_1) max_dash_time_2 = maxima_x[-2] * np.ones_like(max_dash_1) min_dash_1 = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_dash_2 = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_dash_time_1 = minima_x[-1] * np.ones_like(min_dash_1) min_dash_time_2 = minima_x[-2] * np.ones_like(min_dash_1) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) dash_2_time = np.linspace(maxima_x[-1], minima_x[-2], 101) dash_2 = np.linspace(maxima_y[-1], minima_y[-2], 101) s1 = (minima_y[-2] - maxima_y[-1]) / (minima_x[-2] - maxima_x[-1]) slope_based_maximum_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) slope_based_maximum = minima_y[-1] + (slope_based_maximum_time - minima_x[-1]) * s1 max_dash_time_3 = slope_based_maximum_time * np.ones_like(max_dash_1) max_dash_3 = np.linspace(slope_based_maximum - width, slope_based_maximum + width, 101) dash_3_time = np.linspace(minima_x[-1], slope_based_maximum_time, 101) dash_3 = np.linspace(minima_y[-1], slope_based_maximum, 101) s2 = (minima_y[-1] - maxima_y[-1]) / (minima_x[-1] - maxima_x[-1]) slope_based_minimum_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) slope_based_minimum = slope_based_maximum - (slope_based_maximum_time - slope_based_minimum_time) * s2 min_dash_time_3 = slope_based_minimum_time * np.ones_like(min_dash_1) min_dash_3 = np.linspace(slope_based_minimum - width, slope_based_minimum + width, 101) dash_4_time = np.linspace(slope_based_maximum_time, slope_based_minimum_time) dash_4 = np.linspace(slope_based_maximum, slope_based_minimum) maxima_dash = np.linspace(2.5 - width, 2.5 + width, 101) maxima_dash_time_1 = maxima_x[-2] * np.ones_like(maxima_dash) maxima_dash_time_2 = maxima_x[-1] * np.ones_like(maxima_dash) maxima_dash_time_3 = slope_based_maximum_time * np.ones_like(maxima_dash) maxima_line_dash_time = np.linspace(maxima_x[-2], slope_based_maximum_time, 101) maxima_line_dash = 2.5 * np.ones_like(maxima_line_dash_time) minima_dash = np.linspace(-3.4 - width, -3.4 + width, 101) minima_dash_time_1 = minima_x[-2] * np.ones_like(minima_dash) minima_dash_time_2 = minima_x[-1] * np.ones_like(minima_dash) minima_dash_time_3 = slope_based_minimum_time * np.ones_like(minima_dash) minima_line_dash_time = np.linspace(minima_x[-2], slope_based_minimum_time, 101) minima_line_dash = -3.4 * np.ones_like(minima_line_dash_time) # slightly edit signal to make difference between slope-based method and improved slope-based method more clear time_series[time >= minima_x[-1]] = 1.5 * (time_series[time >= minima_x[-1]] - time_series[time == minima_x[-1]]) + \ time_series[time == minima_x[-1]] improved_slope_based_maximum_time = time[-1] improved_slope_based_maximum = time_series[-1] improved_slope_based_minimum_time = slope_based_minimum_time improved_slope_based_minimum = improved_slope_based_maximum + s2 * (improved_slope_based_minimum_time - improved_slope_based_maximum_time) 
min_dash_4 = np.linspace(improved_slope_based_minimum - width, improved_slope_based_minimum + width, 101) min_dash_time_4 = improved_slope_based_minimum_time * np.ones_like(min_dash_4) dash_final_time = np.linspace(improved_slope_based_maximum_time, improved_slope_based_minimum_time, 101) dash_final = np.linspace(improved_slope_based_maximum, improved_slope_based_minimum, 101) ax = plt.subplot(111) figure_size = plt.gcf().get_size_inches() factor = 0.9 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) plt.plot(time, time_series, linewidth=2, label='Signal') plt.title('Slope-Based Edge Effects Example') plt.plot(max_dash_time_1, max_dash_1, 'k-') plt.plot(max_dash_time_2, max_dash_2, 'k-') plt.plot(max_dash_time_3, max_dash_3, 'k-') plt.plot(min_dash_time_1, min_dash_1, 'k-') plt.plot(min_dash_time_2, min_dash_2, 'k-') plt.plot(min_dash_time_3, min_dash_3, 'k-') plt.plot(min_dash_time_4, min_dash_4, 'k-') plt.plot(maxima_dash_time_1, maxima_dash, 'k-') plt.plot(maxima_dash_time_2, maxima_dash, 'k-') plt.plot(maxima_dash_time_3, maxima_dash, 'k-') plt.plot(minima_dash_time_1, minima_dash, 'k-') plt.plot(minima_dash_time_2, minima_dash, 'k-') plt.plot(minima_dash_time_3, minima_dash, 'k-') plt.text(4.34 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$') plt.text(4.74 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$') plt.text(4.12 * np.pi, 2, r'$\Delta{t^{max}_{M}}$') plt.text(4.50 * np.pi, 2, r'$\Delta{t^{max}_{M}}$') plt.text(4.30 * np.pi, 0.35, r'$s_1$') plt.text(4.43 * np.pi, -0.20, r'$s_2$') plt.text(4.30 * np.pi + (minima_x[-1] - minima_x[-2]), 0.35 + (minima_y[-1] - minima_y[-2]), r'$s_1$') plt.text(4.43 * np.pi + (slope_based_minimum_time - minima_x[-1]), -0.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$') plt.text(4.50 * np.pi + (slope_based_minimum_time - minima_x[-1]), 1.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$') plt.plot(minima_line_dash_time, minima_line_dash, 'k--') plt.plot(maxima_line_dash_time, maxima_line_dash, 'k--') plt.plot(dash_1_time, dash_1, 'k--') plt.plot(dash_2_time, dash_2, 'k--') plt.plot(dash_3_time, dash_3, 'k--') plt.plot(dash_4_time, dash_4, 'k--') plt.plot(dash_final_time, dash_final, 'k--') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(slope_based_maximum_time, slope_based_maximum, c='orange', zorder=4, label=textwrap.fill('Slope-based maximum', 11)) plt.scatter(slope_based_minimum_time, slope_based_minimum, c='purple', zorder=4, label=textwrap.fill('Slope-based minimum', 11)) plt.scatter(improved_slope_based_maximum_time, improved_slope_based_maximum, c='deeppink', zorder=4, label=textwrap.fill('Improved slope-based maximum', 11)) plt.scatter(improved_slope_based_minimum_time, improved_slope_based_minimum, c='dodgerblue', zorder=4, label=textwrap.fill('Improved slope-based minimum', 11)) plt.xlim(3.9 * np.pi, 5.5 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-3, -2, -1, 0, 1, 2), ('-3', '-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_slope_based.png') plt.show() # plot 5 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x =
time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] A2 = np.abs(maxima_y[-2] - minima_y[-2]) / 2 A1 = np.abs(maxima_y[-1] - minima_y[-1]) / 2 P2 = 2 * np.abs(maxima_x[-2] - minima_x[-2]) P1 = 2 * np.abs(maxima_x[-1] - minima_x[-1]) Huang_time = (P1 / P2) * (time[time >= maxima_x[-2]] - time[time == maxima_x[-2]]) + maxima_x[-1] Huang_wave = (A1 / A2) * (time_series[time >= maxima_x[-2]] - time_series[time == maxima_x[-2]]) + maxima_y[-1] Coughlin_time = Huang_time Coughlin_wave = A1 * np.cos(2 * np.pi * (1 / P1) * (Coughlin_time - Coughlin_time[0])) Average_max_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) Average_max = (maxima_y[-2] + maxima_y[-1]) / 2 Average_min_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) Average_min = (minima_y[-2] + minima_y[-1]) / 2 utils_Huang = emd_utils.Utility(time=time, time_series=Huang_wave) Huang_max_bool = utils_Huang.max_bool_func_1st_order_fd() Huang_min_bool = utils_Huang.min_bool_func_1st_order_fd() utils_Coughlin = emd_utils.Utility(time=time, time_series=Coughlin_wave) Coughlin_max_bool = utils_Coughlin.max_bool_func_1st_order_fd() Coughlin_min_bool = utils_Coughlin.min_bool_func_1st_order_fd() Huang_max_time = Huang_time[Huang_max_bool] Huang_max = Huang_wave[Huang_max_bool] Huang_min_time = Huang_time[Huang_min_bool] Huang_min = Huang_wave[Huang_min_bool] Coughlin_max_time = Coughlin_time[Coughlin_max_bool] Coughlin_max = Coughlin_wave[Coughlin_max_bool] Coughlin_min_time = Coughlin_time[Coughlin_min_bool] Coughlin_min = Coughlin_wave[Coughlin_min_bool] max_2_x_time = np.linspace(maxima_x[-2] - width, maxima_x[-2] + width, 101) max_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) max_2_x = maxima_y[-2] * np.ones_like(max_2_x_time) min_2_x_time =
np.linspace(minima_x[-2] - width, minima_x[-2] + width, 101)
numpy.linspace
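The `numpy.linspace` call completed above is one of the many this figure script uses to draw short dash markers around extrema: 101 evenly spaced abscissae centred on a point, paired with a constant ordinate. A minimal sketch of the pattern, with hypothetical values standing in for the script's `minima_x[-2]` and `width`:

import numpy as np

centre, half_width = 3.0, 0.2  # hypothetical stand-ins for minima_x[-2] and width
# 101 evenly spaced x-coordinates centred on the extremum, used as the
# abscissae of a short horizontal dash in the figure.
dash_time = np.linspace(centre - half_width, centre + half_width, 101)
dash_y = -1.5 * np.ones_like(dash_time)  # constant ordinate for the dash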
""" YTArray class. """ from __future__ import print_function #----------------------------------------------------------------------------- # Copyright (c) 2013, yt Development Team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. #----------------------------------------------------------------------------- import copy import numpy as np from distutils.version import LooseVersion from functools import wraps from numpy import \ add, subtract, multiply, divide, logaddexp, logaddexp2, true_divide, \ floor_divide, negative, power, remainder, mod, absolute, rint, \ sign, conj, exp, exp2, log, log2, log10, expm1, log1p, sqrt, square, \ reciprocal, sin, cos, tan, arcsin, arccos, arctan, arctan2, \ hypot, sinh, cosh, tanh, arcsinh, arccosh, arctanh, deg2rad, rad2deg, \ bitwise_and, bitwise_or, bitwise_xor, invert, left_shift, right_shift, \ greater, greater_equal, less, less_equal, not_equal, equal, logical_and, \ logical_or, logical_xor, logical_not, maximum, minimum, fmax, fmin, \ isreal, iscomplex, isfinite, isinf, isnan, signbit, copysign, nextafter, \ modf, ldexp, frexp, fmod, floor, ceil, trunc, fabs, spacing try: # numpy 1.13 or newer from numpy import positive, divmod as divmod_, isnat, heaviside except ImportError: positive, divmod_, isnat, heaviside = (None,)*4 from yt.units.unit_object import Unit, UnitParseError from yt.units.unit_registry import UnitRegistry from yt.units.dimensions import \ angle, \ current_mks, \ dimensionless, \ em_dimensions from yt.utilities.exceptions import \ YTUnitOperationError, YTUnitConversionError, \ YTUfuncUnitError, YTIterableUnitCoercionError, \ YTInvalidUnitEquivalence, YTEquivalentDimsError from yt.utilities.lru_cache import lru_cache from numbers import Number as numeric_type from yt.utilities.on_demand_imports import _astropy from sympy import Rational from yt.units.unit_lookup_table import \ default_unit_symbol_lut from yt.units.equivalencies import equivalence_registry from yt.utilities.logger import ytLogger as mylog from .pint_conversions import convert_pint_units NULL_UNIT = Unit() POWER_SIGN_MAPPING = {multiply: 1, divide: -1} # redefine this here to avoid a circular import from yt.funcs def iterable(obj): try: len(obj) except: return False return True def return_arr(func): @wraps(func) def wrapped(*args, **kwargs): ret, units = func(*args, **kwargs) if ret.shape == (): return YTQuantity(ret, units) else: # This could be a subclass, so don't call YTArray directly. 
return type(args[0])(ret, units) return wrapped @lru_cache(maxsize=128, typed=False) def sqrt_unit(unit): return unit**0.5 @lru_cache(maxsize=128, typed=False) def multiply_units(unit1, unit2): return unit1 * unit2 def preserve_units(unit1, unit2=None): return unit1 @lru_cache(maxsize=128, typed=False) def power_unit(unit, power): return unit**power @lru_cache(maxsize=128, typed=False) def square_unit(unit): return unit*unit @lru_cache(maxsize=128, typed=False) def divide_units(unit1, unit2): return unit1/unit2 @lru_cache(maxsize=128, typed=False) def reciprocal_unit(unit): return unit**-1 def passthrough_unit(unit, unit2=None): return unit def return_without_unit(unit, unit2=None): return None def arctan2_unit(unit1, unit2): return NULL_UNIT def comparison_unit(unit1, unit2=None): return None def invert_units(unit): raise TypeError( "Bit-twiddling operators are not defined for YTArray instances") def bitop_units(unit1, unit2): raise TypeError( "Bit-twiddling operators are not defined for YTArray instances") def get_inp_u_unary(ufunc, inputs, out_arr=None): inp = inputs[0] u = getattr(inp, 'units', None) if u is None: u = NULL_UNIT if u.dimensions is angle and ufunc in trigonometric_operators: inp = inp.in_units('radian').v if out_arr is not None: out_arr = ufunc(inp).view(np.ndarray) return out_arr, inp, u def get_inp_u_binary(ufunc, inputs): inp1 = coerce_iterable_units(inputs[0]) inp2 = coerce_iterable_units(inputs[1]) unit1 = getattr(inp1, 'units', None) unit2 = getattr(inp2, 'units', None) ret_class = get_binary_op_return_class(type(inp1), type(inp2)) if unit1 is None: unit1 = Unit(registry=getattr(unit2, 'registry', None)) if unit2 is None and ufunc is not power: unit2 = Unit(registry=getattr(unit1, 'registry', None)) elif ufunc is power: unit2 = inp2 if isinstance(unit2, np.ndarray): if isinstance(unit2, YTArray): if unit2.units.is_dimensionless: pass else: raise YTUnitOperationError(ufunc, unit1, unit2) unit2 = 1.0 return (inp1, inp2), (unit1, unit2), ret_class def handle_preserve_units(inps, units, ufunc, ret_class): if units[0] != units[1]: any_nonzero = [np.any(inps[0]), np.any(inps[1])] if any_nonzero[0] == np.bool_(False): units = (units[1], units[1]) elif any_nonzero[1] == np.bool_(False): units = (units[0], units[0]) else: if not units[0].same_dimensions_as(units[1]): raise YTUnitOperationError(ufunc, *units) inps = (inps[0], ret_class(inps[1]).to( ret_class(inps[0]).units)) return inps, units def handle_comparison_units(inps, units, ufunc, ret_class, raise_error=False): if units[0] != units[1]: u1d = units[0].is_dimensionless u2d = units[1].is_dimensionless any_nonzero = [np.any(inps[0]), np.any(inps[1])] if any_nonzero[0] == np.bool_(False): units = (units[1], units[1]) elif any_nonzero[1] == np.bool_(False): units = (units[0], units[0]) elif not any([u1d, u2d]): if not units[0].same_dimensions_as(units[1]): raise YTUnitOperationError(ufunc, *units) else: if raise_error: raise YTUfuncUnitError(ufunc, *units) inps = (inps[0], ret_class(inps[1]).to( ret_class(inps[0]).units)) return inps, units def handle_multiply_divide_units(unit, units, out, out_arr): if unit.is_dimensionless and unit.base_value != 1.0: if not units[0].is_dimensionless: if units[0].dimensions == units[1].dimensions: out_arr = np.multiply(out_arr.view(np.ndarray), unit.base_value, out=out) unit = Unit(registry=unit.registry) return out, out_arr, unit def coerce_iterable_units(input_object): if isinstance(input_object, np.ndarray): return input_object if iterable(input_object): if any([isinstance(o, 
YTArray) for o in input_object]): ff = getattr(input_object[0], 'units', NULL_UNIT, ) if any([ff != getattr(_, 'units', NULL_UNIT) for _ in input_object]): raise YTIterableUnitCoercionError(input_object) # This will create a copy of the data in the iterable. return YTArray(input_object) return input_object else: return input_object def sanitize_units_mul(this_object, other_object): inp = coerce_iterable_units(this_object) ret = coerce_iterable_units(other_object) # If the other object is a YTArray and has the same dimensions as the object # under consideration, convert so we don't mix units with the same # dimensions. if isinstance(ret, YTArray): if inp.units.same_dimensions_as(ret.units): ret.in_units(inp.units) return ret def sanitize_units_add(this_object, other_object, op_string): inp = coerce_iterable_units(this_object) ret = coerce_iterable_units(other_object) # Make sure the other object is a YTArray before we use the `units` # attribute. if isinstance(ret, YTArray): if not inp.units.same_dimensions_as(ret.units): # handle special case of adding or subtracting with zero or # array filled with zero if not np.any(other_object): return ret.view(np.ndarray) elif not np.any(this_object): return ret raise YTUnitOperationError(op_string, inp.units, ret.units) ret = ret.in_units(inp.units) else: # If the other object is not a YTArray, then one of the arrays must be # dimensionless or filled with zeros if not inp.units.is_dimensionless and np.any(ret): raise YTUnitOperationError(op_string, inp.units, dimensionless) return ret def validate_comparison_units(this, other, op_string): # Check that other is a YTArray. if hasattr(other, 'units'): if this.units.expr is other.units.expr: if this.units.base_value == other.units.base_value: return other if not this.units.same_dimensions_as(other.units): raise YTUnitOperationError(op_string, this.units, other.units) return other.in_units(this.units) return other @lru_cache(maxsize=128, typed=False) def _unit_repr_check_same(my_units, other_units): """ Takes a Unit object, or string of known unit symbol, and check that it is compatible with this quantity. Returns Unit object. """ # let Unit() handle units arg if it's not already a Unit obj. 
if not isinstance(other_units, Unit): other_units = Unit(other_units, registry=my_units.registry) equiv_dims = em_dimensions.get(my_units.dimensions, None) if equiv_dims == other_units.dimensions: if current_mks in equiv_dims.free_symbols: base = "SI" else: base = "CGS" raise YTEquivalentDimsError(my_units, other_units, base) if not my_units.same_dimensions_as(other_units): raise YTUnitConversionError( my_units, my_units.dimensions, other_units, other_units.dimensions) return other_units unary_operators = ( negative, absolute, rint, sign, conj, exp, exp2, log, log2, log10, expm1, log1p, sqrt, square, reciprocal, sin, cos, tan, arcsin, arccos, arctan, sinh, cosh, tanh, arcsinh, arccosh, arctanh, deg2rad, rad2deg, invert, logical_not, isreal, iscomplex, isfinite, isinf, isnan, signbit, floor, ceil, trunc, modf, frexp, fabs, spacing, positive, isnat, ) binary_operators = ( add, subtract, multiply, divide, logaddexp, logaddexp2, true_divide, power, remainder, mod, arctan2, hypot, bitwise_and, bitwise_or, bitwise_xor, left_shift, right_shift, greater, greater_equal, less, less_equal, not_equal, equal, logical_and, logical_or, logical_xor, maximum, minimum, fmax, fmin, copysign, nextafter, ldexp, fmod, divmod_, heaviside ) trigonometric_operators = ( sin, cos, tan, ) class YTArray(np.ndarray): """ An ndarray subclass that attaches a symbolic unit object to the array data. Parameters ---------- input_array : :obj:`!iterable` A tuple, list, or array to attach units to input_units : String unit specification, unit symbol object, or astropy units The units of the array. Powers must be specified using python syntax (cm**3, not cm^3). registry : ~yt.units.unit_registry.UnitRegistry The registry to create units from. If input_units is already associated with a unit registry and this is specified, this will be used instead of the registry associated with the unit object. dtype : data-type The dtype of the array data. Defaults to the dtype of the input data, or, if none is found, uses np.float64 bypass_validation : boolean If True, all input validation is skipped. Using this option may produce corrupted, invalid units or array data, but can lead to significant speedups in the input validation logic adds significant overhead. If set, input_units *must* be a valid unit object. Defaults to False. Examples -------- >>> from yt import YTArray >>> a = YTArray([1, 2, 3], 'cm') >>> b = YTArray([4, 5, 6], 'm') >>> a + b YTArray([ 401., 502., 603.]) cm >>> b + a YTArray([ 4.01, 5.02, 6.03]) m NumPy ufuncs will pass through units where appropriate. >>> import numpy as np >>> a = YTArray(np.arange(8) - 4, 'g/cm**3') >>> np.abs(a) YTArray([4, 3, 2, 1, 0, 1, 2, 3]) g/cm**3 and strip them when it would be annoying to deal with them. >>> np.log10(a) array([ -inf, 0. 
, 0.30103 , 0.47712125, 0.60205999, 0.69897 , 0.77815125, 0.84509804]) YTArray is tightly integrated with yt datasets: >>> import yt >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030') >>> a = ds.arr(np.ones(5), 'code_length') >>> a.in_cgs() YTArray([ 3.08600000e+24, 3.08600000e+24, 3.08600000e+24, 3.08600000e+24, 3.08600000e+24]) cm This is equivalent to: >>> b = YTArray(np.ones(5), 'code_length', registry=ds.unit_registry) >>> np.all(a == b) True """ _ufunc_registry = { add: preserve_units, subtract: preserve_units, multiply: multiply_units, divide: divide_units, logaddexp: return_without_unit, logaddexp2: return_without_unit, true_divide: divide_units, floor_divide: divide_units, negative: passthrough_unit, power: power_unit, remainder: preserve_units, mod: preserve_units, fmod: preserve_units, absolute: passthrough_unit, fabs: passthrough_unit, rint: return_without_unit, sign: return_without_unit, conj: passthrough_unit, exp: return_without_unit, exp2: return_without_unit, log: return_without_unit, log2: return_without_unit, log10: return_without_unit, expm1: return_without_unit, log1p: return_without_unit, sqrt: sqrt_unit, square: square_unit, reciprocal: reciprocal_unit, sin: return_without_unit, cos: return_without_unit, tan: return_without_unit, sinh: return_without_unit, cosh: return_without_unit, tanh: return_without_unit, arcsin: return_without_unit, arccos: return_without_unit, arctan: return_without_unit, arctan2: arctan2_unit, arcsinh: return_without_unit, arccosh: return_without_unit, arctanh: return_without_unit, hypot: preserve_units, deg2rad: return_without_unit, rad2deg: return_without_unit, bitwise_and: bitop_units, bitwise_or: bitop_units, bitwise_xor: bitop_units, invert: invert_units, left_shift: bitop_units, right_shift: bitop_units, greater: comparison_unit, greater_equal: comparison_unit, less: comparison_unit, less_equal: comparison_unit, not_equal: comparison_unit, equal: comparison_unit, logical_and: comparison_unit, logical_or: comparison_unit, logical_xor: comparison_unit, logical_not: return_without_unit, maximum: preserve_units, minimum: preserve_units, fmax: preserve_units, fmin: preserve_units, isreal: return_without_unit, iscomplex: return_without_unit, isfinite: return_without_unit, isinf: return_without_unit, isnan: return_without_unit, signbit: return_without_unit, copysign: passthrough_unit, nextafter: preserve_units, modf: passthrough_unit, ldexp: bitop_units, frexp: return_without_unit, floor: passthrough_unit, ceil: passthrough_unit, trunc: passthrough_unit, spacing: passthrough_unit, positive: passthrough_unit, divmod_: passthrough_unit, isnat: return_without_unit, heaviside: preserve_units, } __array_priority__ = 2.0 def __new__(cls, input_array, input_units=None, registry=None, dtype=None, bypass_validation=False): if dtype is None: dtype = getattr(input_array, 'dtype', np.float64) if bypass_validation is True: obj = np.asarray(input_array, dtype=dtype).view(cls) obj.units = input_units if registry is not None: obj.units.registry = registry return obj if input_array is NotImplemented: return input_array.view(cls) if registry is None and isinstance(input_units, (str, bytes)): if input_units.startswith('code_'): raise UnitParseError( "Code units used without referring to a dataset. 
\n" "Perhaps you meant to do something like this instead: \n" "ds.arr(%s, \"%s\")" % (input_array, input_units) ) if isinstance(input_array, YTArray): ret = input_array.view(cls) if input_units is None: if registry is None: ret.units = input_array.units else: units = Unit(str(input_array.units), registry=registry) ret.units = units elif isinstance(input_units, Unit): ret.units = input_units else: ret.units = Unit(input_units, registry=registry) return ret elif isinstance(input_array, np.ndarray): pass elif iterable(input_array) and input_array: if isinstance(input_array[0], YTArray): return YTArray(np.array(input_array, dtype=dtype), input_array[0].units, registry=registry) # Input array is an already formed ndarray instance # We first cast to be our class type obj = np.asarray(input_array, dtype=dtype).view(cls) # Check units type if input_units is None: # Nothing provided. Make dimensionless... units = Unit() elif isinstance(input_units, Unit): if registry and registry is not input_units.registry: units = Unit(str(input_units), registry=registry) else: units = input_units else: # units kwarg set, but it's not a Unit object. # don't handle all the cases here, let the Unit class handle if # it's a str. units = Unit(input_units, registry=registry) # Attach the units obj.units = units return obj def __repr__(self): """ """ return super(YTArray, self).__repr__()+' '+self.units.__repr__() def __str__(self): """ """ return str(self.view(np.ndarray)) + ' ' + str(self.units) # # Start unit conversion methods # def convert_to_units(self, units): """ Convert the array and units to the given units. Parameters ---------- units : Unit object or str The units you want to convert to. """ new_units = _unit_repr_check_same(self.units, units) (conversion_factor, offset) = self.units.get_conversion_factor(new_units) self.units = new_units values = self.d values *= conversion_factor if offset: np.subtract(self, offset*self.uq, self) return self def convert_to_base(self, unit_system="cgs"): """ Convert the array and units to the equivalent base units in the specified unit system. Parameters ---------- unit_system : string, optional The unit system to be used in the conversion. If not specified, the default base units of cgs are used. Examples -------- >>> E = YTQuantity(2.5, "erg/s") >>> E.convert_to_base(unit_system="galactic") """ return self.convert_to_units(self.units.get_base_equivalent(unit_system)) def convert_to_cgs(self): """ Convert the array and units to the equivalent cgs units. """ return self.convert_to_units(self.units.get_cgs_equivalent()) def convert_to_mks(self): """ Convert the array and units to the equivalent mks units. """ return self.convert_to_units(self.units.get_mks_equivalent()) def in_units(self, units, equivalence=None, **kwargs): """ Creates a copy of this array with the data in the supplied units, and returns it. Optionally, an equivalence can be specified to convert to an equivalent quantity which is not in the same dimensions. .. note:: All additional keyword arguments are passed to the equivalency, which should be used if that particular equivalency requires them. Parameters ---------- units : Unit object or string The units you want to get a new quantity in. equivalence : string, optional The equivalence you wish to use. To see which equivalencies are supported for this unitful quantity, try the :meth:`list_equivalencies` method. 
Default: None Returns ------- YTArray """ if equivalence is None: new_units = _unit_repr_check_same(self.units, units) (conversion_factor, offset) = self.units.get_conversion_factor(new_units) new_array = type(self)(self.ndview * conversion_factor, new_units) if offset: np.subtract(new_array, offset*new_array.uq, new_array) return new_array else: return self.to_equivalent(units, equivalence, **kwargs) def to(self, units, equivalence=None, **kwargs): """ An alias for YTArray.in_units(). See the docstrings of that function for details. """ return self.in_units(units, equivalence=equivalence, **kwargs) def to_value(self, units=None, equivalence=None, **kwargs): """ Creates a copy of this array with the data in the supplied units, and returns it without units. Output is therefore a bare NumPy array. Optionally, an equivalence can be specified to convert to an equivalent quantity which is not in the same dimensions. .. note:: All additional keyword arguments are passed to the equivalency, which should be used if that particular equivalency requires them. Parameters ---------- units : Unit object or string, optional The units you want to get the bare quantity in. If not specified, the value will be returned in the current units. equivalence : string, optional The equivalence you wish to use. To see which equivalencies are supported for this unitful quantity, try the :meth:`list_equivalencies` method. Default: None Returns ------- NumPy array """ if units is None: v = self.value else: v = self.in_units(units, equivalence=equivalence, **kwargs).value if isinstance(self, YTQuantity): return float(v) else: return v def in_base(self, unit_system="cgs"): """ Creates a copy of this array with the data in the specified unit system, and returns it in that system's base units. Parameters ---------- unit_system : string, optional The unit system to be used in the conversion. If not specified, the default base units of cgs are used. Examples -------- >>> E = YTQuantity(2.5, "erg/s") >>> E_new = E.in_base(unit_system="galactic") """ return self.in_units(self.units.get_base_equivalent(unit_system)) def in_cgs(self): """ Creates a copy of this array with the data in the equivalent cgs units, and returns it. Returns ------- Quantity object with data converted to cgs units. """ return self.in_units(self.units.get_cgs_equivalent()) def in_mks(self): """ Creates a copy of this array with the data in the equivalent mks units, and returns it. Returns ------- Quantity object with data converted to mks units. """ return self.in_units(self.units.get_mks_equivalent()) def to_equivalent(self, unit, equiv, **kwargs): """ Convert a YTArray or YTQuantity to an equivalent, e.g., something that is related by only a constant factor but not in the same units. Parameters ---------- unit : string The unit that you wish to convert to. equiv : string The equivalence you wish to use. To see which equivalencies are supported for this unitful quantity, try the :meth:`list_equivalencies` method. 
Examples -------- >>> a = yt.YTArray(1.0e7,"K") >>> a.to_equivalent("keV", "thermal") """ conv_unit = Unit(unit, registry=self.units.registry) if self.units.same_dimensions_as(conv_unit): return self.in_units(conv_unit) this_equiv = equivalence_registry[equiv]() oneway_or_equivalent = ( conv_unit.has_equivalent(equiv) or this_equiv._one_way) if self.has_equivalent(equiv) and oneway_or_equivalent: new_arr = this_equiv.convert( self, conv_unit.dimensions, **kwargs) if isinstance(new_arr, tuple): try: return type(self)(new_arr[0], new_arr[1]).in_units(unit) except YTUnitConversionError: raise YTInvalidUnitEquivalence(equiv, self.units, unit) else: return new_arr.in_units(unit) else: raise YTInvalidUnitEquivalence(equiv, self.units, unit) def list_equivalencies(self): """ Lists the possible equivalencies associated with this YTArray or YTQuantity. """ self.units.list_equivalencies() def has_equivalent(self, equiv): """ Check to see if this YTArray or YTQuantity has an equivalent unit in *equiv*. """ return self.units.has_equivalent(equiv) def ndarray_view(self): """ Returns a view into the array, but as an ndarray rather than ytarray. Returns ------- View of this array's data. """ return self.view(np.ndarray) def to_ndarray(self): """ Creates a copy of this array with the unit information stripped """ return np.array(self) @classmethod def from_astropy(cls, arr, unit_registry=None): """ Convert an AstroPy "Quantity" to a YTArray or YTQuantity. Parameters ---------- arr : AstroPy Quantity The Quantity to convert from. unit_registry : yt UnitRegistry, optional A yt unit registry to use in the conversion. If one is not supplied, the default one will be used. """ # Converting from AstroPy Quantity u = arr.unit ap_units = [] for base, exponent in zip(u.bases, u.powers): unit_str = base.to_string() # we have to do this because AstroPy is silly and defines # hour as "h" if unit_str == "h": unit_str = "hr" ap_units.append("%s**(%s)" % (unit_str, Rational(exponent))) ap_units = "*".join(ap_units) if isinstance(arr.value, np.ndarray): return YTArray(arr.value, ap_units, registry=unit_registry) else: return YTQuantity(arr.value, ap_units, registry=unit_registry) def to_astropy(self, **kwargs): """ Creates a new AstroPy quantity with the same unit information. """ if _astropy.units is None: raise ImportError("You don't have AstroPy installed, so you can't convert to " + "an AstroPy quantity.") return self.value*_astropy.units.Unit(str(self.units), **kwargs) @classmethod def from_pint(cls, arr, unit_registry=None): """ Convert a Pint "Quantity" to a YTArray or YTQuantity. Parameters ---------- arr : Pint Quantity The Quantity to convert from. unit_registry : yt UnitRegistry, optional A yt unit registry to use in the conversion. If one is not supplied, the default one will be used. Examples -------- >>> from pint import UnitRegistry >>> import numpy as np >>> ureg = UnitRegistry() >>> a = np.random.random(10) >>> b = ureg.Quantity(a, "erg/cm**3") >>> c = yt.YTArray.from_pint(b) """ p_units = [] for base, exponent in arr._units.items(): bs = convert_pint_units(base) p_units.append("%s**(%s)" % (bs, Rational(exponent))) p_units = "*".join(p_units) if isinstance(arr.magnitude, np.ndarray): return YTArray(arr.magnitude, p_units, registry=unit_registry) else: return YTQuantity(arr.magnitude, p_units, registry=unit_registry) def to_pint(self, unit_registry=None): """ Convert a YTArray or YTQuantity to a Pint Quantity. Parameters ---------- arr : YTArray or YTQuantity The unitful quantity to convert from. 
unit_registry : Pint UnitRegistry, optional The Pint UnitRegistry to use in the conversion. If one is not supplied, the default one will be used. NOTE: This is not the same as a yt UnitRegistry object. Examples -------- >>> a = YTQuantity(4.0, "cm**2/s") >>> b = a.to_pint() """ from pint import UnitRegistry if unit_registry is None: unit_registry = UnitRegistry() powers_dict = self.units.expr.as_powers_dict() units = [] for unit, pow in powers_dict.items(): # we have to do this because Pint doesn't recognize # "yr" as "year" if str(unit).endswith("yr") and len(str(unit)) in [2,3]: unit = str(unit).replace("yr","year") units.append("%s**(%s)" % (unit, Rational(pow))) units = "*".join(units) return unit_registry.Quantity(self.value, units) # # End unit conversion methods # def write_hdf5(self, filename, dataset_name=None, info=None, group_name=None): r"""Writes a YTArray to hdf5 file. Parameters ---------- filename: string The filename to create and write a dataset to dataset_name: string The name of the dataset to create in the file. info: dictionary A dictionary of supplementary info to write to append as attributes to the dataset. group_name: string An optional group to write the arrays to. If not specified, the arrays are datasets at the top level by default. Examples -------- >>> a = YTArray([1,2,3], 'cm') >>> myinfo = {'field':'dinosaurs', 'type':'field_data'} >>> a.write_hdf5('test_array_data.h5', dataset_name='dinosaurs', ... info=myinfo) """ from yt.utilities.on_demand_imports import _h5py as h5py from yt.extern.six.moves import cPickle as pickle if info is None: info = {} info['units'] = str(self.units) info['unit_registry'] = np.void(pickle.dumps(self.units.registry.lut)) if dataset_name is None: dataset_name = 'array_data' f = h5py.File(filename) if group_name is not None: if group_name in f: g = f[group_name] else: g = f.create_group(group_name) else: g = f if dataset_name in g.keys(): d = g[dataset_name] # Overwrite without deleting if we can get away with it. if d.shape == self.shape and d.dtype == self.dtype: d[...] = self for k in d.attrs.keys(): del d.attrs[k] else: del f[dataset_name] d = g.create_dataset(dataset_name, data=self) else: d = g.create_dataset(dataset_name, data=self) for k, v in info.items(): d.attrs[k] = v f.close() @classmethod def from_hdf5(cls, filename, dataset_name=None, group_name=None): r"""Attempts read in and convert a dataset in an hdf5 file into a YTArray. Parameters ---------- filename: string The filename to of the hdf5 file. dataset_name: string The name of the dataset to read from. If the dataset has a units attribute, attempt to infer units as well. group_name: string An optional group to read the arrays from. If not specified, the arrays are datasets at the top level by default. 
""" import h5py from yt.extern.six.moves import cPickle as pickle if dataset_name is None: dataset_name = 'array_data' f = h5py.File(filename) if group_name is not None: g = f[group_name] else: g = f dataset = g[dataset_name] data = dataset[:] units = dataset.attrs.get('units', '') if 'unit_registry' in dataset.attrs.keys(): unit_lut = pickle.loads(dataset.attrs['unit_registry'].tostring()) else: unit_lut = None f.close() registry = UnitRegistry(lut=unit_lut, add_default_symbols=False) return cls(data, units, registry=registry) # # Start convenience methods # @property def value(self): """Get a copy of the array data as a numpy ndarray""" return np.array(self) v = value @property def ndview(self): """Get a view of the array data.""" return self.ndarray_view() d = ndview @property def unit_quantity(self): """Get a YTQuantity with the same unit as this array and a value of 1.0""" return YTQuantity(1.0, self.units) uq = unit_quantity @property def unit_array(self): """Get a YTArray filled with ones with the same unit and shape as this array""" return np.ones_like(self) ua = unit_array def __getitem__(self, item): ret = super(YTArray, self).__getitem__(item) if ret.shape == (): return YTQuantity(ret, self.units, bypass_validation=True) else: if hasattr(self, 'units'): ret.units = self.units return ret # # Start operation methods # if LooseVersion(np.__version__) < LooseVersion('1.13.0'): def __add__(self, right_object): """ Add this ytarray to the object on the right of the `+` operator. Must check for the correct (same dimension) units. """ ro = sanitize_units_add(self, right_object, "addition") return super(YTArray, self).__add__(ro) def __radd__(self, left_object): """ See __add__. """ lo = sanitize_units_add(self, left_object, "addition") return super(YTArray, self).__radd__(lo) def __iadd__(self, other): """ See __add__. """ oth = sanitize_units_add(self, other, "addition") np.add(self, oth, out=self) return self def __sub__(self, right_object): """ Subtract the object on the right of the `-` from this ytarray. Must check for the correct (same dimension) units. """ ro = sanitize_units_add(self, right_object, "subtraction") return super(YTArray, self).__sub__(ro) def __rsub__(self, left_object): """ See __sub__. """ lo = sanitize_units_add(self, left_object, "subtraction") return super(YTArray, self).__rsub__(lo) def __isub__(self, other): """ See __sub__. """ oth = sanitize_units_add(self, other, "subtraction") np.subtract(self, oth, out=self) return self def __neg__(self): """ Negate the data. """ return super(YTArray, self).__neg__() def __mul__(self, right_object): """ Multiply this YTArray by the object on the right of the `*` operator. The unit objects handle being multiplied. """ ro = sanitize_units_mul(self, right_object) return super(YTArray, self).__mul__(ro) def __rmul__(self, left_object): """ See __mul__. """ lo = sanitize_units_mul(self, left_object) return super(YTArray, self).__rmul__(lo) def __imul__(self, other): """ See __mul__. """ oth = sanitize_units_mul(self, other) np.multiply(self, oth, out=self) return self def __div__(self, right_object): """ Divide this YTArray by the object on the right of the `/` operator. """ ro = sanitize_units_mul(self, right_object) return super(YTArray, self).__div__(ro) def __rdiv__(self, left_object): """ See __div__. """ lo = sanitize_units_mul(self, left_object) return super(YTArray, self).__rdiv__(lo) def __idiv__(self, other): """ See __div__. 
""" oth = sanitize_units_mul(self, other) np.divide(self, oth, out=self) return self def __truediv__(self, right_object): ro = sanitize_units_mul(self, right_object) return super(YTArray, self).__truediv__(ro) def __rtruediv__(self, left_object): """ See __div__. """ lo = sanitize_units_mul(self, left_object) return super(YTArray, self).__rtruediv__(lo) def __itruediv__(self, other): """ See __div__. """ oth = sanitize_units_mul(self, other) np.true_divide(self, oth, out=self) return self def __floordiv__(self, right_object): ro = sanitize_units_mul(self, right_object) return super(YTArray, self).__floordiv__(ro) def __rfloordiv__(self, left_object): """ See __div__. """ lo = sanitize_units_mul(self, left_object) return super(YTArray, self).__rfloordiv__(lo) def __ifloordiv__(self, other): """ See __div__. """ oth = sanitize_units_mul(self, other) np.floor_divide(self, oth, out=self) return self def __or__(self, right_object): return super(YTArray, self).__or__(right_object) def __ror__(self, left_object): return super(YTArray, self).__ror__(left_object) def __ior__(self, other):
np.bitwise_or(self, other, out=self)
numpy.bitwise_or
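The `__ior__` completion above calls `np.bitwise_or` with `out=self`, so the OR is written back into the array's own buffer instead of allocating a new array; note that for arrays that actually carry units, the `_ufunc_registry` shown above maps the bitwise ufuncs to `bitop_units`, which raises a `TypeError`. A small sketch of the in-place pattern on plain integer ndarrays:

import numpy as np

a = np.array([0b0101, 0b0011])
b = np.array([0b0011, 0b0110])
# Elementwise OR written back into `a`, mirroring the semantics of `a |= b`.
np.bitwise_or(a, b, out=a)
# a is now [0b0111, 0b0111], i.e. [7, 7]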
import numpy as np from typing import Tuple, Union, Optional from autoarray.structures.arrays.two_d import array_2d_util from autoarray.geometry import geometry_util from autoarray import numba_util from autoarray.mask import mask_2d_util @numba_util.jit() def grid_2d_centre_from(grid_2d_slim: np.ndarray) -> Tuple[float, float]: """ Returns the centre of a grid from a 1D grid. Parameters ---------- grid_2d_slim The 1D grid of values which are mapped to a 2D array. Returns ------- (float, float) The (y,x) central coordinates of the grid. """ centre_y = (np.max(grid_2d_slim[:, 0]) +
np.min(grid_2d_slim[:, 0])
numpy.min
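The completion pairs `np.min` with the `np.max` already emitted for `centre_y`: `grid_2d_centre_from` reduces the (y, x) coordinate columns to their extremes and returns the midpoint of each axis as the grid centre. A sketch of that midpoint-of-extremes computation on a hypothetical slim grid (the halving is an assumption consistent with the docstring's "central coordinates", since the function body is truncated at the completion):

import numpy as np

# Hypothetical (y, x) coordinate pairs standing in for a real grid_2d_slim.
grid_2d_slim = np.array([[1.0, 0.0], [3.0, 2.0], [2.0, 4.0]])

# Midpoint of the per-axis extremes.
centre_y = (np.max(grid_2d_slim[:, 0]) + np.min(grid_2d_slim[:, 0])) / 2.0
centre_x = (np.max(grid_2d_slim[:, 1]) + np.min(grid_2d_slim[:, 1])) / 2.0
# (centre_y, centre_x) == (2.0, 2.0)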
# pvtrace is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # pvtrace is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import numpy as np from external.transformations import translation_matrix, rotation_matrix import external.transformations as tf from Trace import Photon from Geometry import Box, Cylinder, FinitePlane, transform_point, transform_direction, rotation_matrix_from_vector_alignment, norm from Materials import Spectrum def random_spherical_vector(): # This method of calculating isotropic vectors is taken from the GNU Scientific Library LOOP = True while LOOP: x = -1. + 2. * np.random.uniform() y = -1. + 2. * np.random.uniform() s = x**2 + y**2 if s <= 1.0: LOOP = False z = -1. + 2. * s a = 2 * np.sqrt(1 - s) x = a * x y = a * y return np.array([x,y,z]) class SimpleSource(object): """A light source that will generate photons of a single colour, direction and position.""" def __init__(self, position=[0,0,0], direction=[0,0,1], wavelength=555, use_random_polarisation=False): super(SimpleSource, self).__init__() self.position = position self.direction = direction self.wavelength = wavelength self.use_random_polarisation = use_random_polarisation self.throw = 0 self.source_id = "SimpleSource_" + str(id(self)) def photon(self): photon = Photon() photon.source = self.source_id photon.position = np.array(self.position) photon.direction = np.array(self.direction) photon.active = True photon.wavelength = self.wavelength # If use_random_polarisation is set, generate a random polarisation vector for the photon if self.use_random_polarisation: # Randomise rotation angle around the xy-plane, then transform from +z to the direction of the photon vec = random_spherical_vector() vec[2] = 0. vec = norm(vec) R = rotation_matrix_from_vector_alignment(self.direction, [0,0,1]) photon.polarisation = transform_direction(vec, R) else: photon.polarisation = None photon.id = self.throw self.throw = self.throw + 1 return photon class Laser(object): """A light source that will generate photons of a single colour, direction and position.""" def __init__(self, position=[0,0,0], direction=[0,0,1], wavelength=555, polarisation=None): super(Laser, self).__init__() self.position = np.array(position) self.direction = np.array(direction) self.wavelength = wavelength assert polarisation is not None, "Polarisation of the Laser is not set."
self.polarisation = np.array(polarisation) self.throw = 0 self.source_id = "LaserSource_" + str(id(self)) def photon(self): photon = Photon() photon.source = self.source_id photon.position = np.array(self.position) photon.direction = np.array(self.direction) photon.active = True photon.wavelength = self.wavelength photon.polarisation = self.polarisation photon.id = self.throw self.throw = self.throw + 1 return photon class PlanarSource(object): """A box that emits photons from the top surface (normal), sampled from the spectrum.""" def __init__(self, spectrum=None, wavelength=555, direction=(0,0,1), length=0.05, width=0.05): super(PlanarSource, self).__init__() self.spectrum = spectrum self.wavelength = wavelength self.plane = FinitePlane(length=length, width=width) self.length = length self.width = width # direction is the direction that photons are fired out of the plane in the GLOBAL FRAME. # i.e. this is passed directly to the photon to set its direction self.direction = direction self.throw = 0 self.source_id = "PlanarSource_" + str(id(self)) def translate(self, translation): self.plane.append_transform(tf.translation_matrix(translation)) def rotate(self, angle, axis): self.plane.append_transform(tf.rotation_matrix(angle, axis)) def photon(self): photon = Photon() photon.source = self.source_id photon.id = self.throw self.throw = self.throw + 1 # Create a point which is on the surface of the finite plane in its local frame x = np.random.uniform(0., self.length) y = np.random.uniform(0., self.width) local_point = (x, y, 0.) # Transform the local point into the global frame photon.position = transform_point(local_point, self.plane.transform) photon.direction = self.direction photon.active = True if self.spectrum is not None: photon.wavelength = self.spectrum.wavelength_at_probability(np.random.uniform()) else: photon.wavelength = self.wavelength return photon class LensSource(object): """ A source where photons generated in a plane are focused on a line with space tolerance given by variable "focussize". The focus line should be perpendicular to the plane normal and aligned with the z-axis. """ def __init__(self, spectrum = None, wavelength = 555, linepoint=(0,0,0), linedirection=(0,0,1), focussize = 0, planeorigin = (-1,-1,-1), planeextent = (-1,1,1)): super(LensSource, self).__init__() self.spectrum = spectrum self.wavelength = wavelength self.planeorigin = planeorigin self.planeextent = planeextent self.linepoint = np.array(linepoint) self.linedirection = np.array(linedirection) self.focussize = focussize self.throw = 0 self.source_id = "LensSource_" + str(id(self)) def photon(self): photon = Photon() photon.source = self.source_id photon.id = self.throw self.throw = self.throw + 1 # Position x = np.random.uniform(self.planeorigin[0],self.planeextent[0]) y = np.random.uniform(self.planeorigin[1],self.planeextent[1]) z = np.random.uniform(self.planeorigin[2],self.planeextent[2]) photon.position = np.array((x,y,z)) # Direction focuspoint = np.array((0.,0.,0.)) focuspoint[0] = self.linepoint[0] +
np.random.uniform(-self.focussize, self.focussize)
numpy.random.uniform
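Besides the completion, which jitters one component of the focus point by a uniform draw in [-focussize, focussize], the `random_spherical_vector` helper at the top of this prompt implements the rejection method for isotropic directions used by the GNU Scientific Library (Marsaglia, 1972). A compact, self-contained sketch of that sampler, equivalent to the helper up to the sign convention on z:

import numpy as np

def isotropic_unit_vector():
    # Marsaglia (1972) rejection sampling: draw (x, y) uniformly in the
    # unit disc, then lift the accepted point onto the unit sphere.
    while True:
        x, y = np.random.uniform(-1.0, 1.0, size=2)
        s = x * x + y * y
        if s <= 1.0:
            break
    a = 2.0 * np.sqrt(1.0 - s)
    # Unit norm by construction: (a*x)**2 + (a*y)**2 + (1 - 2*s)**2 == 1.
    return np.array([a * x, a * y, 1.0 - 2.0 * s])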
# pylint: disable=protected-access """ Test the wrappers for the C API. """ import os from contextlib import contextmanager import numpy as np import numpy.testing as npt import pandas as pd import pytest import xarray as xr from packaging.version import Version from pygmt import Figure, clib from pygmt.clib.conversion import dataarray_to_matrix from pygmt.clib.session import FAMILIES, VIAS from pygmt.exceptions import ( GMTCLibError, GMTCLibNoSessionError, GMTInvalidInput, GMTVersionError, ) from pygmt.helpers import GMTTempFile TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data") with clib.Session() as _lib: gmt_version = Version(_lib.info["version"]) @contextmanager def mock(session, func, returns=None, mock_func=None): """ Mock a GMT C API function to make it always return a given value. Used to test that exceptions are raised when API functions fail by producing a NULL pointer as output or non-zero status codes. Needed because it's not easy to get some API functions to fail without inducing a Segmentation Fault (which is a good thing because libgmt usually only fails with errors). """ if mock_func is None: def mock_api_function(*args): # pylint: disable=unused-argument """ A mock GMT API function that always returns a given value. """ return returns mock_func = mock_api_function get_libgmt_func = session.get_libgmt_func def mock_get_libgmt_func(name, argtypes=None, restype=None): """ Return our mock function. """ if name == func: return mock_func return get_libgmt_func(name, argtypes, restype) setattr(session, "get_libgmt_func", mock_get_libgmt_func) yield setattr(session, "get_libgmt_func", get_libgmt_func) def test_getitem(): """ Test that I can get correct constants from the C lib. """ ses = clib.Session() assert ses["GMT_SESSION_EXTERNAL"] != -99999 assert ses["GMT_MODULE_CMD"] != -99999 assert ses["GMT_PAD_DEFAULT"] != -99999 assert ses["GMT_DOUBLE"] != -99999 with pytest.raises(GMTCLibError): ses["A_WHOLE_LOT_OF_JUNK"] # pylint: disable=pointless-statement def test_create_destroy_session(): """ Test that create and destroy session are called without errors. """ # Create two session and make sure they are not pointing to the same memory session1 = clib.Session() session1.create(name="test_session1") assert session1.session_pointer is not None session2 = clib.Session() session2.create(name="test_session2") assert session2.session_pointer is not None assert session2.session_pointer != session1.session_pointer session1.destroy() session2.destroy() # Create and destroy a session twice ses = clib.Session() for __ in range(2): with pytest.raises(GMTCLibNoSessionError): ses.session_pointer # pylint: disable=pointless-statement ses.create("session1") assert ses.session_pointer is not None ses.destroy() with pytest.raises(GMTCLibNoSessionError): ses.session_pointer # pylint: disable=pointless-statement def test_create_session_fails(): """ Check that an exception is raised when failing to create a session. """ ses = clib.Session() with mock(ses, "GMT_Create_Session", returns=None): with pytest.raises(GMTCLibError): ses.create("test-session-name") # Should fail if trying to create a session before destroying the old one. ses.create("test1") with pytest.raises(GMTCLibError): ses.create("test2") def test_destroy_session_fails(): """ Fail to destroy session when given bad input. 
""" ses = clib.Session() with pytest.raises(GMTCLibNoSessionError): ses.destroy() ses.create("test-session") with mock(ses, "GMT_Destroy_Session", returns=1): with pytest.raises(GMTCLibError): ses.destroy() ses.destroy() def test_call_module(): """ Run a command to see if call_module works. """ data_fname = os.path.join(TEST_DATA_DIR, "points.txt") out_fname = "test_call_module.txt" with clib.Session() as lib: with GMTTempFile() as out_fname: lib.call_module("info", "{} -C ->{}".format(data_fname, out_fname.name)) assert os.path.exists(out_fname.name) output = out_fname.read().strip() assert output == "11.5309 61.7074 -2.9289 7.8648 0.1412 0.9338" def test_call_module_invalid_arguments(): """ Fails for invalid module arguments. """ with clib.Session() as lib: with pytest.raises(GMTCLibError): lib.call_module("info", "bogus-data.bla") def test_call_module_invalid_name(): """ Fails when given bad input. """ with clib.Session() as lib: with pytest.raises(GMTCLibError): lib.call_module("meh", "") def test_call_module_error_message(): """ Check is the GMT error message was captured. """ with clib.Session() as lib: try: lib.call_module("info", "bogus-data.bla") except GMTCLibError as error: assert "Module 'info' failed with status code" in str(error) assert "gmtinfo [ERROR]: Cannot find file bogus-data.bla" in str(error) def test_method_no_session(): """ Fails when not in a session. """ # Create an instance of Session without "with" so no session is created. lib = clib.Session() with pytest.raises(GMTCLibNoSessionError): lib.call_module("gmtdefaults", "") with pytest.raises(GMTCLibNoSessionError): lib.session_pointer # pylint: disable=pointless-statement def test_parse_constant_single(): """ Parsing a single family argument correctly. """ lib = clib.Session() for family in FAMILIES: parsed = lib._parse_constant(family, valid=FAMILIES) assert parsed == lib[family] def test_parse_constant_composite(): """ Parsing a composite constant argument (separated by |) correctly. """ lib = clib.Session() test_cases = ((family, via) for family in FAMILIES for via in VIAS) for family, via in test_cases: composite = "|".join([family, via]) expected = lib[family] + lib[via] parsed = lib._parse_constant(composite, valid=FAMILIES, valid_modifiers=VIAS) assert parsed == expected def test_parse_constant_fails(): """ Check if the function fails when given bad input. """ lib = clib.Session() test_cases = [ "SOME_random_STRING", "GMT_IS_DATASET|GMT_VIA_MATRIX|GMT_VIA_VECTOR", "GMT_IS_DATASET|NOT_A_PROPER_VIA", "NOT_A_PROPER_FAMILY|GMT_VIA_MATRIX", "NOT_A_PROPER_FAMILY|ALSO_INVALID", ] for test_case in test_cases: with pytest.raises(GMTInvalidInput): lib._parse_constant(test_case, valid=FAMILIES, valid_modifiers=VIAS) # Should also fail if not given valid modifiers but is using them anyway. # This should work... lib._parse_constant( "GMT_IS_DATASET|GMT_VIA_MATRIX", valid=FAMILIES, valid_modifiers=VIAS ) # But this shouldn't. with pytest.raises(GMTInvalidInput): lib._parse_constant( "GMT_IS_DATASET|GMT_VIA_MATRIX", valid=FAMILIES, valid_modifiers=None ) def test_create_data_dataset(): """ Run the function to make sure it doesn't fail badly. 
""" with clib.Session() as lib: # Dataset from vectors data_vector = lib.create_data( family="GMT_IS_DATASET|GMT_VIA_VECTOR", geometry="GMT_IS_POINT", mode="GMT_CONTAINER_ONLY", dim=[10, 20, 1, 0], # columns, rows, layers, dtype ) # Dataset from matrices data_matrix = lib.create_data( family="GMT_IS_DATASET|GMT_VIA_MATRIX", geometry="GMT_IS_POINT", mode="GMT_CONTAINER_ONLY", dim=[10, 20, 1, 0], ) assert data_vector != data_matrix def test_create_data_grid_dim(): """ Create a grid ignoring range and inc. """ with clib.Session() as lib: # Grids from matrices using dim lib.create_data( family="GMT_IS_GRID|GMT_VIA_MATRIX", geometry="GMT_IS_SURFACE", mode="GMT_CONTAINER_ONLY", dim=[10, 20, 1, 0], ) def test_create_data_grid_range(): """ Create a grid specifying range and inc instead of dim. """ with clib.Session() as lib: # Grids from matrices using range and int lib.create_data( family="GMT_IS_GRID|GMT_VIA_MATRIX", geometry="GMT_IS_SURFACE", mode="GMT_CONTAINER_ONLY", ranges=[150.0, 250.0, -20.0, 20.0], inc=[0.1, 0.2], ) def test_create_data_fails(): """ Check that create_data raises exceptions for invalid input and output. """ # Passing in invalid mode with pytest.raises(GMTInvalidInput): with clib.Session() as lib: lib.create_data( family="GMT_IS_DATASET", geometry="GMT_IS_SURFACE", mode="Not_a_valid_mode", dim=[0, 0, 1, 0], ranges=[150.0, 250.0, -20.0, 20.0], inc=[0.1, 0.2], ) # Passing in invalid geometry with pytest.raises(GMTInvalidInput): with clib.Session() as lib: lib.create_data( family="GMT_IS_GRID", geometry="Not_a_valid_geometry", mode="GMT_CONTAINER_ONLY", dim=[0, 0, 1, 0], ranges=[150.0, 250.0, -20.0, 20.0], inc=[0.1, 0.2], ) # If the data pointer returned is None (NULL pointer) with pytest.raises(GMTCLibError): with clib.Session() as lib: with mock(lib, "GMT_Create_Data", returns=None): lib.create_data( family="GMT_IS_DATASET", geometry="GMT_IS_SURFACE", mode="GMT_CONTAINER_ONLY", dim=[11, 10, 2, 0], ) def test_virtual_file(): """ Test passing in data via a virtual file with a Dataset. """ dtypes = "float32 float64 int32 int64 uint32 uint64".split() shape = (5, 3) for dtype in dtypes: with clib.Session() as lib: family = "GMT_IS_DATASET|GMT_VIA_MATRIX" geometry = "GMT_IS_POINT" dataset = lib.create_data( family=family, geometry=geometry, mode="GMT_CONTAINER_ONLY", dim=[shape[1], shape[0], 1, 0], # columns, rows, layers, dtype ) data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape) lib.put_matrix(dataset, matrix=data) # Add the dataset to a virtual file and pass it along to gmt info vfargs = (family, geometry, "GMT_IN|GMT_IS_REFERENCE", dataset) with lib.open_virtual_file(*vfargs) as vfile: with GMTTempFile() as outfile: lib.call_module("info", "{} ->{}".format(vfile, outfile.name)) output = outfile.read(keep_tabs=True) bounds = "\t".join( ["<{:.0f}/{:.0f}>".format(col.min(), col.max()) for col in data.T] ) expected = "<matrix memory>: N = {}\t{}\n".format(shape[0], bounds) assert output == expected def test_virtual_file_fails(): """ Check that opening and closing virtual files raises an exception for non- zero return codes. """ vfargs = ( "GMT_IS_DATASET|GMT_VIA_MATRIX", "GMT_IS_POINT", "GMT_IN|GMT_IS_REFERENCE", None, ) # Mock Open_VirtualFile to test the status check when entering the context. # If the exception is raised, the code won't get to the closing of the # virtual file. 
with clib.Session() as lib, mock(lib, "GMT_Open_VirtualFile", returns=1): with pytest.raises(GMTCLibError): with lib.open_virtual_file(*vfargs): print("Should not get to this code") # Test the status check when closing the virtual file # Mock the opening to return 0 (success) so that we don't open a file that # we won't close later. with clib.Session() as lib, mock(lib, "GMT_Open_VirtualFile", returns=0), mock( lib, "GMT_Close_VirtualFile", returns=1 ): with pytest.raises(GMTCLibError): with lib.open_virtual_file(*vfargs): pass print("Shouldn't get to this code either") def test_virtual_file_bad_direction(): """ Test passing an invalid direction argument. """ with clib.Session() as lib: vfargs = ( "GMT_IS_DATASET|GMT_VIA_MATRIX", "GMT_IS_POINT", "GMT_IS_GRID", # The invalid direction argument 0, ) with pytest.raises(GMTInvalidInput): with lib.open_virtual_file(*vfargs): print("This should have failed") def test_virtualfile_from_vectors(): """ Test the automation for transforming vectors to virtual file dataset. """ dtypes = "float32 float64 int32 int64 uint32 uint64".split() size = 10 for dtype in dtypes: x = np.arange(size, dtype=dtype) y = np.arange(size, size * 2, 1, dtype=dtype) z = np.arange(size * 2, size * 3, 1, dtype=dtype) with clib.Session() as lib: with lib.virtualfile_from_vectors(x, y, z) as vfile: with GMTTempFile() as outfile: lib.call_module("info", "{} ->{}".format(vfile, outfile.name)) output = outfile.read(keep_tabs=True) bounds = "\t".join( ["<{:.0f}/{:.0f}>".format(i.min(), i.max()) for i in (x, y, z)] ) expected = "<vector memory>: N = {}\t{}\n".format(size, bounds) assert output == expected @pytest.mark.parametrize("dtype", [str, object]) def test_virtualfile_from_vectors_one_string_or_object_column(dtype): """ Test passing in one column with string or object dtype into virtual file dataset. """ size = 5 x = np.arange(size, dtype=np.int32) y = np.arange(size, size * 2, 1, dtype=np.int32) strings = np.array(["a", "bc", "defg", "hijklmn", "opqrst"], dtype=dtype) with clib.Session() as lib: with lib.virtualfile_from_vectors(x, y, strings) as vfile: with GMTTempFile() as outfile: lib.call_module("convert", f"{vfile} ->{outfile.name}") output = outfile.read(keep_tabs=True) expected = "".join(f"{i}\t{j}\t{k}\n" for i, j, k in zip(x, y, strings)) assert output == expected @pytest.mark.parametrize("dtype", [str, object]) def test_virtualfile_from_vectors_two_string_or_object_columns(dtype): """ Test passing in two columns of string or object dtype into virtual file dataset. """ size = 5 x = np.arange(size, dtype=np.int32) y = np.arange(size, size * 2, 1, dtype=np.int32) strings1 = np.array(["a", "bc", "def", "ghij", "klmno"], dtype=dtype) strings2 =
np.array(["pqrst", "uvwx", "yz!", "@#", "$"], dtype=dtype)
numpy.array
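A note on the completed call above: with `dtype=str`, `numpy.array` builds a fixed-width unicode array (dtype like `<U5`, sized to the longest element), while `dtype=object` keeps the Python `str` objects intact; that distinction is exactly what the parametrized tests above exercise. A small sketch:

import numpy as np

fixed = np.array(["pqrst", "uvwx", "yz!"], dtype=str)     # dtype '<U5'
boxed = np.array(["pqrst", "uvwx", "yz!"], dtype=object)  # dtype 'object'
print(fixed.dtype, boxed.dtype)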
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
This module contains unit tests of the rmgpy.reaction module.
"""

import numpy
import unittest
from external.wip import work_in_progress

from rmgpy.species import Species, TransitionState
from rmgpy.reaction import Reaction
from rmgpy.statmech.translation import Translation, IdealGasTranslation
from rmgpy.statmech.rotation import Rotation, LinearRotor, NonlinearRotor, KRotor, SphericalTopRotor
from rmgpy.statmech.vibration import Vibration, HarmonicOscillator
from rmgpy.statmech.torsion import Torsion, HinderedRotor
from rmgpy.statmech.conformer import Conformer
from rmgpy.kinetics import Arrhenius
from rmgpy.thermo import Wilhoit
import rmgpy.constants as constants

################################################################################


class PseudoSpecies:
    """
    Can be used in place of a :class:`rmg.species.Species` for isomorphism checks.

    PseudoSpecies('a') is isomorphic with PseudoSpecies('A') but nothing else.
    """
    def __init__(self, label):
        self.label = label

    def __repr__(self):
        return "PseudoSpecies('{0}')".format(self.label)

    def __str__(self):
        return self.label

    def isIsomorphic(self, other):
        return self.label.lower() == other.label.lower()


class TestReactionIsomorphism(unittest.TestCase):
    """
    Contains unit tests of the isomorphism testing of the Reaction class.
    """

    def makeReaction(self, reaction_string):
        """
        Make a Reaction (containing PseudoSpecies) from a string like 'Ab=CD'.
        """
        reactants, products = reaction_string.split('=')
        reactants = [PseudoSpecies(i) for i in reactants]
        products = [PseudoSpecies(i) for i in products]
        return Reaction(reactants=reactants, products=products)

    def test1to1(self):
        r1 = self.makeReaction('A=B')
        self.assertTrue(r1.isIsomorphic(self.makeReaction('a=B')))
        self.assertTrue(r1.isIsomorphic(self.makeReaction('b=A')))
        self.assertFalse(r1.isIsomorphic(self.makeReaction('B=a'), eitherDirection=False))
        self.assertFalse(r1.isIsomorphic(self.makeReaction('A=C')))
        self.assertFalse(r1.isIsomorphic(self.makeReaction('A=BB')))

    def test1to2(self):
        r1 = self.makeReaction('A=BC')
        self.assertTrue(r1.isIsomorphic(self.makeReaction('a=Bc')))
        self.assertTrue(r1.isIsomorphic(self.makeReaction('cb=a')))
        self.assertTrue(r1.isIsomorphic(self.makeReaction('a=cb'), eitherDirection=False))
        self.assertFalse(r1.isIsomorphic(self.makeReaction('bc=a'), eitherDirection=False))
        self.assertFalse(r1.isIsomorphic(self.makeReaction('a=c')))
        self.assertFalse(r1.isIsomorphic(self.makeReaction('ab=c')))

    def test2to2(self):
        r1 = self.makeReaction('AB=CD')
        self.assertTrue(r1.isIsomorphic(self.makeReaction('ab=cd')))
        self.assertTrue(r1.isIsomorphic(self.makeReaction('ab=dc'), eitherDirection=False))
        self.assertTrue(r1.isIsomorphic(self.makeReaction('dc=ba')))
        self.assertFalse(r1.isIsomorphic(self.makeReaction('cd=ab'), eitherDirection=False))
        self.assertFalse(r1.isIsomorphic(self.makeReaction('ab=ab')))
        self.assertFalse(r1.isIsomorphic(self.makeReaction('ab=cde')))

    def test2to3(self):
        r1 = self.makeReaction('AB=CDE')
        self.assertTrue(r1.isIsomorphic(self.makeReaction('ab=cde')))
        self.assertTrue(r1.isIsomorphic(self.makeReaction('ba=edc'), eitherDirection=False))
        self.assertTrue(r1.isIsomorphic(self.makeReaction('dec=ba')))
        self.assertFalse(r1.isIsomorphic(self.makeReaction('cde=ab'), eitherDirection=False))
        self.assertFalse(r1.isIsomorphic(self.makeReaction('ab=abc')))
        self.assertFalse(r1.isIsomorphic(self.makeReaction('abe=cde')))


class TestReaction(unittest.TestCase):
    """
    Contains unit tests of the Reaction class.
""" def setUp(self): """ A method that is called prior to each unit test in this class. """ ethylene = Species( label = 'C2H4', conformer = Conformer( E0 = (44.7127, 'kJ/mol'), modes = [ IdealGasTranslation( mass = (28.0313, 'amu'), ), NonlinearRotor( inertia = ( [3.41526, 16.6498, 20.065], 'amu*angstrom^2', ), symmetry = 4, ), HarmonicOscillator( frequencies = ( [828.397, 970.652, 977.223, 1052.93, 1233.55, 1367.56, 1465.09, 1672.25, 3098.46, 3111.7, 3165.79, 3193.54], 'cm^-1', ), ), ], spinMultiplicity = 1, opticalIsomers = 1, ), ) hydrogen = Species( label = 'H', conformer = Conformer( E0 = (211.794, 'kJ/mol'), modes = [ IdealGasTranslation( mass = (1.00783, 'amu'), ), ], spinMultiplicity = 2, opticalIsomers = 1, ), ) ethyl = Species( label = 'C2H5', conformer = Conformer( E0 = (111.603, 'kJ/mol'), modes = [ IdealGasTranslation( mass = (29.0391, 'amu'), ), NonlinearRotor( inertia = ( [4.8709, 22.2353, 23.9925], 'amu*angstrom^2', ), symmetry = 1, ), HarmonicOscillator( frequencies = ( [482.224, 791.876, 974.355, 1051.48, 1183.21, 1361.36, 1448.65, 1455.07, 1465.48, 2688.22, 2954.51, 3033.39, 3101.54, 3204.73], 'cm^-1', ), ), HinderedRotor( inertia = (1.11481, 'amu*angstrom^2'), symmetry = 6, barrier = (0.244029, 'kJ/mol'), semiclassical = None, ), ], spinMultiplicity = 2, opticalIsomers = 1, ), ) TS = TransitionState( label = 'TS', conformer = Conformer( E0 = (266.694, 'kJ/mol'), modes = [ IdealGasTranslation( mass = (29.0391, 'amu'), ), NonlinearRotor( inertia = ( [6.78512, 22.1437, 22.2114], 'amu*angstrom^2', ), symmetry = 1, ), HarmonicOscillator( frequencies = ( [412.75, 415.206, 821.495, 924.44, 982.714, 1024.16, 1224.21, 1326.36, 1455.06, 1600.35, 3101.46, 3110.55, 3175.34, 3201.88], 'cm^-1', ), ), ], spinMultiplicity = 2, opticalIsomers = 1, ), frequency = (-750.232, 'cm^-1'), ) self.reaction = Reaction( reactants = [hydrogen, ethylene], products = [ethyl], kinetics = Arrhenius( A = (501366000.0, 'cm^3/(mol*s)'), n = 1.637, Ea = (4.32508, 'kJ/mol'), T0 = (1, 'K'), Tmin = (300, 'K'), Tmax = (2500, 'K'), ), transitionState = TS, ) # CC(=O)O[O] acetylperoxy = Species( label='acetylperoxy', thermo=Wilhoit(Cp0=(4.0*constants.R,"J/(mol*K)"), CpInf=(21.0*constants.R,"J/(mol*K)"), a0=-3.95, a1=9.26, a2=-15.6, a3=8.55, B=(500.0,"K"), H0=(-6.151e+04,"J/mol"), S0=(-790.2,"J/(mol*K)")), ) # C[C]=O acetyl = Species( label='acetyl', thermo=Wilhoit(Cp0=(4.0*constants.R,"J/(mol*K)"), CpInf=(15.5*constants.R,"J/(mol*K)"), a0=0.2541, a1=-0.4712, a2=-4.434, a3=2.25, B=(500.0,"K"), H0=(-1.439e+05,"J/mol"), S0=(-524.6,"J/(mol*K)")), ) # [O][O] oxygen = Species( label='oxygen', thermo=Wilhoit(Cp0=(3.5*constants.R,"J/(mol*K)"), CpInf=(4.5*constants.R,"J/(mol*K)"), a0=-0.9324, a1=26.18, a2=-70.47, a3=44.12, B=(500.0,"K"), H0=(1.453e+04,"J/mol"), S0=(-12.19,"J/(mol*K)")), ) self.reaction2 = Reaction( reactants=[acetyl, oxygen], products=[acetylperoxy], kinetics = Arrhenius( A = (2.65e12, 'cm^3/(mol*s)'), n = 0.0, Ea = (0.0, 'kJ/mol'), T0 = (1, 'K'), Tmin = (300, 'K'), Tmax = (2000, 'K'), ), ) def testIsIsomerization(self): """ Test the Reaction.isIsomerization() method. 
""" isomerization = Reaction(reactants=[Species()], products=[Species()]) association = Reaction(reactants=[Species(),Species()], products=[Species()]) dissociation = Reaction(reactants=[Species()], products=[Species(),Species()]) bimolecular = Reaction(reactants=[Species(),Species()], products=[Species(),Species()]) self.assertTrue(isomerization.isIsomerization()) self.assertFalse(association.isIsomerization()) self.assertFalse(dissociation.isIsomerization()) self.assertFalse(bimolecular.isIsomerization()) def testIsAssociation(self): """ Test the Reaction.isAssociation() method. """ isomerization = Reaction(reactants=[Species()], products=[Species()]) association = Reaction(reactants=[Species(),Species()], products=[Species()]) dissociation = Reaction(reactants=[Species()], products=[Species(),Species()]) bimolecular = Reaction(reactants=[Species(),Species()], products=[Species(),Species()]) self.assertFalse(isomerization.isAssociation()) self.assertTrue(association.isAssociation()) self.assertFalse(dissociation.isAssociation()) self.assertFalse(bimolecular.isAssociation()) def testIsDissociation(self): """ Test the Reaction.isDissociation() method. """ isomerization = Reaction(reactants=[Species()], products=[Species()]) association = Reaction(reactants=[Species(),Species()], products=[Species()]) dissociation = Reaction(reactants=[Species()], products=[Species(),Species()]) bimolecular = Reaction(reactants=[Species(),Species()], products=[Species(),Species()]) self.assertFalse(isomerization.isDissociation()) self.assertFalse(association.isDissociation()) self.assertTrue(dissociation.isDissociation()) self.assertFalse(bimolecular.isDissociation()) def testHasTemplate(self): """ Test the Reaction.hasTemplate() method. """ reactants = self.reaction.reactants[:] products = self.reaction.products[:] self.assertTrue(self.reaction.hasTemplate(reactants, products)) self.assertTrue(self.reaction.hasTemplate(products, reactants)) self.assertFalse(self.reaction2.hasTemplate(reactants, products)) self.assertFalse(self.reaction2.hasTemplate(products, reactants)) reactants.reverse() products.reverse() self.assertTrue(self.reaction.hasTemplate(reactants, products)) self.assertTrue(self.reaction.hasTemplate(products, reactants)) self.assertFalse(self.reaction2.hasTemplate(reactants, products)) self.assertFalse(self.reaction2.hasTemplate(products, reactants)) reactants = self.reaction2.reactants[:] products = self.reaction2.products[:] self.assertFalse(self.reaction.hasTemplate(reactants, products)) self.assertFalse(self.reaction.hasTemplate(products, reactants)) self.assertTrue(self.reaction2.hasTemplate(reactants, products)) self.assertTrue(self.reaction2.hasTemplate(products, reactants)) reactants.reverse() products.reverse() self.assertFalse(self.reaction.hasTemplate(reactants, products)) self.assertFalse(self.reaction.hasTemplate(products, reactants)) self.assertTrue(self.reaction2.hasTemplate(reactants, products)) self.assertTrue(self.reaction2.hasTemplate(products, reactants)) def testEnthalpyOfReaction(self): """ Test the Reaction.getEnthalpyOfReaction() method. """ Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64) Hlist0 = [float(v) for v in ['-146007', '-145886', '-144195', '-141973', '-139633', '-137341', '-135155', '-133093', '-131150', '-129316']] Hlist = self.reaction2.getEnthalpiesOfReaction(Tlist) for i in range(len(Tlist)): self.assertAlmostEqual(Hlist[i] / 1000., Hlist0[i] / 1000., 2) def testEntropyOfReaction(self): """ Test the Reaction.getEntropyOfReaction() method. 
""" Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64) Slist0 = [float(v) for v in ['-156.793', '-156.872', '-153.504', '-150.317', '-147.707', '-145.616', '-143.93', '-142.552', '-141.407', '-140.441']] Slist = self.reaction2.getEntropiesOfReaction(Tlist) for i in range(len(Tlist)): self.assertAlmostEqual(Slist[i], Slist0[i], 2) def testFreeEnergyOfReaction(self): """ Test the Reaction.getFreeEnergyOfReaction() method. """ Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64) Glist0 = [float(v) for v in ['-114648', '-83137.2', '-52092.4', '-21719.3', '8073.53', '37398.1', '66346.8', '94990.6', '123383', '151565']] Glist = self.reaction2.getFreeEnergiesOfReaction(Tlist) for i in range(len(Tlist)): self.assertAlmostEqual(Glist[i] / 1000., Glist0[i] / 1000., 2) def testEquilibriumConstantKa(self): """ Test the Reaction.getEquilibriumConstant() method. """ Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64) Kalist0 = [float(v) for v in ['8.75951e+29', '7.1843e+10', '34272.7', '26.1877', '0.378696', '0.0235579', '0.00334673', '0.000792389', '0.000262777', '0.000110053']] Kalist = self.reaction2.getEquilibriumConstants(Tlist, type='Ka') for i in range(len(Tlist)): self.assertAlmostEqual(Kalist[i] / Kalist0[i], 1.0, 4) def testEquilibriumConstantKc(self): """ Test the Reaction.getEquilibriumConstant() method. """ Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64) Kclist0 = [float(v) for v in ['1.45661e+28', '2.38935e+09', '1709.76', '1.74189', '0.0314866', '0.00235045', '0.000389568', '0.000105413', '3.93273e-05', '1.83006e-05']] Kclist = self.reaction2.getEquilibriumConstants(Tlist, type='Kc') for i in range(len(Tlist)): self.assertAlmostEqual(Kclist[i] / Kclist0[i], 1.0, 4) def testEquilibriumConstantKp(self): """ Test the Reaction.getEquilibriumConstant() method. """ Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64) Kplist0 = [float(v) for v in ['8.75951e+24', '718430', '0.342727', '0.000261877', '3.78696e-06', '2.35579e-07', '3.34673e-08', '7.92389e-09', '2.62777e-09', '1.10053e-09']] Kplist = self.reaction2.getEquilibriumConstants(Tlist, type='Kp') for i in range(len(Tlist)): self.assertAlmostEqual(Kplist[i] / Kplist0[i], 1.0, 4) def testStoichiometricCoefficient(self): """ Test the Reaction.getStoichiometricCoefficient() method. """ for reactant in self.reaction.reactants: self.assertEqual(self.reaction.getStoichiometricCoefficient(reactant), -1) for product in self.reaction.products: self.assertEqual(self.reaction.getStoichiometricCoefficient(product), 1) for reactant in self.reaction2.reactants: self.assertEqual(self.reaction.getStoichiometricCoefficient(reactant), 0) for product in self.reaction2.products: self.assertEqual(self.reaction.getStoichiometricCoefficient(product), 0) def testRateCoefficient(self): """ Test the Reaction.getRateCoefficient() method. """ Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64) P = 1e5 for T in Tlist: self.assertAlmostEqual(self.reaction.getRateCoefficient(T, P) / self.reaction.kinetics.getRateCoefficient(T), 1.0, 6) def testGenerateReverseRateCoefficient(self): """ Test the Reaction.generateReverseRateCoefficient() method. 
""" Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64) P = 1e5 reverseKinetics = self.reaction2.generateReverseRateCoefficient() for T in Tlist: kr0 = self.reaction2.getRateCoefficient(T, P) / self.reaction2.getEquilibriumConstant(T) kr = reverseKinetics.getRateCoefficient(T) self.assertAlmostEqual(kr0 / kr, 1.0, 0) def testGenerateReverseRateCoefficientArrhenius(self): """ Test the Reaction.generateReverseRateCoefficient() method works for the Arrhenius format. """ original_kinetics = Arrhenius( A = (2.65e12, 'cm^3/(mol*s)'), n = 0.0, Ea = (0.0, 'kJ/mol'), T0 = (1, 'K'), Tmin = (300, 'K'), Tmax = (2000, 'K'), ) self.reaction2.kinetics = original_kinetics reverseKinetics = self.reaction2.generateReverseRateCoefficient() self.reaction2.kinetics = reverseKinetics # reverse reactants, products to ensure Keq is correctly computed self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants reversereverseKinetics = self.reaction2.generateReverseRateCoefficient() # check that reverting the reverse yields the original Tlist = numpy.arange(original_kinetics.Tmin.value_si, original_kinetics.Tmax.value_si, 200.0, numpy.float64) P = 1e5 for T in Tlist: korig = original_kinetics.getRateCoefficient(T, P) krevrev = reversereverseKinetics.getRateCoefficient(T, P) self.assertAlmostEqual(korig / krevrev, 1.0, 0) @work_in_progress def testGenerateReverseRateCoefficientArrheniusEP(self): """ Test the Reaction.generateReverseRateCoefficient() method works for the ArrheniusEP format. """ from rmgpy.kinetics import ArrheniusEP original_kinetics = ArrheniusEP( A = (2.65e12, 'cm^3/(mol*s)'), n = 0.0, alpha = 0.5, E0 = (41.84, 'kJ/mol'), Tmin = (300, 'K'), Tmax = (2000, 'K'), ) self.reaction2.kinetics = original_kinetics reverseKinetics = self.reaction2.generateReverseRateCoefficient() self.reaction2.kinetics = reverseKinetics # reverse reactants, products to ensure Keq is correctly computed self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants reversereverseKinetics = self.reaction2.generateReverseRateCoefficient() # check that reverting the reverse yields the original Tlist = numpy.arange(original_kinetics.Tmin, original_kinetics.Tmax, 200.0, numpy.float64) P = 1e5 for T in Tlist: korig = original_kinetics.getRateCoefficient(T, P) krevrev = reversereverseKinetics.getRateCoefficient(T, P) self.assertAlmostEqual(korig / krevrev, 1.0, 0) def testGenerateReverseRateCoefficientPDepArrhenius(self): """ Test the Reaction.generateReverseRateCoefficient() method works for the PDepArrhenius format. 
""" from rmgpy.kinetics import PDepArrhenius arrhenius0 = Arrhenius( A = (1.0e6,"s^-1"), n = 1.0, Ea = (10.0,"kJ/mol"), T0 = (300.0,"K"), Tmin = (300.0,"K"), Tmax = (2000.0,"K"), comment = """This data is completely made up""", ) arrhenius1 = Arrhenius( A = (1.0e12,"s^-1"), n = 1.0, Ea = (20.0,"kJ/mol"), T0 = (300.0,"K"), Tmin = (300.0,"K"), Tmax = (2000.0,"K"), comment = """This data is completely made up""", ) pressures = numpy.array([0.1, 10.0]) arrhenius = [arrhenius0, arrhenius1] Tmin = 300.0 Tmax = 2000.0 Pmin = 0.1 Pmax = 10.0 comment = """This data is completely made up""" original_kinetics = PDepArrhenius( pressures = (pressures,"bar"), arrhenius = arrhenius, Tmin = (Tmin,"K"), Tmax = (Tmax,"K"), Pmin = (Pmin,"bar"), Pmax = (Pmax,"bar"), comment = comment, ) self.reaction2.kinetics = original_kinetics reverseKinetics = self.reaction2.generateReverseRateCoefficient() self.reaction2.kinetics = reverseKinetics # reverse reactants, products to ensure Keq is correctly computed self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants reversereverseKinetics = self.reaction2.generateReverseRateCoefficient() # check that reverting the reverse yields the original Tlist = numpy.arange(Tmin, Tmax, 200.0, numpy.float64) P = 1e5 for T in Tlist: korig = original_kinetics.getRateCoefficient(T, P) krevrev = reversereverseKinetics.getRateCoefficient(T, P) self.assertAlmostEqual(korig / krevrev, 1.0, 0) def testGenerateReverseRateCoefficientMultiArrhenius(self): """ Test the Reaction.generateReverseRateCoefficient() method works for the MultiArrhenius format. """ from rmgpy.kinetics import MultiArrhenius pressures = numpy.array([0.1, 10.0]) Tmin = 300.0 Tmax = 2000.0 Pmin = 0.1 Pmax = 10.0 comment = """This data is completely made up""" arrhenius = [ Arrhenius( A = (9.3e-14,"cm^3/(molecule*s)"), n = 0.0, Ea = (4740*constants.R*0.001,"kJ/mol"), T0 = (1,"K"), Tmin = (Tmin,"K"), Tmax = (Tmax,"K"), comment = comment, ), Arrhenius( A = (1.4e-9,"cm^3/(molecule*s)"), n = 0.0, Ea = (11200*constants.R*0.001,"kJ/mol"), T0 = (1,"K"), Tmin = (Tmin,"K"), Tmax = (Tmax,"K"), comment = comment, ), ] original_kinetics = MultiArrhenius( arrhenius = arrhenius, Tmin = (Tmin,"K"), Tmax = (Tmax,"K"), comment = comment, ) self.reaction2.kinetics = original_kinetics reverseKinetics = self.reaction2.generateReverseRateCoefficient() self.reaction2.kinetics = reverseKinetics # reverse reactants, products to ensure Keq is correctly computed self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants reversereverseKinetics = self.reaction2.generateReverseRateCoefficient() # check that reverting the reverse yields the original Tlist = numpy.arange(Tmin, Tmax, 200.0, numpy.float64) P = 1e5 for T in Tlist: korig = original_kinetics.getRateCoefficient(T, P) krevrev = reversereverseKinetics.getRateCoefficient(T, P) self.assertAlmostEqual(korig / krevrev, 1.0, 0) def testGenerateReverseRateCoefficientMultiPDepArrhenius(self): """ Test the Reaction.generateReverseRateCoefficient() method works for the MultiPDepArrhenius format. """ from rmgpy.kinetics import PDepArrhenius, MultiPDepArrhenius Tmin = 350. Tmax = 1500. 
Pmin = 1e-1 Pmax = 1e1 pressures = numpy.array([1e-1,1e1]) comment = 'CH3 + C2H6 <=> CH4 + C2H5 (Baulch 2005)' arrhenius = [ PDepArrhenius( pressures = (pressures,"bar"), arrhenius = [ Arrhenius( A = (9.3e-16,"cm^3/(molecule*s)"), n = 0.0, Ea = (4740*constants.R*0.001,"kJ/mol"), T0 = (1,"K"), Tmin = (Tmin,"K"), Tmax = (Tmax,"K"), comment = comment, ), Arrhenius( A = (9.3e-14,"cm^3/(molecule*s)"), n = 0.0, Ea = (4740*constants.R*0.001,"kJ/mol"), T0 = (1,"K"), Tmin = (Tmin,"K"), Tmax = (Tmax,"K"), comment = comment, ), ], Tmin = (Tmin,"K"), Tmax = (Tmax,"K"), Pmin = (Pmin,"bar"), Pmax = (Pmax,"bar"), comment = comment, ), PDepArrhenius( pressures = (pressures,"bar"), arrhenius = [ Arrhenius( A = (1.4e-11,"cm^3/(molecule*s)"), n = 0.0, Ea = (11200*constants.R*0.001,"kJ/mol"), T0 = (1,"K"), Tmin = (Tmin,"K"), Tmax = (Tmax,"K"), comment = comment, ), Arrhenius( A = (1.4e-9,"cm^3/(molecule*s)"), n = 0.0, Ea = (11200*constants.R*0.001,"kJ/mol"), T0 = (1,"K"), Tmin = (Tmin,"K"), Tmax = (Tmax,"K"), comment = comment, ), ], Tmin = (Tmin,"K"), Tmax = (Tmax,"K"), Pmin = (Pmin,"bar"), Pmax = (Pmax,"bar"), comment = comment, ), ] original_kinetics = MultiPDepArrhenius( arrhenius = arrhenius, Tmin = (Tmin,"K"), Tmax = (Tmax,"K"), Pmin = (Pmin,"bar"), Pmax = (Pmax,"bar"), comment = comment, ) self.reaction2.kinetics = original_kinetics reverseKinetics = self.reaction2.generateReverseRateCoefficient() self.reaction2.kinetics = reverseKinetics # reverse reactants, products to ensure Keq is correctly computed self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants reversereverseKinetics = self.reaction2.generateReverseRateCoefficient() # check that reverting the reverse yields the original Tlist = numpy.arange(Tmin, Tmax, 200.0, numpy.float64) P = 1e5 for T in Tlist: korig = original_kinetics.getRateCoefficient(T, P) krevrev = reversereverseKinetics.getRateCoefficient(T, P) self.assertAlmostEqual(korig / krevrev, 1.0, 0) def testGenerateReverseRateCoefficientThirdBody(self): """ Test the Reaction.generateReverseRateCoefficient() method works for the ThirdBody format. """ from rmgpy.kinetics import ThirdBody arrheniusLow = Arrhenius( A = (2.62e+33,"cm^6/(mol^2*s)"), n = -4.76, Ea = (10.21,"kJ/mol"), T0 = (1,"K"), ) efficiencies = {"C": 3, "C(=O)=O": 2, "CC": 3, "O": 6, "[Ar]": 0.7, "[C]=O": 1.5, "[H][H]": 2} Tmin = 300. Tmax = 2000. Pmin = 0.01 Pmax = 100. comment = """H + CH3 -> CH4""" thirdBody = ThirdBody( arrheniusLow = arrheniusLow, Tmin = (Tmin,"K"), Tmax = (Tmax,"K"), Pmin = (Pmin,"bar"), Pmax = (Pmax,"bar"), efficiencies = efficiencies, comment = comment, ) original_kinetics = thirdBody self.reaction2.kinetics = original_kinetics reverseKinetics = self.reaction2.generateReverseRateCoefficient() self.reaction2.kinetics = reverseKinetics # reverse reactants, products to ensure Keq is correctly computed self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants reversereverseKinetics = self.reaction2.generateReverseRateCoefficient() # check that reverting the reverse yields the original Tlist = numpy.arange(Tmin, Tmax, 200.0, numpy.float64) P = 1e5 for T in Tlist: korig = original_kinetics.getRateCoefficient(T, P) krevrev = reversereverseKinetics.getRateCoefficient(T, P) self.assertAlmostEqual(korig / krevrev, 1.0, 0) def testGenerateReverseRateCoefficientLindemann(self): """ Test the Reaction.generateReverseRateCoefficient() method works for the Lindemann format. 
""" from rmgpy.kinetics import Lindemann arrheniusHigh = Arrhenius( A = (1.39e+16,"cm^3/(mol*s)"), n = -0.534, Ea = (2.243,"kJ/mol"), T0 = (1,"K"), ) arrheniusLow = Arrhenius( A = (2.62e+33,"cm^6/(mol^2*s)"), n = -4.76, Ea = (10.21,"kJ/mol"), T0 = (1,"K"), ) efficiencies = {"C": 3, "C(=O)=O": 2, "CC": 3, "O": 6, "[Ar]": 0.7, "[C]=O": 1.5, "[H][H]": 2} Tmin = 300. Tmax = 2000. Pmin = 0.01 Pmax = 100. comment = """H + CH3 -> CH4""" lindemann = Lindemann( arrheniusHigh = arrheniusHigh, arrheniusLow = arrheniusLow, Tmin = (Tmin,"K"), Tmax = (Tmax,"K"), Pmin = (Pmin,"bar"), Pmax = (Pmax,"bar"), efficiencies = efficiencies, comment = comment, ) original_kinetics = lindemann self.reaction2.kinetics = original_kinetics reverseKinetics = self.reaction2.generateReverseRateCoefficient() self.reaction2.kinetics = reverseKinetics # reverse reactants, products to ensure Keq is correctly computed self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants reversereverseKinetics = self.reaction2.generateReverseRateCoefficient() # check that reverting the reverse yields the original Tlist = numpy.arange(Tmin, Tmax, 200.0, numpy.float64) P = 1e5 for T in Tlist: korig = original_kinetics.getRateCoefficient(T, P) krevrev = reversereverseKinetics.getRateCoefficient(T, P) self.assertAlmostEqual(korig / krevrev, 1.0, 0) def testGenerateReverseRateCoefficientTroe(self): """ Test the Reaction.generateReverseRateCoefficient() method works for the Troe format. """ from rmgpy.kinetics import Troe arrheniusHigh = Arrhenius( A = (1.39e+16,"cm^3/(mol*s)"), n = -0.534, Ea = (2.243,"kJ/mol"), T0 = (1,"K"), ) arrheniusLow = Arrhenius( A = (2.62e+33,"cm^6/(mol^2*s)"), n = -4.76, Ea = (10.21,"kJ/mol"), T0 = (1,"K"), ) alpha = 0.783 T3 = 74 T1 = 2941 T2 = 6964 efficiencies = {"C": 3, "C(=O)=O": 2, "CC": 3, "O": 6, "[Ar]": 0.7, "[C]=O": 1.5, "[H][H]": 2} Tmin = 300. Tmax = 2000. Pmin = 0.01 Pmax = 100. comment = """H + CH3 -> CH4""" troe = Troe( arrheniusHigh = arrheniusHigh, arrheniusLow = arrheniusLow, alpha = alpha, T3 = (T3,"K"), T1 = (T1,"K"), T2 = (T2,"K"), Tmin = (Tmin,"K"), Tmax = (Tmax,"K"), Pmin = (Pmin,"bar"), Pmax = (Pmax,"bar"), efficiencies = efficiencies, comment = comment, ) original_kinetics = troe self.reaction2.kinetics = original_kinetics reverseKinetics = self.reaction2.generateReverseRateCoefficient() self.reaction2.kinetics = reverseKinetics # reverse reactants, products to ensure Keq is correctly computed self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants reversereverseKinetics = self.reaction2.generateReverseRateCoefficient() # check that reverting the reverse yields the original Tlist = numpy.arange(Tmin, Tmax, 200.0, numpy.float64) P = 1e5 for T in Tlist: korig = original_kinetics.getRateCoefficient(T, P) krevrev = reversereverseKinetics.getRateCoefficient(T, P) self.assertAlmostEqual(korig / krevrev, 1.0, 0) def testTSTCalculation(self): """ A test of the transition state theory k(T) calculation function, using the reaction H + C2H4 -> C2H5. """ Tlist = 1000.0/
numpy.arange(0.4, 3.35, 0.01)
numpy.arange
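A note on the completed call above: `numpy.arange(start, stop, step)` excludes the stop value, so `1000.0 / numpy.arange(0.4, 3.35, 0.01)` builds a temperature grid that is evenly spaced in 1000/T (a common choice for Arrhenius-style rate calculations), running from 2500 K down to roughly 299 K. A minimal sketch:

import numpy as np

# Reciprocal-temperature grid as in the TST test above; the stop value
# is excluded, and the exact endpoint is subject to float-step rounding.
inverse_T = np.arange(0.4, 3.35, 0.01)  # 1000/T values
Tlist = 1000.0 / inverse_T
print(Tlist[0], Tlist[-1])  # 2500.0 ... ~299.4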
# ________ # / # \ / # \ / # \/ import random import textwrap import emd_mean import AdvEMDpy import emd_basis import emd_utils import numpy as np import pandas as pd import cvxpy as cvx import seaborn as sns import matplotlib.pyplot as plt from scipy.integrate import odeint from scipy.ndimage import gaussian_filter from emd_utils import time_extension, Utility from scipy.interpolate import CubicSpline from emd_hilbert import Hilbert, hilbert_spectrum from emd_preprocess import Preprocess from emd_mean import Fluctuation from AdvEMDpy import EMD # alternate packages from PyEMD import EMD as pyemd0215 import emd as emd040 sns.set(style='darkgrid') pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001) pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time) pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series) # plot 0 - addition fig = plt.figure(figsize=(9, 4)) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('First Iteration of Sifting Algorithm') plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1) plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()], c='r', label=r'$M(t_i)$', zorder=2) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4) plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()], c='c', label=r'$m(t_j)$', zorder=3) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5) plt.yticks(ticks=[-2, -1, 0, 1, 2]) plt.xticks(ticks=[0, np.pi, 2 * np.pi], labels=[r'0', r'$\pi$', r'$2\pi$']) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/pseudo_algorithm.png') plt.show() knots = np.arange(12) time = np.linspace(0, 11, 1101) basis = emd_basis.Basis(time=time, time_series=time) b_spline_basis = basis.cubic_b_spline(knots) chsi_basis = basis.chsi_basis(knots) # plot 1 plt.title('Non-Natural Cubic B-Spline Bases at Boundary') plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $') plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $') plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $') plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $') plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $') plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $']) plt.xlim(4.4, 6.6) plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.legend(loc='upper left') plt.savefig('jss_figures/boundary_bases.png') plt.show() # plot 1a - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) knots_uniform = np.linspace(0, 2 * np.pi, 51) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0] fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) 
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Uniform Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Uniform Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Uniform Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
    for j in range(1, len(knots_uniform)):
        axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_uniform.png')
plt.show()

# plot 1b - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series)
imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=1,
                                                              verbose=False)
fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Statically Optimised Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Statically Optimised Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Statically Optimised Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
    for j in range(1, len(knots)):
        axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_1.png')
plt.show()

# plot 1c - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series)
imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=2,
                                                              verbose=False)
fig,
axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Dynamically Optimised Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Dynamically Optimised Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Dynamically Optimised Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
    for j in range(1, len(knots[i])):
        axs[i].plot(knots[i][j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_2.png')
plt.show()

# plot 1d - addition
window = 81
fig, axs = plt.subplots(2, 1)
fig.subplots_adjust(hspace=0.4)
figure_size = plt.gcf().get_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Preprocess Filtering Demonstration')
axs[1].set_title('Zoomed Region')
preprocess_time = pseudo_alg_time.copy()
np.random.seed(1)
random.seed(1)
preprocess_time_series = pseudo_alg_time_series + np.random.normal(0, 0.1, len(preprocess_time))
for i in random.sample(range(1000), 500):
    preprocess_time_series[i] += np.random.normal(0, 1)
preprocess = Preprocess(time=preprocess_time, time_series=preprocess_time_series)
axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple',
            label=textwrap.fill('Noiseless time series', 12))
axs[0].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12))
axs[0].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13))
axs[0].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1],
            label=textwrap.fill('Winsorize filter', 12))
axs[0].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1],
            label=textwrap.fill('Winsorize interpolation filter', 14))
axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey',
            label=textwrap.fill('Quantile window', 12))
axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey')
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black',
            label=textwrap.fill('Zoomed region', 10))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black')
axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple',
            label=textwrap.fill('Noiseless time series', 12))
axs[1].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12))
axs[1].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13))
axs[1].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1],
            label=textwrap.fill('Winsorize filter', 12))
axs[1].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1],
            label=textwrap.fill('Winsorize interpolation filter', 14))
axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey',
            label=textwrap.fill('Quantile window', 12))
axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey')
axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi)
axs[1].set_ylim(-3, 3)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[np.pi])
axs[1].set_xticklabels(labels=[r'$\pi$'])
box_0 = axs[0].get_position()
axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15))
box_1 = axs[1].get_position()
axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height])
plt.savefig('jss_figures/preprocess_filter.png')
plt.show()

# plot 1e - addition
fig, axs = plt.subplots(2, 1)
fig.subplots_adjust(hspace=0.4)
figure_size = plt.gcf().get_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Preprocess Smoothing Demonstration')
axs[1].set_title('Zoomed Region')
axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple',
            label=textwrap.fill('Noiseless time series', 12))
axs[0].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12))
axs[0].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13))
downsampled_and_decimated = preprocess.downsample()
axs[0].plot(downsampled_and_decimated[0], downsampled_and_decimated[1],
            label=textwrap.fill('Downsampled & decimated', 11))
downsampled = preprocess.downsample(decimate=False)
axs[0].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black',
            label=textwrap.fill('Zoomed region', 10))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black')
axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple',
            label=textwrap.fill('Noiseless time series', 12))
axs[1].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12))
axs[1].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13))
axs[1].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 13)) axs[1].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.06, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.06, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_smooth.png') plt.show() # plot 2 fig, axs = plt.subplots(1, 2, sharey=True) axs[0].set_title('Cubic B-Spline Bases') axs[0].plot(time, b_spline_basis[2, :].T, '--', label='Basis 1') axs[0].plot(time, b_spline_basis[3, :].T, '--', label='Basis 2') axs[0].plot(time, b_spline_basis[4, :].T, '--', label='Basis 3') axs[0].plot(time, b_spline_basis[5, :].T, '--', label='Basis 4') axs[0].legend(loc='upper left') axs[0].plot(5 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].plot(6 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].set_xticks([5, 6]) axs[0].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[0].set_xlim(4.5, 6.5) axs[1].set_title('Cubic Hermite Spline Bases') axs[1].plot(time, chsi_basis[10, :].T, '--') axs[1].plot(time, chsi_basis[11, :].T, '--') axs[1].plot(time, chsi_basis[12, :].T, '--') axs[1].plot(time, chsi_basis[13, :].T, '--') axs[1].plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].set_xticks([5, 6]) axs[1].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[1].set_xlim(4.5, 6.5) plt.savefig('jss_figures/comparing_bases.png') plt.show() # plot 3 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_dash = maxima_y[-1] * np.ones_like(max_dash_time) min_dash_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_dash = minima_y[-1] * np.ones_like(min_dash_time) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) max_discard = maxima_y[-1] max_discard_time = minima_x[-1] - maxima_x[-1] + minima_x[-1] max_discard_dash_time = np.linspace(max_discard_time - width, max_discard_time + width, 101) max_discard_dash = max_discard * np.ones_like(max_discard_dash_time) dash_2_time = np.linspace(minima_x[-1], max_discard_time, 101) dash_2 = np.linspace(minima_y[-1], max_discard, 101) end_point_time = time[-1] end_point = time_series[-1] time_reflect = np.linspace((5 - a) * np.pi, (5 + a) * np.pi, 101) time_series_reflect = np.flip(np.cos(np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101)) + np.cos(5 * np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101))) time_series_anti_reflect = time_series_reflect[0] - time_series_reflect utils = emd_utils.Utility(time=time, time_series=time_series_anti_reflect) anti_max_bool = utils.max_bool_func_1st_order_fd() anti_max_point_time = time_reflect[anti_max_bool] 
anti_max_point = time_series_anti_reflect[anti_max_bool]
utils = emd_utils.Utility(time=time, time_series=time_series_reflect)
no_anchor_max_time = time_reflect[utils.max_bool_func_1st_order_fd()]
no_anchor_max = time_series_reflect[utils.max_bool_func_1st_order_fd()]

point_1 = 5.4
length_distance = np.linspace(maxima_y[-1], minima_y[-1], 101)
length_distance_time = point_1 * np.pi * np.ones_like(length_distance)
length_time = np.linspace(point_1 * np.pi - width, point_1 * np.pi + width, 101)
length_top = maxima_y[-1] * np.ones_like(length_time)
length_bottom = minima_y[-1] * np.ones_like(length_time)

point_2 = 5.2
length_distance_2 = np.linspace(time_series[-1], minima_y[-1], 101)
length_distance_time_2 = point_2 * np.pi * np.ones_like(length_distance_2)
length_time_2 = np.linspace(point_2 * np.pi - width, point_2 * np.pi + width, 101)
length_top_2 = time_series[-1] * np.ones_like(length_time_2)
length_bottom_2 = minima_y[-1] * np.ones_like(length_time_2)

symmetry_axis_1_time = minima_x[-1] * np.ones(101)
symmetry_axis_2_time = time[-1] * np.ones(101)
symmetry_axis = np.linspace(-2, 2, 101)

end_time = np.linspace(time[-1] - width, time[-1] + width, 101)
end_signal = time_series[-1] * np.ones_like(end_time)

anti_symmetric_time = np.linspace(time[-1] - 0.5, time[-1] + 0.5, 101)
anti_symmetric_signal = time_series[-1] * np.ones_like(anti_symmetric_time)

ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.plot(time, time_series, linewidth=2, label='Signal')
plt.title('Symmetry Edge Effects Example')
plt.plot(time_reflect, time_series_reflect, 'g--', linewidth=2, label=textwrap.fill('Symmetric signal', 10))
plt.plot(time_reflect[:51], time_series_anti_reflect[:51], '--', c='purple', linewidth=2,
         label=textwrap.fill('Anti-symmetric signal', 10))
plt.plot(max_dash_time, max_dash, 'k-')
plt.plot(min_dash_time, min_dash, 'k-')
plt.plot(dash_1_time, dash_1, 'k--')
plt.plot(dash_2_time, dash_2, 'k--')
plt.plot(length_distance_time, length_distance, 'k--')
plt.plot(length_distance_time_2, length_distance_2, 'k--')
plt.plot(length_time, length_top, 'k-')
plt.plot(length_time, length_bottom, 'k-')
plt.plot(length_time_2, length_top_2, 'k-')
plt.plot(length_time_2, length_bottom_2, 'k-')
plt.plot(end_time, end_signal, 'k-')
plt.plot(symmetry_axis_1_time, symmetry_axis, 'r--', zorder=1)
plt.plot(anti_symmetric_time, anti_symmetric_signal, 'r--', zorder=1)
plt.plot(symmetry_axis_2_time, symmetry_axis, 'r--', label=textwrap.fill('Axes of symmetry', 10), zorder=1)
plt.text(5.1 * np.pi, -0.7, r'$\beta$L')
plt.text(5.34 * np.pi, -0.05, 'L')
plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima')
plt.scatter(max_discard_time, max_discard, c='purple', zorder=4,
            label=textwrap.fill('Symmetric Discard maxima', 10))
plt.scatter(end_point_time, end_point, c='orange', zorder=4,
            label=textwrap.fill('Symmetric Anchor maxima', 10))
plt.scatter(anti_max_point_time, anti_max_point, c='green', zorder=4,
            label=textwrap.fill('Anti-Symmetric maxima', 10))
plt.scatter(no_anchor_max_time, no_anchor_max, c='gray', zorder=4,
            label=textwrap.fill('Symmetric maxima', 10))
plt.xlim(3.9 * np.pi, 5.5 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_symmetry_anti.png')
plt.show()
# plot 4 a = 0.21 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_1 = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_dash_2 = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_dash_time_1 = maxima_x[-1] * np.ones_like(max_dash_1) max_dash_time_2 = maxima_x[-2] * np.ones_like(max_dash_1) min_dash_1 = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_dash_2 = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_dash_time_1 = minima_x[-1] * np.ones_like(min_dash_1) min_dash_time_2 = minima_x[-2] * np.ones_like(min_dash_1) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) dash_2_time = np.linspace(maxima_x[-1], minima_x[-2], 101) dash_2 = np.linspace(maxima_y[-1], minima_y[-2], 101) s1 = (minima_y[-2] - maxima_y[-1]) / (minima_x[-2] - maxima_x[-1]) slope_based_maximum_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) slope_based_maximum = minima_y[-1] + (slope_based_maximum_time - minima_x[-1]) * s1 max_dash_time_3 = slope_based_maximum_time * np.ones_like(max_dash_1) max_dash_3 = np.linspace(slope_based_maximum - width, slope_based_maximum + width, 101) dash_3_time = np.linspace(minima_x[-1], slope_based_maximum_time, 101) dash_3 = np.linspace(minima_y[-1], slope_based_maximum, 101) s2 = (minima_y[-1] - maxima_y[-1]) / (minima_x[-1] - maxima_x[-1]) slope_based_minimum_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) slope_based_minimum = slope_based_maximum - (slope_based_maximum_time - slope_based_minimum_time) * s2 min_dash_time_3 = slope_based_minimum_time * np.ones_like(min_dash_1) min_dash_3 = np.linspace(slope_based_minimum - width, slope_based_minimum + width, 101) dash_4_time = np.linspace(slope_based_maximum_time, slope_based_minimum_time) dash_4 = np.linspace(slope_based_maximum, slope_based_minimum) maxima_dash = np.linspace(2.5 - width, 2.5 + width, 101) maxima_dash_time_1 = maxima_x[-2] * np.ones_like(maxima_dash) maxima_dash_time_2 = maxima_x[-1] * np.ones_like(maxima_dash) maxima_dash_time_3 = slope_based_maximum_time * np.ones_like(maxima_dash) maxima_line_dash_time = np.linspace(maxima_x[-2], slope_based_maximum_time, 101) maxima_line_dash = 2.5 * np.ones_like(maxima_line_dash_time) minima_dash = np.linspace(-3.4 - width, -3.4 + width, 101) minima_dash_time_1 = minima_x[-2] * np.ones_like(minima_dash) minima_dash_time_2 = minima_x[-1] * np.ones_like(minima_dash) minima_dash_time_3 = slope_based_minimum_time * np.ones_like(minima_dash) minima_line_dash_time = np.linspace(minima_x[-2], slope_based_minimum_time, 101) minima_line_dash = -3.4 * np.ones_like(minima_line_dash_time) # slightly edit signal to make difference between slope-based method and improved slope-based method more clear time_series[time >= minima_x[-1]] = 1.5 * (time_series[time >= minima_x[-1]] - time_series[time == minima_x[-1]]) + \ time_series[time == minima_x[-1]] improved_slope_based_maximum_time = time[-1] improved_slope_based_maximum = time_series[-1] improved_slope_based_minimum_time = slope_based_minimum_time improved_slope_based_minimum = improved_slope_based_maximum + s2 * (improved_slope_based_minimum_time - improved_slope_based_maximum_time) 
min_dash_4 = np.linspace(improved_slope_based_minimum - width, improved_slope_based_minimum + width, 101) min_dash_time_4 = improved_slope_based_minimum_time * np.ones_like(min_dash_4) dash_final_time = np.linspace(improved_slope_based_maximum_time, improved_slope_based_minimum_time, 101) dash_final = np.linspace(improved_slope_based_maximum, improved_slope_based_minimum, 101) ax = plt.subplot(111) figure_size = plt.gcf().get_size_inches() factor = 0.9 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) plt.plot(time, time_series, linewidth=2, label='Signal') plt.title('Slope-Based Edge Effects Example') plt.plot(max_dash_time_1, max_dash_1, 'k-') plt.plot(max_dash_time_2, max_dash_2, 'k-') plt.plot(max_dash_time_3, max_dash_3, 'k-') plt.plot(min_dash_time_1, min_dash_1, 'k-') plt.plot(min_dash_time_2, min_dash_2, 'k-') plt.plot(min_dash_time_3, min_dash_3, 'k-') plt.plot(min_dash_time_4, min_dash_4, 'k-') plt.plot(maxima_dash_time_1, maxima_dash, 'k-') plt.plot(maxima_dash_time_2, maxima_dash, 'k-') plt.plot(maxima_dash_time_3, maxima_dash, 'k-') plt.plot(minima_dash_time_1, minima_dash, 'k-') plt.plot(minima_dash_time_2, minima_dash, 'k-') plt.plot(minima_dash_time_3, minima_dash, 'k-') plt.text(4.34 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$') plt.text(4.74 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$') plt.text(4.12 * np.pi, 2, r'$\Delta{t^{max}_{M}}$') plt.text(4.50 * np.pi, 2, r'$\Delta{t^{max}_{M}}$') plt.text(4.30 * np.pi, 0.35, r'$s_1$') plt.text(4.43 * np.pi, -0.20, r'$s_2$') plt.text(4.30 * np.pi + (minima_x[-1] - minima_x[-2]), 0.35 + (minima_y[-1] - minima_y[-2]), r'$s_1$') plt.text(4.43 * np.pi + (slope_based_minimum_time - minima_x[-1]), -0.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$') plt.text(4.50 * np.pi + (slope_based_minimum_time - minima_x[-1]), 1.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$') plt.plot(minima_line_dash_time, minima_line_dash, 'k--') plt.plot(maxima_line_dash_time, maxima_line_dash, 'k--') plt.plot(dash_1_time, dash_1, 'k--') plt.plot(dash_2_time, dash_2, 'k--') plt.plot(dash_3_time, dash_3, 'k--') plt.plot(dash_4_time, dash_4, 'k--') plt.plot(dash_final_time, dash_final, 'k--') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(slope_based_maximum_time, slope_based_maximum, c='orange', zorder=4, label=textwrap.fill('Slope-based maximum', 11)) plt.scatter(slope_based_minimum_time, slope_based_minimum, c='purple', zorder=4, label=textwrap.fill('Slope-based minimum', 11)) plt.scatter(improved_slope_based_maximum_time, improved_slope_based_maximum, c='deeppink', zorder=4, label=textwrap.fill('Improved slope-based maximum', 11)) plt.scatter(improved_slope_based_minimum_time, improved_slope_based_minimum, c='dodgerblue', zorder=4, label=textwrap.fill('Improved slope-based minimum', 11)) plt.xlim(3.9 * np.pi, 5.5 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-3, -2, -1, 0, 1, 2), ('-3', '-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_slope_based.png') plt.show() # plot 5 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x =
time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] A2 = np.abs(maxima_y[-2] - minima_y[-2]) / 2 A1 = np.abs(maxima_y[-1] - minima_y[-1]) / 2 P2 = 2 * np.abs(maxima_x[-2] - minima_x[-2]) P1 = 2 * np.abs(maxima_x[-1] - minima_x[-1]) Huang_time = (P1 / P2) * (time[time >= maxima_x[-2]] - time[time == maxima_x[-2]]) + maxima_x[-1] Huang_wave = (A1 / A2) * (time_series[time >= maxima_x[-2]] - time_series[time == maxima_x[-2]]) + maxima_y[-1] Coughlin_time = Huang_time Coughlin_wave = A1 * np.cos(2 * np.pi * (1 / P1) * (Coughlin_time - Coughlin_time[0])) Average_max_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) Average_max = (maxima_y[-2] + maxima_y[-1]) / 2 Average_min_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) Average_min = (minima_y[-2] + minima_y[-1]) / 2 utils_Huang = emd_utils.Utility(time=time, time_series=Huang_wave) Huang_max_bool = utils_Huang.max_bool_func_1st_order_fd() Huang_min_bool = utils_Huang.min_bool_func_1st_order_fd() utils_Coughlin = emd_utils.Utility(time=time, time_series=Coughlin_wave) Coughlin_max_bool = utils_Coughlin.max_bool_func_1st_order_fd() Coughlin_min_bool = utils_Coughlin.min_bool_func_1st_order_fd() Huang_max_time = Huang_time[Huang_max_bool] Huang_max = Huang_wave[Huang_max_bool] Huang_min_time = Huang_time[Huang_min_bool] Huang_min = Huang_wave[Huang_min_bool] Coughlin_max_time = Coughlin_time[Coughlin_max_bool] Coughlin_max = Coughlin_wave[Coughlin_max_bool] Coughlin_min_time = Coughlin_time[Coughlin_min_bool] Coughlin_min = Coughlin_wave[Coughlin_min_bool] max_2_x_time = np.linspace(maxima_x[-2] - width, maxima_x[-2] + width, 101) max_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) max_2_x = maxima_y[-2] * np.ones_like(max_2_x_time) min_2_x_time = np.linspace(minima_x[-2] - width, minima_x[-2] + width, 101) min_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) min_2_x = minima_y[-2] * np.ones_like(min_2_x_time) dash_max_min_2_x = np.linspace(minima_y[-2], maxima_y[-2], 101) dash_max_min_2_x_time = 5.3 * np.pi * np.ones_like(dash_max_min_2_x) max_2_y = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) max_2_y_time = maxima_x[-2] * np.ones_like(max_2_y) min_2_y = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) min_2_y_time = minima_x[-2] * np.ones_like(min_2_y) dash_max_min_2_y_time = np.linspace(minima_x[-2], maxima_x[-2], 101) dash_max_min_2_y = -1.8 * np.ones_like(dash_max_min_2_y_time) max_1_x_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) max_1_x = maxima_y[-1] * np.ones_like(max_1_x_time) min_1_x_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) min_1_x = minima_y[-1] * np.ones_like(min_1_x_time) dash_max_min_1_x = np.linspace(minima_y[-1], maxima_y[-1], 101) dash_max_min_1_x_time = 5.4 * np.pi * np.ones_like(dash_max_min_1_x) max_1_y = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) max_1_y_time = maxima_x[-1] * np.ones_like(max_1_y) min_1_y = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) min_1_y_time = 
minima_x[-1] * np.ones_like(min_1_y) dash_max_min_1_y_time = np.linspace(minima_x[-1], maxima_x[-1], 101) dash_max_min_1_y = -2.1 * np.ones_like(dash_max_min_1_y_time) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Characteristic Wave Effects Example') plt.plot(time, time_series, linewidth=2, label='Signal') plt.scatter(Huang_max_time, Huang_max, c='magenta', zorder=4, label=textwrap.fill('Huang maximum', 10)) plt.scatter(Huang_min_time, Huang_min, c='lime', zorder=4, label=textwrap.fill('Huang minimum', 10)) plt.scatter(Coughlin_max_time, Coughlin_max, c='darkorange', zorder=4, label=textwrap.fill('Coughlin maximum', 14)) plt.scatter(Coughlin_min_time, Coughlin_min, c='dodgerblue', zorder=4, label=textwrap.fill('Coughlin minimum', 14)) plt.scatter(Average_max_time, Average_max, c='orangered', zorder=4, label=textwrap.fill('Average maximum', 14)) plt.scatter(Average_min_time, Average_min, c='cyan', zorder=4, label=textwrap.fill('Average minimum', 14)) plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.plot(Huang_time, Huang_wave, '--', c='darkviolet', label=textwrap.fill('Huang Characteristic Wave', 14)) plt.plot(Coughlin_time, Coughlin_wave, '--', c='darkgreen', label=textwrap.fill('Coughlin Characteristic Wave', 14)) plt.plot(max_2_x_time, max_2_x, 'k-') plt.plot(max_2_x_time_side, max_2_x, 'k-') plt.plot(min_2_x_time, min_2_x, 'k-') plt.plot(min_2_x_time_side, min_2_x, 'k-') plt.plot(dash_max_min_2_x_time, dash_max_min_2_x, 'k--') plt.text(5.16 * np.pi, 0.85, r'$2a_2$') plt.plot(max_2_y_time, max_2_y, 'k-') plt.plot(max_2_y_time, max_2_y_side, 'k-') plt.plot(min_2_y_time, min_2_y, 'k-') plt.plot(min_2_y_time, min_2_y_side, 'k-') plt.plot(dash_max_min_2_y_time, dash_max_min_2_y, 'k--') plt.text(4.08 * np.pi, -2.2, r'$\frac{p_2}{2}$') plt.plot(max_1_x_time, max_1_x, 'k-') plt.plot(max_1_x_time_side, max_1_x, 'k-') plt.plot(min_1_x_time, min_1_x, 'k-') plt.plot(min_1_x_time_side, min_1_x, 'k-') plt.plot(dash_max_min_1_x_time, dash_max_min_1_x, 'k--') plt.text(5.42 * np.pi, -0.1, r'$2a_1$') plt.plot(max_1_y_time, max_1_y, 'k-') plt.plot(max_1_y_time, max_1_y_side, 'k-') plt.plot(min_1_y_time, min_1_y, 'k-') plt.plot(min_1_y_time, min_1_y_side, 'k-') plt.plot(dash_max_min_1_y_time, dash_max_min_1_y, 'k--') plt.text(4.48 * np.pi, -2.5, r'$\frac{p_1}{2}$') plt.xlim(3.9 * np.pi, 5.6 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_characteristic_wave.png') plt.show() # plot 6 t = np.linspace(5, 95, 100) signal_orig = np.cos(2 * np.pi * t / 50) + 0.6 * np.cos(2 * np.pi * t / 25) + 0.5 * np.sin(2 * np.pi * t / 200) util_nn = emd_utils.Utility(time=t, time_series=signal_orig) maxima = signal_orig[util_nn.max_bool_func_1st_order_fd()] minima = signal_orig[util_nn.min_bool_func_1st_order_fd()] cs_max = CubicSpline(t[util_nn.max_bool_func_1st_order_fd()], maxima) cs_min = CubicSpline(t[util_nn.min_bool_func_1st_order_fd()], minima) time = np.linspace(0, 5 * np.pi, 1001) lsq_signal = np.cos(time) + np.cos(5 * time) knots = np.linspace(0, 5 * np.pi, 101) time_extended = time_extension(time) time_series_extended = np.full_like(time_extended, np.nan) time_series_extended[int(len(lsq_signal) - 1):int(2 * (len(lsq_signal) - 1) + 1)]
= lsq_signal neural_network_m = 200 neural_network_k = 100 # forward -> P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[(-(neural_network_m + neural_network_k - col)):(-(neural_network_m - col))] P[-1, col] = 1 # for additive constant t = lsq_signal[-neural_network_m:] # test - top seed_weights = np.ones(neural_network_k) / neural_network_k weights = 0 * seed_weights.copy() train_input = P[:-1, :] lr = 0.01 for iterations in range(1000): output = np.matmul(weights, train_input) error = (t - output) gradients = error * (- train_input) # guess average gradients average_gradients = np.mean(gradients, axis=1) # steepest descent max_gradient_vector = average_gradients * (np.abs(average_gradients) == max(np.abs(average_gradients))) adjustment = - lr * average_gradients # adjustment = - lr * max_gradient_vector weights += adjustment # test - bottom weights_right = np.hstack((weights, 0)) max_count_right = 0 min_count_right = 0 i_right = 0 while ((max_count_right < 1) or (min_count_right < 1)) and (i_right < len(lsq_signal) - 1): time_series_extended[int(2 * (len(lsq_signal) - 1) + 1 + i_right)] = \ sum(weights_right * np.hstack((time_series_extended[ int(2 * (len(lsq_signal) - 1) + 1 - neural_network_k + i_right): int(2 * (len(lsq_signal) - 1) + 1 + i_right)], 1))) i_right += 1 if i_right > 1: emd_utils_max = \ emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)], time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)]) if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0: max_count_right += 1 emd_utils_min = \ emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)], time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)]) if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0: min_count_right += 1 # backward <- P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[int(col + 1):int(col + neural_network_k + 1)] P[-1, col] = 1 # for additive constant t = lsq_signal[:neural_network_m] vx = cvx.Variable(int(neural_network_k + 1)) objective = cvx.Minimize(cvx.norm((2 * (vx * P) + 1 - t), 2)) # linear activation function is arbitrary prob = cvx.Problem(objective) result = prob.solve(verbose=True, solver=cvx.ECOS) weights_left = np.array(vx.value) max_count_left = 0 min_count_left = 0 i_left = 0 while ((max_count_left < 1) or (min_count_left < 1)) and (i_left < len(lsq_signal) - 1): time_series_extended[int(len(lsq_signal) - 2 - i_left)] = \ 2 * sum(weights_left * np.hstack((time_series_extended[int(len(lsq_signal) - 1 - i_left): int(len(lsq_signal) - 1 - i_left + neural_network_k)], 1))) + 1 i_left += 1 if i_left > 1: emd_utils_max = \ emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))], time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))]) if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0: max_count_left += 1 emd_utils_min = \ emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))], time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))]) if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0: min_count_left += 1 lsq_utils = emd_utils.Utility(time=time, 
time_series=lsq_signal) utils_extended = emd_utils.Utility(time=time_extended, time_series=time_series_extended) maxima = lsq_signal[lsq_utils.max_bool_func_1st_order_fd()] maxima_time = time[lsq_utils.max_bool_func_1st_order_fd()] maxima_extrapolate = time_series_extended[utils_extended.max_bool_func_1st_order_fd()][-1] maxima_extrapolate_time = time_extended[utils_extended.max_bool_func_1st_order_fd()][-1] minima = lsq_signal[lsq_utils.min_bool_func_1st_order_fd()] minima_time = time[lsq_utils.min_bool_func_1st_order_fd()] minima_extrapolate = time_series_extended[utils_extended.min_bool_func_1st_order_fd()][-2:] minima_extrapolate_time = time_extended[utils_extended.min_bool_func_1st_order_fd()][-2:] ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Single Neuron Neural Network Example') plt.plot(time, lsq_signal, zorder=2, label='Signal') plt.plot(time_extended, time_series_extended, c='g', zorder=1, label=textwrap.fill('Extrapolated signal', 12)) plt.scatter(maxima_time, maxima, c='r', zorder=3, label='Maxima') plt.scatter(minima_time, minima, c='b', zorder=3, label='Minima') plt.scatter(maxima_extrapolate_time, maxima_extrapolate, c='magenta', zorder=3, label=textwrap.fill('Extrapolated maxima', 12)) plt.scatter(minima_extrapolate_time, minima_extrapolate, c='cyan', zorder=4, label=textwrap.fill('Extrapolated minima', 12)) plt.plot(((time[-302] + time[-301]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k', label=textwrap.fill('Neural network inputs', 13)) plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(((time_extended[-1001] + time_extended[-1002]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k') plt.plot(((time[-202] + time[-201]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed', label=textwrap.fill('Neural network targets', 13)) plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(((time_extended[-1001] + time_extended[-1000]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed') plt.xlim(3.4 * np.pi, 5.6 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/neural_network.png') plt.show() # plot 6a np.random.seed(0) time = np.linspace(0, 5 * 
np.pi, 1001) knots_51 = np.linspace(0, 5 * np.pi, 51) time_series = np.cos(2 * time) + np.cos(4 * time) + np.cos(8 * time) noise = np.random.normal(0, 1, len(time_series)) time_series += noise advemdpy = EMD(time=time, time_series=time_series) imfs_51, hts_51, ifs_51 = advemdpy.empirical_mode_decomposition(knots=knots_51, max_imfs=3, edge_effect='symmetric_anchor', verbose=False)[:3] knots_31 = np.linspace(0, 5 * np.pi, 31) imfs_31, hts_31, ifs_31 = advemdpy.empirical_mode_decomposition(knots=knots_31, max_imfs=2, edge_effect='symmetric_anchor', verbose=False)[:3] knots_11 = np.linspace(0, 5 * np.pi, 11) imfs_11, hts_11, ifs_11 = advemdpy.empirical_mode_decomposition(knots=knots_11, max_imfs=1, edge_effect='symmetric_anchor', verbose=False)[:3] fig, axs = plt.subplots(3, 1) plt.suptitle(textwrap.fill('Comparison of Trends Extracted with Different Knot Sequences', 40)) plt.subplots_adjust(hspace=0.1) axs[0].plot(time, time_series, label='Time series') axs[0].plot(time, imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 1, IMF 2, & IMF 3 with 51 knots', 21)) print(f'DFA fluctuation with 51 knots: {np.round(np.var(time_series - (imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :])), 3)}') for knot in knots_51: axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[0].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[0].set_xticklabels(['', '', '', '', '', '']) axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[0].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[0].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].plot(time, time_series, label='Time series') axs[1].plot(time, imfs_31[1, :] + imfs_31[2, :], label=textwrap.fill('Sum of IMF 1 and IMF 2 with 31 knots', 19)) axs[1].plot(time, imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 2 and IMF 3 with 51 knots', 19)) print(f'DFA fluctuation with 31 knots: {np.round(np.var(time_series - (imfs_31[1, :] + imfs_31[2, :])), 3)}') for knot in knots_31: axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[1].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[1].set_xticklabels(['', '', '', '', '', '']) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) axs[1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[1].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[1].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[1].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') axs[2].plot(time, time_series, label='Time series') axs[2].plot(time, imfs_11[1, :], label='IMF 1 with 11 knots') axs[2].plot(time, imfs_31[2, :], label='IMF 2 with 31 knots') axs[2].plot(time, imfs_51[3, :], label='IMF 
3 with 51 knots') print(f'DFA fluctuation with 11 knots: {np.round(np.var(time_series - imfs_11[1, :]), 3)}') for knot in knots_11: axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[2].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[2].set_xticklabels(['$0$', r'$\pi$', r'$2\pi$', r'$3\pi$', r'$4\pi$', r'$5\pi$']) box_2 = axs[2].get_position() axs[2].set_position([box_2.x0 - 0.05, box_2.y0, box_2.width * 0.85, box_2.height]) axs[2].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[2].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[2].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[2].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[2].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') plt.savefig('jss_figures/DFA_different_trends.png') plt.show() # plot 6b fig, axs = plt.subplots(3, 1) plt.suptitle(textwrap.fill('Comparison of Trends Extracted with Different Knot Sequences Zoomed Region', 40)) plt.subplots_adjust(hspace=0.1) axs[0].plot(time, time_series, label='Time series') axs[0].plot(time, imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 1, IMF 2, & IMF 3 with 51 knots', 21)) for knot in knots_51: axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[0].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[0].set_xticklabels(['', '', '', '', '', '']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[0].set_ylim(-5.5, 5.5) axs[0].set_xlim(0.95 * np.pi, 1.55 * np.pi) axs[1].plot(time, time_series, label='Time series') axs[1].plot(time, imfs_31[1, :] + imfs_31[2, :], label=textwrap.fill('Sum of IMF 1 and IMF 2 with 31 knots', 19)) axs[1].plot(time, imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 2 and IMF 3 with 51 knots', 19)) for knot in knots_31: axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[1].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[1].set_xticklabels(['', '', '', '', '', '']) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) axs[1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].set_ylim(-5.5, 5.5) axs[1].set_xlim(0.95 * np.pi, 1.55 * np.pi) axs[2].plot(time, time_series, label='Time series') axs[2].plot(time, imfs_11[1, :], label='IMF 1 with 11 knots') axs[2].plot(time, imfs_31[2, :], label='IMF 2 with 31 knots') axs[2].plot(time, imfs_51[3, :], label='IMF 3 with 51 knots') for knot in knots_11: axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[2].set_xticks([np.pi, (3 / 2) * np.pi]) axs[2].set_xticklabels([r'$\pi$', r'$\frac{3}{2}\pi$']) box_2 = axs[2].get_position() axs[2].set_position([box_2.x0 - 0.05, box_2.y0, box_2.width * 0.85, box_2.height])
axs[2].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[2].set_ylim(-5.5, 5.5) axs[2].set_xlim(0.95 * np.pi, 1.55 * np.pi) plt.savefig('jss_figures/DFA_different_trends_zoomed.png') plt.show() hs_outputs = hilbert_spectrum(time, imfs_51, hts_51, ifs_51, max_frequency=12, plot=False) # plot 6c ax = plt.subplot(111) figure_size = plt.gcf().get_size_inches() factor = 0.9 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Simple Sinusoidal Time Series with Added Noise', 50)) x_hs, y, z = hs_outputs z_min, z_max = 0, np.abs(z).max() ax.pcolormesh(x_hs, y, np.abs(z), cmap='gist_rainbow', vmin=z_min, vmax=z_max) ax.plot(x_hs[0, :], 8 * np.ones_like(x_hs[0, :]), '--', label=r'$\omega = 8$', linewidth=3) ax.plot(x_hs[0, :], 4 * np.ones_like(x_hs[0, :]), '--', label=r'$\omega = 4$', linewidth=3) ax.plot(x_hs[0, :], 2 * np.ones_like(x_hs[0, :]), '--', label=r'$\omega = 2$', linewidth=3) ax.set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi]) ax.set_xticklabels(['$0$', r'$\pi$', r'$2\pi$', r'$3\pi$', r'$4\pi$']) plt.ylabel(r'Frequency (rad.s$^{-1}$)') plt.xlabel('Time (s)') box_0 = ax.get_position() ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.85, box_0.height * 0.9]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/DFA_hilbert_spectrum.png') plt.show() # plot 6d time = np.linspace(0, 5 * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) knots = np.linspace(0, 5 * np.pi, 51) fluc = Fluctuation(time=time, time_series=time_series) max_unsmoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='maxima', smooth=False) max_smoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='maxima', smooth=True) min_unsmoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='minima', smooth=False) min_smoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='minima', smooth=True) util = Utility(time=time, time_series=time_series) maxima = util.max_bool_func_1st_order_fd() minima = util.min_bool_func_1st_order_fd() ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title(textwrap.fill('Plot Demonstrating Unsmoothed Extrema Envelopes if Schoenberg–Whitney Conditions are Not Satisfied', 50)) plt.plot(time, time_series, label='Time series', zorder=2, linewidth=2) plt.scatter(time[maxima], time_series[maxima], c='r', label='Maxima', zorder=10) plt.scatter(time[minima], time_series[minima], c='b', label='Minima', zorder=10) plt.plot(time, max_unsmoothed[0], label=textwrap.fill('Unsmoothed maxima envelope', 10), c='darkorange') plt.plot(time, max_smoothed[0], label=textwrap.fill('Smoothed maxima envelope', 10), c='red') plt.plot(time, min_unsmoothed[0], label=textwrap.fill('Unsmoothed minima envelope', 10), c='cyan') plt.plot(time, min_smoothed[0], label=textwrap.fill('Smoothed minima envelope', 10), c='blue') for knot in knots[:-1]: plt.plot(knot * np.ones(101), np.linspace(-3.0, -2.0, 101), '--', c='grey', zorder=1) plt.plot(knots[-1] * np.ones(101), np.linspace(-3.0, -2.0, 101), '--', c='grey', label='Knots', zorder=1) plt.xticks((0, 1 * np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi), (r'$0$', r'$\pi$', r'2$\pi$', r'3$\pi$', r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) plt.xlim(-0.25 * np.pi, 5.25 * np.pi) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0,
box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/Schoenberg_Whitney_Conditions.png') plt.show() # plot 7 a = 0.25 width = 0.2 time = np.linspace((0 + a) * np.pi, (5 - a) * np.pi, 1001) knots = np.linspace((0 + a) * np.pi, (5 - a) * np.pi, 11) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] inflection_bool = utils.inflection_point() inflection_x = time[inflection_bool] inflection_y = time_series[inflection_bool] fluctuation = emd_mean.Fluctuation(time=time, time_series=time_series) maxima_envelope = fluctuation.envelope_basis_function_approximation(knots, 'maxima', smooth=False, smoothing_penalty=0.2, edge_effect='none', spline_method='b_spline')[0] maxima_envelope_smooth = fluctuation.envelope_basis_function_approximation(knots, 'maxima', smooth=True, smoothing_penalty=0.2, edge_effect='none', spline_method='b_spline')[0] minima_envelope = fluctuation.envelope_basis_function_approximation(knots, 'minima', smooth=False, smoothing_penalty=0.2, edge_effect='none', spline_method='b_spline')[0] minima_envelope_smooth = fluctuation.envelope_basis_function_approximation(knots, 'minima', smooth=True, smoothing_penalty=0.2, edge_effect='none', spline_method='b_spline')[0] inflection_points_envelope = fluctuation.direct_detrended_fluctuation_estimation(knots, smooth=True, smoothing_penalty=0.2, technique='inflection_points')[0] binomial_points_envelope = fluctuation.direct_detrended_fluctuation_estimation(knots, smooth=True, smoothing_penalty=0.2, technique='binomial_average', order=21, increment=20)[0] derivative_of_lsq = utils.derivative_forward_diff() derivative_time = time[:-1] derivative_knots = np.linspace(knots[0], knots[-1], 31) # change (1) detrended_fluctuation_technique and (2) max_internal_iter and (3) debug (confusing with external debugging) emd = AdvEMDpy.EMD(time=derivative_time, time_series=derivative_of_lsq) imf_1_of_derivative = emd.empirical_mode_decomposition(knots=derivative_knots, knot_time=derivative_time, text=False, verbose=False)[0][1, :] utils = emd_utils.Utility(time=time[:-1], time_series=imf_1_of_derivative) optimal_maxima = np.r_[False, utils.derivative_forward_diff() < 0, False] & \ np.r_[utils.zero_crossing() == 1, False] optimal_minima = np.r_[False, utils.derivative_forward_diff() > 0, False] & \ np.r_[utils.zero_crossing() == 1, False] EEMD_maxima_envelope = fluctuation.envelope_basis_function_approximation_fixed_points(knots, 'maxima', optimal_maxima, optimal_minima, smooth=False, smoothing_penalty=0.2, edge_effect='none')[0] EEMD_minima_envelope = fluctuation.envelope_basis_function_approximation_fixed_points(knots, 'minima', optimal_maxima, optimal_minima, smooth=False, smoothing_penalty=0.2, edge_effect='none')[0] ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Detrended Fluctuation Analysis Examples') plt.plot(time, time_series, linewidth=2, label='Time series') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(time[optimal_maxima], time_series[optimal_maxima], c='darkred', zorder=4, label=textwrap.fill('Optimal maxima', 10)) plt.scatter(time[optimal_minima], time_series[optimal_minima], c='darkblue', zorder=4,
label=textwrap.fill('Optimal minima', 10)) plt.scatter(inflection_x, inflection_y, c='magenta', zorder=4, label=textwrap.fill('Inflection points', 10)) plt.plot(time, maxima_envelope, c='darkblue', label=textwrap.fill('EMD envelope', 10)) plt.plot(time, minima_envelope, c='darkblue') plt.plot(time, (maxima_envelope + minima_envelope) / 2, c='darkblue') plt.plot(time, maxima_envelope_smooth, c='darkred', label=textwrap.fill('SEMD envelope', 10)) plt.plot(time, minima_envelope_smooth, c='darkred') plt.plot(time, (maxima_envelope_smooth + minima_envelope_smooth) / 2, c='darkred') plt.plot(time, EEMD_maxima_envelope, c='darkgreen', label=textwrap.fill('EEMD envelope', 10)) plt.plot(time, EEMD_minima_envelope, c='darkgreen') plt.plot(time, (EEMD_maxima_envelope + EEMD_minima_envelope) / 2, c='darkgreen') plt.plot(time, inflection_points_envelope, c='darkorange', label=textwrap.fill('Inflection point envelope', 10)) plt.plot(time, binomial_points_envelope, c='deeppink', label=textwrap.fill('Binomial average envelope', 10)) plt.plot(time, np.cos(time), c='black', label='True mean') plt.xticks((0, 1 * np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi), (r'$0$', r'$\pi$', r'2$\pi$', r'3$\pi$', r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) plt.xlim(-0.25 * np.pi, 5.25 * np.pi) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/detrended_fluctuation_analysis.png') plt.show() # Duffing Equation Example def duffing_equation(xy, ts): gamma = 0.1 epsilon = 1 omega = ((2 * np.pi) / 25) return [xy[1], xy[0] - epsilon * xy[0] ** 3 + gamma * np.cos(omega * ts)] t = np.linspace(0, 150, 1501) XY0 = [1, 1] solution = odeint(duffing_equation, XY0, t) x = solution[:, 0] dxdt = solution[:, 1] x_points = [0, 50, 100, 150] x_names = [0, 50, 100, 150] y_points_1 = [-2, 0, 2] y_points_2 = [-1, 0, 1] fig, axs = plt.subplots(2, 1) plt.subplots_adjust(hspace=0.2) axs[0].plot(t, x) axs[0].set_title('Duffing Equation Displacement') axs[0].set_ylim([-2, 2]) axs[0].set_xlim([0, 150]) axs[1].plot(t, dxdt) axs[1].set_title('Duffing Equation Velocity') axs[1].set_ylim([-1.5, 1.5]) axs[1].set_xlim([0, 150]) axis = 0 for ax in axs.flat: ax.label_outer() if axis == 0: ax.set_ylabel('x(t)') ax.set_yticks(y_points_1) if axis == 1: ax.set_ylabel(r'$ \dfrac{dx(t)}{dt} $') ax.set(xlabel='t') ax.set_yticks(y_points_2) ax.set_xticks(x_points) ax.set_xticklabels(x_names) axis += 1 plt.savefig('jss_figures/Duffing_equation.png') plt.show() # compare other packages Duffing - top pyemd = pyemd0215() py_emd = pyemd(x) IP, IF, IA = emd040.spectra.frequency_transform(py_emd.T, 10, 'hilbert') freq_edges, freq_bins = emd040.spectra.define_hist_bins(0, 0.2, 100) hht = emd040.spectra.hilberthuang(IF, IA, freq_edges) hht = gaussian_filter(hht, sigma=1) ax = plt.subplot(111) figure_size = plt.gcf().get_size_inches() factor = 1.0 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Duffing Equation using PyEMD 0.2.10', 40)) plt.pcolormesh(t, freq_bins, hht, cmap='gist_rainbow', vmin=0, vmax=np.max(np.max(np.abs(hht)))) plt.plot(t[:-1], 0.124 * np.ones_like(t[:-1]), '--', label=textwrap.fill('Hamiltonian frequency approximation', 15)) plt.plot(t[:-1], 0.04 * np.ones_like(t[:-1]), 'g--', label=textwrap.fill('Driving function frequency', 15)) plt.xticks([0, 50, 100, 150]) plt.yticks([0, 0.1, 0.2])
plt.ylabel('Frequency (Hz)') plt.xlabel('Time (s)') box_0 = ax.get_position() ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.75, box_0.height * 0.9]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/Duffing_equation_ht_pyemd.png') plt.show() emd_sift = emd040.sift.sift(x) IP, IF, IA = emd040.spectra.frequency_transform(emd_sift, 10, 'hilbert') freq_edges, freq_bins = emd040.spectra.define_hist_bins(0, 0.2, 100) hht = emd040.spectra.hilberthuang(IF, IA, freq_edges) hht = gaussian_filter(hht, sigma=1) ax = plt.subplot(111) figure_size = plt.gcf().get_size_inches() factor = 1.0 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Duffing Equation using emd 0.3.3', 40)) plt.pcolormesh(t, freq_bins, hht, cmap='gist_rainbow', vmin=0, vmax=np.max(np.max(np.abs(hht)))) plt.plot(t[:-1], 0.124 * np.ones_like(t[:-1]), '--', label=textwrap.fill('Hamiltonian frequency approximation', 15)) plt.plot(t[:-1], 0.04 * np.ones_like(t[:-1]), 'g--', label=textwrap.fill('Driving function frequency', 15)) plt.xticks([0, 50, 100, 150]) plt.yticks([0, 0.1, 0.2]) plt.ylabel('Frequency (Hz)') plt.xlabel('Time (s)') box_0 = ax.get_position() ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.75, box_0.height * 0.9]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/Duffing_equation_ht_emd.png') plt.show() # compare other packages Duffing - bottom emd_duffing = AdvEMDpy.EMD(time=t, time_series=x) emd_duff, emd_ht_duff, emd_if_duff, _, _, _, _ = emd_duffing.empirical_mode_decomposition(verbose=False) fig, axs = plt.subplots(2, 1) plt.subplots_adjust(hspace=0.3) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) axs[0].plot(t, emd_duff[1, :], label='AdvEMDpy') axs[0].plot(t, py_emd[0, :], '--', label='PyEMD 0.2.10') axs[0].plot(t, emd_sift[:, 0], '--', label='emd 0.3.3') axs[0].set_title('IMF 1') axs[0].set_ylim([-2, 2]) axs[0].set_xlim([0, 150]) axs[1].plot(t, emd_duff[2, :], label='AdvEMDpy') print(f'AdvEMDpy driving function error: {np.round(sum(abs(0.1 * np.cos(0.04 * 2 * np.pi * t) - emd_duff[2, :])), 3)}') axs[1].plot(t, py_emd[1, :], '--', label='PyEMD 0.2.10') print(f'PyEMD driving function error: {np.round(sum(abs(0.1 * np.cos(0.04 * 2 * np.pi * t) - py_emd[1, :])), 3)}') axs[1].plot(t, emd_sift[:, 1], '--', label='emd 0.3.3') print(f'emd driving function error: {np.round(sum(abs(0.1 * np.cos(0.04 * 2 * np.pi * t) - emd_sift[:, 1])), 3)}') axs[1].plot(t, 0.1 * np.cos(0.04 * 2 * np.pi * t), '--', label=r'$0.1$cos$(0.08{\pi}t)$') axs[1].set_title('IMF 2') axs[1].set_ylim([-0.2, 0.4]) axs[1].set_xlim([0, 150]) axis = 0 for ax in axs.flat: ax.label_outer() if axis == 0: ax.set_ylabel(r'$\gamma_1(t)$') ax.set_yticks([-2, 0, 2]) if axis == 1: ax.set_ylabel(r'$\gamma_2(t)$') ax.set_yticks([-0.2, 0, 0.2]) box_0 = ax.get_position() ax.set_position([box_0.x0, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) ax.set_xticks(x_points) ax.set_xticklabels(x_names) axis += 1 plt.savefig('jss_figures/Duffing_equation_imfs.png') plt.show() hs_outputs = hilbert_spectrum(t, emd_duff, emd_ht_duff, emd_if_duff, max_frequency=1.3, plot=False) ax = plt.subplot(111) plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Duffing Equation using AdvEMDpy', 40)) x, y, z = hs_outputs y = y / (2 * np.pi) z_min, z_max = 0, np.abs(z).max()
figure_size = plt.gcf().get_size_inches() factor = 1.0 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) ax.pcolormesh(x, y, np.abs(z), cmap='gist_rainbow', vmin=z_min, vmax=z_max) plt.plot(t[:-1], 0.124 * np.ones_like(t[:-1]), '--', label=textwrap.fill('Hamiltonian frequency approximation', 15)) plt.plot(t[:-1], 0.04 * np.ones_like(t[:-1]), 'g--', label=textwrap.fill('Driving function frequency', 15)) plt.xticks([0, 50, 100, 150]) plt.yticks([0, 0.1, 0.2]) plt.ylabel('Frequency (Hz)') plt.xlabel('Time (s)') box_0 = ax.get_position() ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.75, box_0.height * 0.9]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/Duffing_equation_ht.png') plt.show() # Carbon Dioxide Concentration Example CO2_data = pd.read_csv('Data/co2_mm_mlo.csv', header=51) plt.plot(CO2_data['month'], CO2_data['decimal date']) plt.title(textwrap.fill('Mean Monthly Concentration of Carbon Dioxide in the Atmosphere', 35)) plt.ylabel('Parts per million') plt.xlabel('Time (years)') plt.savefig('jss_figures/CO2_concentration.png') plt.show() signal = CO2_data['decimal date'] signal = np.asarray(signal) time = CO2_data['month'] time =
np.asarray(time)
numpy.asarray
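The completion above converts a pandas Series to a NumPy array. A minimal, self-contained sketch of what numpy.asarray does in that pattern (the sample values below are hypothetical, not taken from the dataset's CSV):

import numpy as np
import pandas as pd

# np.asarray returns the input's data as an ndarray, avoiding a copy when
# the input already is (or wraps) a compatible array.
series = pd.Series([315.71, 317.45, 317.50])  # hypothetical readings
arr = np.asarray(series)
assert isinstance(arr, np.ndarray)

# Unlike np.array, np.asarray does not copy an ndarray input of matching dtype:
a = np.arange(3.0)
assert np.asarray(a) is a      # same object returned, no copy
assert np.array(a) is not a    # np.array copies by default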
# pylint: disable=protected-access """ Test the wrappers for the C API. """ import os from contextlib import contextmanager import numpy as np import numpy.testing as npt import pandas as pd import pytest import xarray as xr from packaging.version import Version from pygmt import Figure, clib from pygmt.clib.conversion import dataarray_to_matrix from pygmt.clib.session import FAMILIES, VIAS from pygmt.exceptions import ( GMTCLibError, GMTCLibNoSessionError, GMTInvalidInput, GMTVersionError, ) from pygmt.helpers import GMTTempFile TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data") with clib.Session() as _lib: gmt_version = Version(_lib.info["version"]) @contextmanager def mock(session, func, returns=None, mock_func=None): """ Mock a GMT C API function to make it always return a given value. Used to test that exceptions are raised when API functions fail by producing a NULL pointer as output or non-zero status codes. Needed because it's not easy to get some API functions to fail without inducing a Segmentation Fault (which is a good thing because libgmt usually only fails with errors). """ if mock_func is None: def mock_api_function(*args): # pylint: disable=unused-argument """ A mock GMT API function that always returns a given value. """ return returns mock_func = mock_api_function get_libgmt_func = session.get_libgmt_func def mock_get_libgmt_func(name, argtypes=None, restype=None): """ Return our mock function. """ if name == func: return mock_func return get_libgmt_func(name, argtypes, restype) setattr(session, "get_libgmt_func", mock_get_libgmt_func) yield setattr(session, "get_libgmt_func", get_libgmt_func) def test_getitem(): """ Test that I can get correct constants from the C lib. """ ses = clib.Session() assert ses["GMT_SESSION_EXTERNAL"] != -99999 assert ses["GMT_MODULE_CMD"] != -99999 assert ses["GMT_PAD_DEFAULT"] != -99999 assert ses["GMT_DOUBLE"] != -99999 with pytest.raises(GMTCLibError): ses["A_WHOLE_LOT_OF_JUNK"] # pylint: disable=pointless-statement def test_create_destroy_session(): """ Test that create and destroy session are called without errors. """ # Create two sessions and make sure they are not pointing to the same memory session1 = clib.Session() session1.create(name="test_session1") assert session1.session_pointer is not None session2 = clib.Session() session2.create(name="test_session2") assert session2.session_pointer is not None assert session2.session_pointer != session1.session_pointer session1.destroy() session2.destroy() # Create and destroy a session twice ses = clib.Session() for __ in range(2): with pytest.raises(GMTCLibNoSessionError): ses.session_pointer # pylint: disable=pointless-statement ses.create("session1") assert ses.session_pointer is not None ses.destroy() with pytest.raises(GMTCLibNoSessionError): ses.session_pointer # pylint: disable=pointless-statement def test_create_session_fails(): """ Check that an exception is raised when failing to create a session. """ ses = clib.Session() with mock(ses, "GMT_Create_Session", returns=None): with pytest.raises(GMTCLibError): ses.create("test-session-name") # Should fail if trying to create a session before destroying the old one. ses.create("test1") with pytest.raises(GMTCLibError): ses.create("test2") def test_destroy_session_fails(): """ Fail to destroy session when given bad input.
""" ses = clib.Session() with pytest.raises(GMTCLibNoSessionError): ses.destroy() ses.create("test-session") with mock(ses, "GMT_Destroy_Session", returns=1): with pytest.raises(GMTCLibError): ses.destroy() ses.destroy() def test_call_module(): """ Run a command to see if call_module works. """ data_fname = os.path.join(TEST_DATA_DIR, "points.txt") out_fname = "test_call_module.txt" with clib.Session() as lib: with GMTTempFile() as out_fname: lib.call_module("info", "{} -C ->{}".format(data_fname, out_fname.name)) assert os.path.exists(out_fname.name) output = out_fname.read().strip() assert output == "11.5309 61.7074 -2.9289 7.8648 0.1412 0.9338" def test_call_module_invalid_arguments(): """ Fails for invalid module arguments. """ with clib.Session() as lib: with pytest.raises(GMTCLibError): lib.call_module("info", "bogus-data.bla") def test_call_module_invalid_name(): """ Fails when given bad input. """ with clib.Session() as lib: with pytest.raises(GMTCLibError): lib.call_module("meh", "") def test_call_module_error_message(): """ Check if the GMT error message was captured. """ with clib.Session() as lib: try: lib.call_module("info", "bogus-data.bla") except GMTCLibError as error: assert "Module 'info' failed with status code" in str(error) assert "gmtinfo [ERROR]: Cannot find file bogus-data.bla" in str(error) def test_method_no_session(): """ Fails when not in a session. """ # Create an instance of Session without "with" so no session is created. lib = clib.Session() with pytest.raises(GMTCLibNoSessionError): lib.call_module("gmtdefaults", "") with pytest.raises(GMTCLibNoSessionError): lib.session_pointer # pylint: disable=pointless-statement def test_parse_constant_single(): """ Parsing a single family argument correctly. """ lib = clib.Session() for family in FAMILIES: parsed = lib._parse_constant(family, valid=FAMILIES) assert parsed == lib[family] def test_parse_constant_composite(): """ Parsing a composite constant argument (separated by |) correctly. """ lib = clib.Session() test_cases = ((family, via) for family in FAMILIES for via in VIAS) for family, via in test_cases: composite = "|".join([family, via]) expected = lib[family] + lib[via] parsed = lib._parse_constant(composite, valid=FAMILIES, valid_modifiers=VIAS) assert parsed == expected def test_parse_constant_fails(): """ Check if the function fails when given bad input. """ lib = clib.Session() test_cases = [ "SOME_random_STRING", "GMT_IS_DATASET|GMT_VIA_MATRIX|GMT_VIA_VECTOR", "GMT_IS_DATASET|NOT_A_PROPER_VIA", "NOT_A_PROPER_FAMILY|GMT_VIA_MATRIX", "NOT_A_PROPER_FAMILY|ALSO_INVALID", ] for test_case in test_cases: with pytest.raises(GMTInvalidInput): lib._parse_constant(test_case, valid=FAMILIES, valid_modifiers=VIAS) # Should also fail if not given valid modifiers but is using them anyway. # This should work... lib._parse_constant( "GMT_IS_DATASET|GMT_VIA_MATRIX", valid=FAMILIES, valid_modifiers=VIAS ) # But this shouldn't. with pytest.raises(GMTInvalidInput): lib._parse_constant( "GMT_IS_DATASET|GMT_VIA_MATRIX", valid=FAMILIES, valid_modifiers=None ) def test_create_data_dataset(): """ Run the function to make sure it doesn't fail badly.
""" with clib.Session() as lib: # Dataset from vectors data_vector = lib.create_data( family="GMT_IS_DATASET|GMT_VIA_VECTOR", geometry="GMT_IS_POINT", mode="GMT_CONTAINER_ONLY", dim=[10, 20, 1, 0], # columns, rows, layers, dtype ) # Dataset from matrices data_matrix = lib.create_data( family="GMT_IS_DATASET|GMT_VIA_MATRIX", geometry="GMT_IS_POINT", mode="GMT_CONTAINER_ONLY", dim=[10, 20, 1, 0], ) assert data_vector != data_matrix def test_create_data_grid_dim(): """ Create a grid ignoring range and inc. """ with clib.Session() as lib: # Grids from matrices using dim lib.create_data( family="GMT_IS_GRID|GMT_VIA_MATRIX", geometry="GMT_IS_SURFACE", mode="GMT_CONTAINER_ONLY", dim=[10, 20, 1, 0], ) def test_create_data_grid_range(): """ Create a grid specifying range and inc instead of dim. """ with clib.Session() as lib: # Grids from matrices using range and inc lib.create_data( family="GMT_IS_GRID|GMT_VIA_MATRIX", geometry="GMT_IS_SURFACE", mode="GMT_CONTAINER_ONLY", ranges=[150.0, 250.0, -20.0, 20.0], inc=[0.1, 0.2], ) def test_create_data_fails(): """ Check that create_data raises exceptions for invalid input and output. """ # Passing in invalid mode with pytest.raises(GMTInvalidInput): with clib.Session() as lib: lib.create_data( family="GMT_IS_DATASET", geometry="GMT_IS_SURFACE", mode="Not_a_valid_mode", dim=[0, 0, 1, 0], ranges=[150.0, 250.0, -20.0, 20.0], inc=[0.1, 0.2], ) # Passing in invalid geometry with pytest.raises(GMTInvalidInput): with clib.Session() as lib: lib.create_data( family="GMT_IS_GRID", geometry="Not_a_valid_geometry", mode="GMT_CONTAINER_ONLY", dim=[0, 0, 1, 0], ranges=[150.0, 250.0, -20.0, 20.0], inc=[0.1, 0.2], ) # If the data pointer returned is None (NULL pointer) with pytest.raises(GMTCLibError): with clib.Session() as lib: with mock(lib, "GMT_Create_Data", returns=None): lib.create_data( family="GMT_IS_DATASET", geometry="GMT_IS_SURFACE", mode="GMT_CONTAINER_ONLY", dim=[11, 10, 2, 0], ) def test_virtual_file(): """ Test passing in data via a virtual file with a Dataset. """ dtypes = "float32 float64 int32 int64 uint32 uint64".split() shape = (5, 3) for dtype in dtypes: with clib.Session() as lib: family = "GMT_IS_DATASET|GMT_VIA_MATRIX" geometry = "GMT_IS_POINT" dataset = lib.create_data( family=family, geometry=geometry, mode="GMT_CONTAINER_ONLY", dim=[shape[1], shape[0], 1, 0], # columns, rows, layers, dtype ) data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape) lib.put_matrix(dataset, matrix=data) # Add the dataset to a virtual file and pass it along to gmt info vfargs = (family, geometry, "GMT_IN|GMT_IS_REFERENCE", dataset) with lib.open_virtual_file(*vfargs) as vfile: with GMTTempFile() as outfile: lib.call_module("info", "{} ->{}".format(vfile, outfile.name)) output = outfile.read(keep_tabs=True) bounds = "\t".join( ["<{:.0f}/{:.0f}>".format(col.min(), col.max()) for col in data.T] ) expected = "<matrix memory>: N = {}\t{}\n".format(shape[0], bounds) assert output == expected def test_virtual_file_fails(): """ Check that opening and closing virtual files raises an exception for non-zero return codes. """ vfargs = ( "GMT_IS_DATASET|GMT_VIA_MATRIX", "GMT_IS_POINT", "GMT_IN|GMT_IS_REFERENCE", None, ) # Mock Open_VirtualFile to test the status check when entering the context. # If the exception is raised, the code won't get to the closing of the # virtual file.
with clib.Session() as lib, mock(lib, "GMT_Open_VirtualFile", returns=1): with pytest.raises(GMTCLibError): with lib.open_virtual_file(*vfargs): print("Should not get to this code") # Test the status check when closing the virtual file # Mock the opening to return 0 (success) so that we don't open a file that # we won't close later. with clib.Session() as lib, mock(lib, "GMT_Open_VirtualFile", returns=0), mock( lib, "GMT_Close_VirtualFile", returns=1 ): with pytest.raises(GMTCLibError): with lib.open_virtual_file(*vfargs): pass print("Shouldn't get to this code either") def test_virtual_file_bad_direction(): """ Test passing an invalid direction argument. """ with clib.Session() as lib: vfargs = ( "GMT_IS_DATASET|GMT_VIA_MATRIX", "GMT_IS_POINT", "GMT_IS_GRID", # The invalid direction argument 0, ) with pytest.raises(GMTInvalidInput): with lib.open_virtual_file(*vfargs): print("This should have failed") def test_virtualfile_from_vectors(): """ Test the automation for transforming vectors to virtual file dataset. """ dtypes = "float32 float64 int32 int64 uint32 uint64".split() size = 10 for dtype in dtypes: x = np.arange(size, dtype=dtype) y = np.arange(size, size * 2, 1, dtype=dtype) z = np.arange(size * 2, size * 3, 1, dtype=dtype) with clib.Session() as lib: with lib.virtualfile_from_vectors(x, y, z) as vfile: with GMTTempFile() as outfile: lib.call_module("info", "{} ->{}".format(vfile, outfile.name)) output = outfile.read(keep_tabs=True) bounds = "\t".join( ["<{:.0f}/{:.0f}>".format(i.min(), i.max()) for i in (x, y, z)] ) expected = "<vector memory>: N = {}\t{}\n".format(size, bounds) assert output == expected @pytest.mark.parametrize("dtype", [str, object]) def test_virtualfile_from_vectors_one_string_or_object_column(dtype): """ Test passing in one column with string or object dtype into virtual file dataset. """ size = 5 x = np.arange(size, dtype=np.int32) y = np.arange(size, size * 2, 1, dtype=np.int32) strings = np.array(["a", "bc", "defg", "hijklmn", "opqrst"], dtype=dtype) with clib.Session() as lib: with lib.virtualfile_from_vectors(x, y, strings) as vfile: with GMTTempFile() as outfile: lib.call_module("convert", f"{vfile} ->{outfile.name}") output = outfile.read(keep_tabs=True) expected = "".join(f"{i}\t{j}\t{k}\n" for i, j, k in zip(x, y, strings)) assert output == expected @pytest.mark.parametrize("dtype", [str, object]) def test_virtualfile_from_vectors_two_string_or_object_columns(dtype): """ Test passing in two columns of string or object dtype into virtual file dataset. """ size = 5 x = np.arange(size, dtype=np.int32) y = np.arange(size, size * 2, 1, dtype=np.int32) strings1 = np.array(["a", "bc", "def", "ghij", "klmno"], dtype=dtype) strings2 = np.array(["pqrst", "uvwx", "yz!", "@#", "$"], dtype=dtype) with clib.Session() as lib: with lib.virtualfile_from_vectors(x, y, strings1, strings2) as vfile: with GMTTempFile() as outfile: lib.call_module("convert", f"{vfile} ->{outfile.name}") output = outfile.read(keep_tabs=True) expected = "".join( f"{h}\t{i}\t{j} {k}\n" for h, i, j, k in zip(x, y, strings1, strings2) ) assert output == expected def test_virtualfile_from_vectors_transpose(): """ Test transforming matrix columns to virtual file dataset. 
""" dtypes = "float32 float64 int32 int64 uint32 uint64".split() shape = (7, 5) for dtype in dtypes: data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape) with clib.Session() as lib: with lib.virtualfile_from_vectors(*data.T) as vfile: with GMTTempFile() as outfile: lib.call_module("info", "{} -C ->{}".format(vfile, outfile.name)) output = outfile.read(keep_tabs=True) bounds = "\t".join( ["{:.0f}\t{:.0f}".format(col.min(), col.max()) for col in data.T] ) expected = "{}\n".format(bounds) assert output == expected def test_virtualfile_from_vectors_diff_size(): """ Test the function fails for arrays of different sizes. """ x = np.arange(5) y = np.arange(6) with clib.Session() as lib: with pytest.raises(GMTInvalidInput): with lib.virtualfile_from_vectors(x, y): print("This should have failed") def test_virtualfile_from_matrix(): """ Test transforming a matrix to virtual file dataset. """ dtypes = "float32 float64 int32 int64 uint32 uint64".split() shape = (7, 5) for dtype in dtypes: data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape) with clib.Session() as lib: with lib.virtualfile_from_matrix(data) as vfile: with GMTTempFile() as outfile: lib.call_module("info", "{} ->{}".format(vfile, outfile.name)) output = outfile.read(keep_tabs=True) bounds = "\t".join( ["<{:.0f}/{:.0f}>".format(col.min(), col.max()) for col in data.T] ) expected = "<matrix memory>: N = {}\t{}\n".format(shape[0], bounds) assert output == expected def test_virtualfile_from_matrix_slice(): """ Test transforming a slice of a larger array to virtual file dataset. """ dtypes = "float32 float64 int32 int64 uint32 uint64".split() shape = (10, 6) for dtype in dtypes: full_data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape) rows = 5 cols = 3 data = full_data[:rows, :cols] with clib.Session() as lib: with lib.virtualfile_from_matrix(data) as vfile: with GMTTempFile() as outfile: lib.call_module("info", "{} ->{}".format(vfile, outfile.name)) output = outfile.read(keep_tabs=True) bounds = "\t".join( ["<{:.0f}/{:.0f}>".format(col.min(), col.max()) for col in data.T] ) expected = "<matrix memory>: N = {}\t{}\n".format(rows, bounds) assert output == expected def test_virtualfile_from_vectors_pandas(): """ Pass vectors to a dataset using pandas Series. """ dtypes = "float32 float64 int32 int64 uint32 uint64".split() size = 13 for dtype in dtypes: data = pd.DataFrame( data=dict( x=np.arange(size, dtype=dtype), y=
np.arange(size, size * 2, 1, dtype=dtype)
numpy.arange
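The completion above fills a vector with the half-open range [size, 2 * size) at unit step. A minimal sketch of that numpy.arange call pattern (the dtype and size here are illustrative, not taken from the dataset row):

import numpy as np

size = 5
y = np.arange(size, size * 2, 1, dtype='float64')  # start, stop, step, dtype
print(y)        # [5. 6. 7. 8. 9.]
print(y.dtype)  # float64

# The stop value is excluded, so an integer step of 1 yields exactly `size` elements.
assert len(y) == size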
# ________ # / # \ / # \ / # \/ import random import textwrap import emd_mean import AdvEMDpy import emd_basis import emd_utils import numpy as np import pandas as pd import cvxpy as cvx import seaborn as sns import matplotlib.pyplot as plt from scipy.integrate import odeint from scipy.ndimage import gaussian_filter from emd_utils import time_extension, Utility from scipy.interpolate import CubicSpline from emd_hilbert import Hilbert, hilbert_spectrum from emd_preprocess import Preprocess from emd_mean import Fluctuation from AdvEMDpy import EMD # alternate packages from PyEMD import EMD as pyemd0215 import emd as emd040 sns.set(style='darkgrid') pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001) pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time) pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series) # plot 0 - addition fig = plt.figure(figsize=(9, 4)) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('First Iteration of Sifting Algorithm') plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1) plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()], c='r', label=r'$M(t_i)$', zorder=2) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4) plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()], c='c', label=r'$m(t_j)$', zorder=3) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5) plt.yticks(ticks=[-2, -1, 0, 1, 2]) plt.xticks(ticks=[0, np.pi, 2 * np.pi], labels=[r'0', r'$\pi$', r'$2\pi$']) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/pseudo_algorithm.png') plt.show() knots = np.arange(12) time = np.linspace(0, 11, 1101) basis = emd_basis.Basis(time=time, time_series=time) b_spline_basis = basis.cubic_b_spline(knots) chsi_basis = basis.chsi_basis(knots) # plot 1 plt.title('Non-Natural Cubic B-Spline Bases at Boundary') plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $') plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $') plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $') plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $') plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $') plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $']) plt.xlim(4.4, 6.6) plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.legend(loc='upper left') plt.savefig('jss_figures/boundary_bases.png') plt.show() # plot 1a - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) knots_uniform = np.linspace(0, 2 * np.pi, 51) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0] fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) 
# plot 1a - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
knots_uniform = np.linspace(0, 2 * np.pi, 51)
emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series)
imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0]

fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Uniform Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Uniform Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Uniform Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
    for j in range(1, len(knots_uniform)):
        axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_uniform.png')
plt.show()

# plot 1b - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series)
imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric',
                                                              optimise_knots=1, verbose=False)

fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Statically Optimised Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Statically Optimised Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Statically Optimised Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
    for j in range(1, len(knots)):
        axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_1.png')
plt.show()
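# NOTE (editor's aside): 'optimise_knots=1' above returns a single, statically
# optimised knot vector, indexed flat as knots[j]; 'optimise_knots=2' below
# re-optimises the knots for each IMF, which is why plot 1c indexes knots[i][j].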
# plot 1c - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series)
imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric',
                                                              optimise_knots=2, verbose=False)

fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Dynamically Optimised Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Dynamically Optimised Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Dynamically Optimised Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
    for j in range(1, len(knots[i])):
        axs[i].plot(knots[i][j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_2.png')
plt.show()
# plot 1d - addition
window = 81
fig, axs = plt.subplots(2, 1)
fig.subplots_adjust(hspace=0.4)
figure_size = plt.gcf().get_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Preprocess Filtering Demonstration')
axs[1].set_title('Zoomed Region')
preprocess_time = pseudo_alg_time.copy()
np.random.seed(1)
random.seed(1)
preprocess_time_series = pseudo_alg_time_series + np.random.normal(0, 0.1, len(preprocess_time))
for i in random.sample(range(1000), 500):
    preprocess_time_series[i] += np.random.normal(0, 1)
preprocess = Preprocess(time=preprocess_time, time_series=preprocess_time_series)
axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple',
            label=textwrap.fill('Noiseless time series', 12))
axs[0].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1],
            label=textwrap.fill('Mean filter', 12))
axs[0].plot(preprocess_time, preprocess.median_filter(window_width=window)[1],
            label=textwrap.fill('Median filter', 13))
axs[0].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1],
            label=textwrap.fill('Winsorize filter', 12))
axs[0].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1],
            label=textwrap.fill('Winsorize interpolation filter', 14))
axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey',
            label=textwrap.fill('Quantile window', 12))
axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey')
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black',
            label=textwrap.fill('Zoomed region', 10))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black')
axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple',
            label=textwrap.fill('Noiseless time series', 12))
axs[1].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1],
            label=textwrap.fill('Mean filter', 12))
axs[1].plot(preprocess_time, preprocess.median_filter(window_width=window)[1],
            label=textwrap.fill('Median filter', 13))
axs[1].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1],
            label=textwrap.fill('Winsorize filter', 12))
axs[1].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1],
            label=textwrap.fill('Winsorize interpolation filter', 14))
axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey',
            label=textwrap.fill('Quantile window', 12))
axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey')
axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi)
axs[1].set_ylim(-3, 3)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[np.pi])
axs[1].set_xticklabels(labels=[r'$\pi$'])
box_0 = axs[0].get_position()
axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15))
box_1 = axs[1].get_position()
axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height])
plt.savefig('jss_figures/preprocess_filter.png')
plt.show()
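# NOTE (editor's aside): each Preprocess filter above appears to return a
# (time, time_series) pair, hence the [1] indexing when plotting; downsample()
# in the next block supplies both of its elements to plt.plot directly.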
# plot 1e - addition
fig, axs = plt.subplots(2, 1)
fig.subplots_adjust(hspace=0.4)
figure_size = plt.gcf().get_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Preprocess Smoothing Demonstration')
axs[1].set_title('Zoomed Region')
axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple',
            label=textwrap.fill('Noiseless time series', 12))
axs[0].plot(preprocess_time, preprocess.hp()[1],
            label=textwrap.fill('Hodrick-Prescott smoothing', 12))
axs[0].plot(preprocess_time, preprocess.hw(order=51)[1],
            label=textwrap.fill('Henderson-Whittaker smoothing', 13))
downsampled_and_decimated = preprocess.downsample()
axs[0].plot(downsampled_and_decimated[0], downsampled_and_decimated[1],
            label=textwrap.fill('Downsampled & decimated', 11))
downsampled = preprocess.downsample(decimate=False)
axs[0].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black',
            label=textwrap.fill('Zoomed region', 10))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black')
axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple',
            label=textwrap.fill('Noiseless time series', 12))
axs[1].plot(preprocess_time, preprocess.hp()[1],
            label=textwrap.fill('Hodrick-Prescott smoothing', 12))
axs[1].plot(preprocess_time, preprocess.hw(order=51)[1],
            label=textwrap.fill('Henderson-Whittaker smoothing', 13))
axs[1].plot(downsampled_and_decimated[0], downsampled_and_decimated[1],
            label=textwrap.fill('Downsampled & decimated', 13))
axs[1].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13))
axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi)
axs[1].set_ylim(-3, 3)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[np.pi])
axs[1].set_xticklabels(labels=[r'$\pi$'])
box_0 = axs[0].get_position()
axs[0].set_position([box_0.x0 - 0.06, box_0.y0, box_0.width * 0.85, box_0.height])
axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15))
box_1 = axs[1].get_position()
axs[1].set_position([box_1.x0 - 0.06, box_1.y0, box_1.width * 0.85, box_1.height])
plt.savefig('jss_figures/preprocess_smooth.png')
plt.show()

# plot 2
fig, axs = plt.subplots(1, 2, sharey=True)
axs[0].set_title('Cubic B-Spline Bases')
axs[0].plot(time, b_spline_basis[2, :].T, '--', label='Basis 1')
axs[0].plot(time, b_spline_basis[3, :].T, '--', label='Basis 2')
axs[0].plot(time, b_spline_basis[4, :].T, '--', label='Basis 3')
axs[0].plot(time, b_spline_basis[5, :].T, '--', label='Basis 4')
axs[0].legend(loc='upper left')
axs[0].plot(5 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-')
axs[0].plot(6 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-')
axs[0].set_xticks([5, 6])
axs[0].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $'])
axs[0].set_xlim(4.5, 6.5)
axs[1].set_title('Cubic Hermite Spline Bases')
axs[1].plot(time, chsi_basis[10, :].T, '--')
axs[1].plot(time, chsi_basis[11, :].T, '--')
axs[1].plot(time, chsi_basis[12, :].T, '--')
axs[1].plot(time, chsi_basis[13, :].T, '--')
axs[1].plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-')
axs[1].plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-')
axs[1].set_xticks([5, 6])
axs[1].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $'])
axs[1].set_xlim(4.5, 6.5)
plt.savefig('jss_figures/comparing_bases.png')
plt.show()
# plot 3
a = 0.25
width = 0.2
time = np.linspace(0, (5 - a) * np.pi, 1001)
time_series = np.cos(time) + np.cos(5 * time)
utils = emd_utils.Utility(time=time, time_series=time_series)
max_bool = utils.max_bool_func_1st_order_fd()
maxima_x = time[max_bool]
maxima_y = time_series[max_bool]
min_bool = utils.min_bool_func_1st_order_fd()
minima_x = time[min_bool]
minima_y = time_series[min_bool]
max_dash_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101)
max_dash = maxima_y[-1] * np.ones_like(max_dash_time)
min_dash_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101)
min_dash = minima_y[-1] * np.ones_like(min_dash_time)
dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101)
dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101)
max_discard = maxima_y[-1]
max_discard_time = minima_x[-1] - maxima_x[-1] + minima_x[-1]
max_discard_dash_time = np.linspace(max_discard_time - width, max_discard_time + width, 101)
max_discard_dash = max_discard * np.ones_like(max_discard_dash_time)
dash_2_time = np.linspace(minima_x[-1], max_discard_time, 101)
dash_2 = np.linspace(minima_y[-1], max_discard, 101)
end_point_time = time[-1]
end_point = time_series[-1]
time_reflect = np.linspace((5 - a) * np.pi, (5 + a) * np.pi, 101)
time_series_reflect = np.flip(np.cos(np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101)) +
                              np.cos(5 * np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101)))
time_series_anti_reflect = time_series_reflect[0] - time_series_reflect
utils = emd_utils.Utility(time=time, time_series=time_series_anti_reflect)
anti_max_bool = utils.max_bool_func_1st_order_fd()
anti_max_point_time = time_reflect[anti_max_bool]
anti_max_point = time_series_anti_reflect[anti_max_bool]
utils = emd_utils.Utility(time=time, time_series=time_series_reflect)
no_anchor_max_time = time_reflect[utils.max_bool_func_1st_order_fd()]
no_anchor_max = time_series_reflect[utils.max_bool_func_1st_order_fd()]
point_1 = 5.4
length_distance = np.linspace(maxima_y[-1], minima_y[-1], 101)
length_distance_time = point_1 * np.pi * np.ones_like(length_distance)
length_time = np.linspace(point_1 * np.pi - width, point_1 * np.pi + width, 101)
length_top = maxima_y[-1] * np.ones_like(length_time)
length_bottom = minima_y[-1] * np.ones_like(length_time)
point_2 = 5.2
length_distance_2 = np.linspace(time_series[-1], minima_y[-1], 101)
length_distance_time_2 = point_2 * np.pi * np.ones_like(length_distance_2)
length_time_2 = np.linspace(point_2 * np.pi - width, point_2 * np.pi + width, 101)
length_top_2 = time_series[-1] * np.ones_like(length_time_2)
length_bottom_2 = minima_y[-1] * np.ones_like(length_time_2)
symmetry_axis_1_time = minima_x[-1] * np.ones(101)
symmetry_axis_2_time = time[-1] * np.ones(101)
symmetry_axis = np.linspace(-2, 2, 101)
end_time = np.linspace(time[-1] - width, time[-1] + width, 101)
end_signal = time_series[-1] * np.ones_like(end_time)
anti_symmetric_time = np.linspace(time[-1] - 0.5, time[-1] + 0.5, 101)
anti_symmetric_signal = time_series[-1] * np.ones_like(anti_symmetric_time)

ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.plot(time, time_series, linewidth=2, label='Signal')
plt.title('Symmetry Edge Effects Example')
plt.plot(time_reflect, time_series_reflect, 'g--', linewidth=2,
         label=textwrap.fill('Symmetric signal', 10))
plt.plot(time_reflect[:51], time_series_anti_reflect[:51], '--', c='purple', linewidth=2,
         label=textwrap.fill('Anti-symmetric signal', 10))
plt.plot(max_dash_time, max_dash, 'k-')
plt.plot(min_dash_time, min_dash, 'k-')
plt.plot(dash_1_time, dash_1, 'k--')
plt.plot(dash_2_time, dash_2, 'k--')
plt.plot(length_distance_time, length_distance, 'k--')
plt.plot(length_distance_time_2, length_distance_2, 'k--')
plt.plot(length_time, length_top, 'k-')
plt.plot(length_time, length_bottom, 'k-')
plt.plot(length_time_2, length_top_2, 'k-')
plt.plot(length_time_2, length_bottom_2, 'k-')
plt.plot(end_time, end_signal, 'k-')
plt.plot(symmetry_axis_1_time, symmetry_axis, 'r--', zorder=1)
plt.plot(anti_symmetric_time, anti_symmetric_signal, 'r--', zorder=1)
plt.plot(symmetry_axis_2_time, symmetry_axis, 'r--', label=textwrap.fill('Axes of symmetry', 10), zorder=1)
plt.text(5.1 * np.pi, -0.7, r'$\beta$L')
plt.text(5.34 * np.pi, -0.05, 'L')
plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima')
plt.scatter(max_discard_time, max_discard, c='purple', zorder=4,
            label=textwrap.fill('Symmetric Discard maxima', 10))
plt.scatter(end_point_time, end_point, c='orange', zorder=4,
            label=textwrap.fill('Symmetric Anchor maxima', 10))
plt.scatter(anti_max_point_time, anti_max_point, c='green', zorder=4,
            label=textwrap.fill('Anti-Symmetric maxima', 10))
plt.scatter(no_anchor_max_time, no_anchor_max, c='gray', zorder=4,
            label=textwrap.fill('Symmetric maxima', 10))
plt.xlim(3.9 * np.pi, 5.5 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_symmetry_anti.png')
plt.show()
# plot 4
a = 0.21
width = 0.2
time = np.linspace(0, (5 - a) * np.pi, 1001)
time_series = np.cos(time) + np.cos(5 * time)
utils = emd_utils.Utility(time=time, time_series=time_series)
max_bool = utils.max_bool_func_1st_order_fd()
maxima_x = time[max_bool]
maxima_y = time_series[max_bool]
min_bool = utils.min_bool_func_1st_order_fd()
minima_x = time[min_bool]
minima_y = time_series[min_bool]
max_dash_1 =
np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101)
numpy.linspace
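Both APIs labelled in this section also appear inside the prompt above with exactly these arguments; a small self-contained illustration (an editor's aside, not a dataset record):

import numpy as np

knots = np.arange(12)            # 12 integers 0..11; step-based, stop exclusive
time = np.linspace(0, 11, 1101)  # 1101 evenly spaced samples; count-based, endpoints included
assert time[0] == 0.0 and time[-1] == 11.0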
""" YTArray class. """ from __future__ import print_function #----------------------------------------------------------------------------- # Copyright (c) 2013, yt Development Team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. #----------------------------------------------------------------------------- import copy import numpy as np from distutils.version import LooseVersion from functools import wraps from numpy import \ add, subtract, multiply, divide, logaddexp, logaddexp2, true_divide, \ floor_divide, negative, power, remainder, mod, absolute, rint, \ sign, conj, exp, exp2, log, log2, log10, expm1, log1p, sqrt, square, \ reciprocal, sin, cos, tan, arcsin, arccos, arctan, arctan2, \ hypot, sinh, cosh, tanh, arcsinh, arccosh, arctanh, deg2rad, rad2deg, \ bitwise_and, bitwise_or, bitwise_xor, invert, left_shift, right_shift, \ greater, greater_equal, less, less_equal, not_equal, equal, logical_and, \ logical_or, logical_xor, logical_not, maximum, minimum, fmax, fmin, \ isreal, iscomplex, isfinite, isinf, isnan, signbit, copysign, nextafter, \ modf, ldexp, frexp, fmod, floor, ceil, trunc, fabs, spacing try: # numpy 1.13 or newer from numpy import positive, divmod as divmod_, isnat, heaviside except ImportError: positive, divmod_, isnat, heaviside = (None,)*4 from yt.units.unit_object import Unit, UnitParseError from yt.units.unit_registry import UnitRegistry from yt.units.dimensions import \ angle, \ current_mks, \ dimensionless, \ em_dimensions from yt.utilities.exceptions import \ YTUnitOperationError, YTUnitConversionError, \ YTUfuncUnitError, YTIterableUnitCoercionError, \ YTInvalidUnitEquivalence, YTEquivalentDimsError from yt.utilities.lru_cache import lru_cache from numbers import Number as numeric_type from yt.utilities.on_demand_imports import _astropy from sympy import Rational from yt.units.unit_lookup_table import \ default_unit_symbol_lut from yt.units.equivalencies import equivalence_registry from yt.utilities.logger import ytLogger as mylog from .pint_conversions import convert_pint_units NULL_UNIT = Unit() POWER_SIGN_MAPPING = {multiply: 1, divide: -1} # redefine this here to avoid a circular import from yt.funcs def iterable(obj): try: len(obj) except: return False return True def return_arr(func): @wraps(func) def wrapped(*args, **kwargs): ret, units = func(*args, **kwargs) if ret.shape == (): return YTQuantity(ret, units) else: # This could be a subclass, so don't call YTArray directly. 
return type(args[0])(ret, units) return wrapped @lru_cache(maxsize=128, typed=False) def sqrt_unit(unit): return unit**0.5 @lru_cache(maxsize=128, typed=False) def multiply_units(unit1, unit2): return unit1 * unit2 def preserve_units(unit1, unit2=None): return unit1 @lru_cache(maxsize=128, typed=False) def power_unit(unit, power): return unit**power @lru_cache(maxsize=128, typed=False) def square_unit(unit): return unit*unit @lru_cache(maxsize=128, typed=False) def divide_units(unit1, unit2): return unit1/unit2 @lru_cache(maxsize=128, typed=False) def reciprocal_unit(unit): return unit**-1 def passthrough_unit(unit, unit2=None): return unit def return_without_unit(unit, unit2=None): return None def arctan2_unit(unit1, unit2): return NULL_UNIT def comparison_unit(unit1, unit2=None): return None def invert_units(unit): raise TypeError( "Bit-twiddling operators are not defined for YTArray instances") def bitop_units(unit1, unit2): raise TypeError( "Bit-twiddling operators are not defined for YTArray instances") def get_inp_u_unary(ufunc, inputs, out_arr=None): inp = inputs[0] u = getattr(inp, 'units', None) if u is None: u = NULL_UNIT if u.dimensions is angle and ufunc in trigonometric_operators: inp = inp.in_units('radian').v if out_arr is not None: out_arr = ufunc(inp).view(np.ndarray) return out_arr, inp, u def get_inp_u_binary(ufunc, inputs): inp1 = coerce_iterable_units(inputs[0]) inp2 = coerce_iterable_units(inputs[1]) unit1 = getattr(inp1, 'units', None) unit2 = getattr(inp2, 'units', None) ret_class = get_binary_op_return_class(type(inp1), type(inp2)) if unit1 is None: unit1 = Unit(registry=getattr(unit2, 'registry', None)) if unit2 is None and ufunc is not power: unit2 = Unit(registry=getattr(unit1, 'registry', None)) elif ufunc is power: unit2 = inp2 if isinstance(unit2, np.ndarray): if isinstance(unit2, YTArray): if unit2.units.is_dimensionless: pass else: raise YTUnitOperationError(ufunc, unit1, unit2) unit2 = 1.0 return (inp1, inp2), (unit1, unit2), ret_class def handle_preserve_units(inps, units, ufunc, ret_class): if units[0] != units[1]: any_nonzero = [np.any(inps[0]), np.any(inps[1])] if any_nonzero[0] == np.bool_(False): units = (units[1], units[1]) elif any_nonzero[1] == np.bool_(False): units = (units[0], units[0]) else: if not units[0].same_dimensions_as(units[1]): raise YTUnitOperationError(ufunc, *units) inps = (inps[0], ret_class(inps[1]).to( ret_class(inps[0]).units)) return inps, units def handle_comparison_units(inps, units, ufunc, ret_class, raise_error=False): if units[0] != units[1]: u1d = units[0].is_dimensionless u2d = units[1].is_dimensionless any_nonzero = [np.any(inps[0]), np.any(inps[1])] if any_nonzero[0] == np.bool_(False): units = (units[1], units[1]) elif any_nonzero[1] == np.bool_(False): units = (units[0], units[0]) elif not any([u1d, u2d]): if not units[0].same_dimensions_as(units[1]): raise YTUnitOperationError(ufunc, *units) else: if raise_error: raise YTUfuncUnitError(ufunc, *units) inps = (inps[0], ret_class(inps[1]).to( ret_class(inps[0]).units)) return inps, units def handle_multiply_divide_units(unit, units, out, out_arr): if unit.is_dimensionless and unit.base_value != 1.0: if not units[0].is_dimensionless: if units[0].dimensions == units[1].dimensions: out_arr = np.multiply(out_arr.view(np.ndarray), unit.base_value, out=out) unit = Unit(registry=unit.registry) return out, out_arr, unit def coerce_iterable_units(input_object): if isinstance(input_object, np.ndarray): return input_object if iterable(input_object): if any([isinstance(o, 
YTArray) for o in input_object]): ff = getattr(input_object[0], 'units', NULL_UNIT, ) if any([ff != getattr(_, 'units', NULL_UNIT) for _ in input_object]): raise YTIterableUnitCoercionError(input_object) # This will create a copy of the data in the iterable. return YTArray(input_object) return input_object else: return input_object def sanitize_units_mul(this_object, other_object): inp = coerce_iterable_units(this_object) ret = coerce_iterable_units(other_object) # If the other object is a YTArray and has the same dimensions as the object # under consideration, convert so we don't mix units with the same # dimensions. if isinstance(ret, YTArray): if inp.units.same_dimensions_as(ret.units): ret.in_units(inp.units) return ret def sanitize_units_add(this_object, other_object, op_string): inp = coerce_iterable_units(this_object) ret = coerce_iterable_units(other_object) # Make sure the other object is a YTArray before we use the `units` # attribute. if isinstance(ret, YTArray): if not inp.units.same_dimensions_as(ret.units): # handle special case of adding or subtracting with zero or # array filled with zero if not np.any(other_object): return ret.view(np.ndarray) elif not np.any(this_object): return ret raise YTUnitOperationError(op_string, inp.units, ret.units) ret = ret.in_units(inp.units) else: # If the other object is not a YTArray, then one of the arrays must be # dimensionless or filled with zeros if not inp.units.is_dimensionless and np.any(ret): raise YTUnitOperationError(op_string, inp.units, dimensionless) return ret def validate_comparison_units(this, other, op_string): # Check that other is a YTArray. if hasattr(other, 'units'): if this.units.expr is other.units.expr: if this.units.base_value == other.units.base_value: return other if not this.units.same_dimensions_as(other.units): raise YTUnitOperationError(op_string, this.units, other.units) return other.in_units(this.units) return other @lru_cache(maxsize=128, typed=False) def _unit_repr_check_same(my_units, other_units): """ Takes a Unit object, or string of known unit symbol, and check that it is compatible with this quantity. Returns Unit object. """ # let Unit() handle units arg if it's not already a Unit obj. 
if not isinstance(other_units, Unit): other_units = Unit(other_units, registry=my_units.registry) equiv_dims = em_dimensions.get(my_units.dimensions, None) if equiv_dims == other_units.dimensions: if current_mks in equiv_dims.free_symbols: base = "SI" else: base = "CGS" raise YTEquivalentDimsError(my_units, other_units, base) if not my_units.same_dimensions_as(other_units): raise YTUnitConversionError( my_units, my_units.dimensions, other_units, other_units.dimensions) return other_units unary_operators = ( negative, absolute, rint, sign, conj, exp, exp2, log, log2, log10, expm1, log1p, sqrt, square, reciprocal, sin, cos, tan, arcsin, arccos, arctan, sinh, cosh, tanh, arcsinh, arccosh, arctanh, deg2rad, rad2deg, invert, logical_not, isreal, iscomplex, isfinite, isinf, isnan, signbit, floor, ceil, trunc, modf, frexp, fabs, spacing, positive, isnat, ) binary_operators = ( add, subtract, multiply, divide, logaddexp, logaddexp2, true_divide, power, remainder, mod, arctan2, hypot, bitwise_and, bitwise_or, bitwise_xor, left_shift, right_shift, greater, greater_equal, less, less_equal, not_equal, equal, logical_and, logical_or, logical_xor, maximum, minimum, fmax, fmin, copysign, nextafter, ldexp, fmod, divmod_, heaviside ) trigonometric_operators = ( sin, cos, tan, ) class YTArray(np.ndarray): """ An ndarray subclass that attaches a symbolic unit object to the array data. Parameters ---------- input_array : :obj:`!iterable` A tuple, list, or array to attach units to input_units : String unit specification, unit symbol object, or astropy units The units of the array. Powers must be specified using python syntax (cm**3, not cm^3). registry : ~yt.units.unit_registry.UnitRegistry The registry to create units from. If input_units is already associated with a unit registry and this is specified, this will be used instead of the registry associated with the unit object. dtype : data-type The dtype of the array data. Defaults to the dtype of the input data, or, if none is found, uses np.float64 bypass_validation : boolean If True, all input validation is skipped. Using this option may produce corrupted, invalid units or array data, but can lead to significant speedups in the input validation logic adds significant overhead. If set, input_units *must* be a valid unit object. Defaults to False. Examples -------- >>> from yt import YTArray >>> a = YTArray([1, 2, 3], 'cm') >>> b = YTArray([4, 5, 6], 'm') >>> a + b YTArray([ 401., 502., 603.]) cm >>> b + a YTArray([ 4.01, 5.02, 6.03]) m NumPy ufuncs will pass through units where appropriate. >>> import numpy as np >>> a = YTArray(np.arange(8) - 4, 'g/cm**3') >>> np.abs(a) YTArray([4, 3, 2, 1, 0, 1, 2, 3]) g/cm**3 and strip them when it would be annoying to deal with them. >>> np.log10(a) array([ -inf, 0. 
, 0.30103 , 0.47712125, 0.60205999, 0.69897 , 0.77815125, 0.84509804]) YTArray is tightly integrated with yt datasets: >>> import yt >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030') >>> a = ds.arr(np.ones(5), 'code_length') >>> a.in_cgs() YTArray([ 3.08600000e+24, 3.08600000e+24, 3.08600000e+24, 3.08600000e+24, 3.08600000e+24]) cm This is equivalent to: >>> b = YTArray(np.ones(5), 'code_length', registry=ds.unit_registry) >>> np.all(a == b) True """ _ufunc_registry = { add: preserve_units, subtract: preserve_units, multiply: multiply_units, divide: divide_units, logaddexp: return_without_unit, logaddexp2: return_without_unit, true_divide: divide_units, floor_divide: divide_units, negative: passthrough_unit, power: power_unit, remainder: preserve_units, mod: preserve_units, fmod: preserve_units, absolute: passthrough_unit, fabs: passthrough_unit, rint: return_without_unit, sign: return_without_unit, conj: passthrough_unit, exp: return_without_unit, exp2: return_without_unit, log: return_without_unit, log2: return_without_unit, log10: return_without_unit, expm1: return_without_unit, log1p: return_without_unit, sqrt: sqrt_unit, square: square_unit, reciprocal: reciprocal_unit, sin: return_without_unit, cos: return_without_unit, tan: return_without_unit, sinh: return_without_unit, cosh: return_without_unit, tanh: return_without_unit, arcsin: return_without_unit, arccos: return_without_unit, arctan: return_without_unit, arctan2: arctan2_unit, arcsinh: return_without_unit, arccosh: return_without_unit, arctanh: return_without_unit, hypot: preserve_units, deg2rad: return_without_unit, rad2deg: return_without_unit, bitwise_and: bitop_units, bitwise_or: bitop_units, bitwise_xor: bitop_units, invert: invert_units, left_shift: bitop_units, right_shift: bitop_units, greater: comparison_unit, greater_equal: comparison_unit, less: comparison_unit, less_equal: comparison_unit, not_equal: comparison_unit, equal: comparison_unit, logical_and: comparison_unit, logical_or: comparison_unit, logical_xor: comparison_unit, logical_not: return_without_unit, maximum: preserve_units, minimum: preserve_units, fmax: preserve_units, fmin: preserve_units, isreal: return_without_unit, iscomplex: return_without_unit, isfinite: return_without_unit, isinf: return_without_unit, isnan: return_without_unit, signbit: return_without_unit, copysign: passthrough_unit, nextafter: preserve_units, modf: passthrough_unit, ldexp: bitop_units, frexp: return_without_unit, floor: passthrough_unit, ceil: passthrough_unit, trunc: passthrough_unit, spacing: passthrough_unit, positive: passthrough_unit, divmod_: passthrough_unit, isnat: return_without_unit, heaviside: preserve_units, } __array_priority__ = 2.0 def __new__(cls, input_array, input_units=None, registry=None, dtype=None, bypass_validation=False): if dtype is None: dtype = getattr(input_array, 'dtype', np.float64) if bypass_validation is True: obj = np.asarray(input_array, dtype=dtype).view(cls) obj.units = input_units if registry is not None: obj.units.registry = registry return obj if input_array is NotImplemented: return input_array.view(cls) if registry is None and isinstance(input_units, (str, bytes)): if input_units.startswith('code_'): raise UnitParseError( "Code units used without referring to a dataset. 
\n" "Perhaps you meant to do something like this instead: \n" "ds.arr(%s, \"%s\")" % (input_array, input_units) ) if isinstance(input_array, YTArray): ret = input_array.view(cls) if input_units is None: if registry is None: ret.units = input_array.units else: units = Unit(str(input_array.units), registry=registry) ret.units = units elif isinstance(input_units, Unit): ret.units = input_units else: ret.units = Unit(input_units, registry=registry) return ret elif isinstance(input_array, np.ndarray): pass elif iterable(input_array) and input_array: if isinstance(input_array[0], YTArray): return YTArray(np.array(input_array, dtype=dtype), input_array[0].units, registry=registry) # Input array is an already formed ndarray instance # We first cast to be our class type obj = np.asarray(input_array, dtype=dtype).view(cls) # Check units type if input_units is None: # Nothing provided. Make dimensionless... units = Unit() elif isinstance(input_units, Unit): if registry and registry is not input_units.registry: units = Unit(str(input_units), registry=registry) else: units = input_units else: # units kwarg set, but it's not a Unit object. # don't handle all the cases here, let the Unit class handle if # it's a str. units = Unit(input_units, registry=registry) # Attach the units obj.units = units return obj def __repr__(self): """ """ return super(YTArray, self).__repr__()+' '+self.units.__repr__() def __str__(self): """ """ return str(self.view(np.ndarray)) + ' ' + str(self.units) # # Start unit conversion methods # def convert_to_units(self, units): """ Convert the array and units to the given units. Parameters ---------- units : Unit object or str The units you want to convert to. """ new_units = _unit_repr_check_same(self.units, units) (conversion_factor, offset) = self.units.get_conversion_factor(new_units) self.units = new_units values = self.d values *= conversion_factor if offset: np.subtract(self, offset*self.uq, self) return self def convert_to_base(self, unit_system="cgs"): """ Convert the array and units to the equivalent base units in the specified unit system. Parameters ---------- unit_system : string, optional The unit system to be used in the conversion. If not specified, the default base units of cgs are used. Examples -------- >>> E = YTQuantity(2.5, "erg/s") >>> E.convert_to_base(unit_system="galactic") """ return self.convert_to_units(self.units.get_base_equivalent(unit_system)) def convert_to_cgs(self): """ Convert the array and units to the equivalent cgs units. """ return self.convert_to_units(self.units.get_cgs_equivalent()) def convert_to_mks(self): """ Convert the array and units to the equivalent mks units. """ return self.convert_to_units(self.units.get_mks_equivalent()) def in_units(self, units, equivalence=None, **kwargs): """ Creates a copy of this array with the data in the supplied units, and returns it. Optionally, an equivalence can be specified to convert to an equivalent quantity which is not in the same dimensions. .. note:: All additional keyword arguments are passed to the equivalency, which should be used if that particular equivalency requires them. Parameters ---------- units : Unit object or string The units you want to get a new quantity in. equivalence : string, optional The equivalence you wish to use. To see which equivalencies are supported for this unitful quantity, try the :meth:`list_equivalencies` method. 
Default: None Returns ------- YTArray """ if equivalence is None: new_units = _unit_repr_check_same(self.units, units) (conversion_factor, offset) = self.units.get_conversion_factor(new_units) new_array = type(self)(self.ndview * conversion_factor, new_units) if offset: np.subtract(new_array, offset*new_array.uq, new_array) return new_array else: return self.to_equivalent(units, equivalence, **kwargs) def to(self, units, equivalence=None, **kwargs): """ An alias for YTArray.in_units(). See the docstrings of that function for details. """ return self.in_units(units, equivalence=equivalence, **kwargs) def to_value(self, units=None, equivalence=None, **kwargs): """ Creates a copy of this array with the data in the supplied units, and returns it without units. Output is therefore a bare NumPy array. Optionally, an equivalence can be specified to convert to an equivalent quantity which is not in the same dimensions. .. note:: All additional keyword arguments are passed to the equivalency, which should be used if that particular equivalency requires them. Parameters ---------- units : Unit object or string, optional The units you want to get the bare quantity in. If not specified, the value will be returned in the current units. equivalence : string, optional The equivalence you wish to use. To see which equivalencies are supported for this unitful quantity, try the :meth:`list_equivalencies` method. Default: None Returns ------- NumPy array """ if units is None: v = self.value else: v = self.in_units(units, equivalence=equivalence, **kwargs).value if isinstance(self, YTQuantity): return float(v) else: return v def in_base(self, unit_system="cgs"): """ Creates a copy of this array with the data in the specified unit system, and returns it in that system's base units. Parameters ---------- unit_system : string, optional The unit system to be used in the conversion. If not specified, the default base units of cgs are used. Examples -------- >>> E = YTQuantity(2.5, "erg/s") >>> E_new = E.in_base(unit_system="galactic") """ return self.in_units(self.units.get_base_equivalent(unit_system)) def in_cgs(self): """ Creates a copy of this array with the data in the equivalent cgs units, and returns it. Returns ------- Quantity object with data converted to cgs units. """ return self.in_units(self.units.get_cgs_equivalent()) def in_mks(self): """ Creates a copy of this array with the data in the equivalent mks units, and returns it. Returns ------- Quantity object with data converted to mks units. """ return self.in_units(self.units.get_mks_equivalent()) def to_equivalent(self, unit, equiv, **kwargs): """ Convert a YTArray or YTQuantity to an equivalent, e.g., something that is related by only a constant factor but not in the same units. Parameters ---------- unit : string The unit that you wish to convert to. equiv : string The equivalence you wish to use. To see which equivalencies are supported for this unitful quantity, try the :meth:`list_equivalencies` method. 
Examples -------- >>> a = yt.YTArray(1.0e7,"K") >>> a.to_equivalent("keV", "thermal") """ conv_unit = Unit(unit, registry=self.units.registry) if self.units.same_dimensions_as(conv_unit): return self.in_units(conv_unit) this_equiv = equivalence_registry[equiv]() oneway_or_equivalent = ( conv_unit.has_equivalent(equiv) or this_equiv._one_way) if self.has_equivalent(equiv) and oneway_or_equivalent: new_arr = this_equiv.convert( self, conv_unit.dimensions, **kwargs) if isinstance(new_arr, tuple): try: return type(self)(new_arr[0], new_arr[1]).in_units(unit) except YTUnitConversionError: raise YTInvalidUnitEquivalence(equiv, self.units, unit) else: return new_arr.in_units(unit) else: raise YTInvalidUnitEquivalence(equiv, self.units, unit) def list_equivalencies(self): """ Lists the possible equivalencies associated with this YTArray or YTQuantity. """ self.units.list_equivalencies() def has_equivalent(self, equiv): """ Check to see if this YTArray or YTQuantity has an equivalent unit in *equiv*. """ return self.units.has_equivalent(equiv) def ndarray_view(self): """ Returns a view into the array, but as an ndarray rather than ytarray. Returns ------- View of this array's data. """ return self.view(np.ndarray) def to_ndarray(self): """ Creates a copy of this array with the unit information stripped """ return np.array(self) @classmethod def from_astropy(cls, arr, unit_registry=None): """ Convert an AstroPy "Quantity" to a YTArray or YTQuantity. Parameters ---------- arr : AstroPy Quantity The Quantity to convert from. unit_registry : yt UnitRegistry, optional A yt unit registry to use in the conversion. If one is not supplied, the default one will be used. """ # Converting from AstroPy Quantity u = arr.unit ap_units = [] for base, exponent in zip(u.bases, u.powers): unit_str = base.to_string() # we have to do this because AstroPy is silly and defines # hour as "h" if unit_str == "h": unit_str = "hr" ap_units.append("%s**(%s)" % (unit_str, Rational(exponent))) ap_units = "*".join(ap_units) if isinstance(arr.value, np.ndarray): return YTArray(arr.value, ap_units, registry=unit_registry) else: return YTQuantity(arr.value, ap_units, registry=unit_registry) def to_astropy(self, **kwargs): """ Creates a new AstroPy quantity with the same unit information. """ if _astropy.units is None: raise ImportError("You don't have AstroPy installed, so you can't convert to " + "an AstroPy quantity.") return self.value*_astropy.units.Unit(str(self.units), **kwargs) @classmethod def from_pint(cls, arr, unit_registry=None): """ Convert a Pint "Quantity" to a YTArray or YTQuantity. Parameters ---------- arr : Pint Quantity The Quantity to convert from. unit_registry : yt UnitRegistry, optional A yt unit registry to use in the conversion. If one is not supplied, the default one will be used. Examples -------- >>> from pint import UnitRegistry >>> import numpy as np >>> ureg = UnitRegistry() >>> a = np.random.random(10) >>> b = ureg.Quantity(a, "erg/cm**3") >>> c = yt.YTArray.from_pint(b) """ p_units = [] for base, exponent in arr._units.items(): bs = convert_pint_units(base) p_units.append("%s**(%s)" % (bs, Rational(exponent))) p_units = "*".join(p_units) if isinstance(arr.magnitude, np.ndarray): return YTArray(arr.magnitude, p_units, registry=unit_registry) else: return YTQuantity(arr.magnitude, p_units, registry=unit_registry) def to_pint(self, unit_registry=None): """ Convert a YTArray or YTQuantity to a Pint Quantity. Parameters ---------- arr : YTArray or YTQuantity The unitful quantity to convert from. 
unit_registry : Pint UnitRegistry, optional The Pint UnitRegistry to use in the conversion. If one is not supplied, the default one will be used. NOTE: This is not the same as a yt UnitRegistry object. Examples -------- >>> a = YTQuantity(4.0, "cm**2/s") >>> b = a.to_pint() """ from pint import UnitRegistry if unit_registry is None: unit_registry = UnitRegistry() powers_dict = self.units.expr.as_powers_dict() units = [] for unit, pow in powers_dict.items(): # we have to do this because Pint doesn't recognize # "yr" as "year" if str(unit).endswith("yr") and len(str(unit)) in [2,3]: unit = str(unit).replace("yr","year") units.append("%s**(%s)" % (unit, Rational(pow))) units = "*".join(units) return unit_registry.Quantity(self.value, units) # # End unit conversion methods # def write_hdf5(self, filename, dataset_name=None, info=None, group_name=None): r"""Writes a YTArray to hdf5 file. Parameters ---------- filename: string The filename to create and write a dataset to dataset_name: string The name of the dataset to create in the file. info: dictionary A dictionary of supplementary info to write to append as attributes to the dataset. group_name: string An optional group to write the arrays to. If not specified, the arrays are datasets at the top level by default. Examples -------- >>> a = YTArray([1,2,3], 'cm') >>> myinfo = {'field':'dinosaurs', 'type':'field_data'} >>> a.write_hdf5('test_array_data.h5', dataset_name='dinosaurs', ... info=myinfo) """ from yt.utilities.on_demand_imports import _h5py as h5py from yt.extern.six.moves import cPickle as pickle if info is None: info = {} info['units'] = str(self.units) info['unit_registry'] = np.void(pickle.dumps(self.units.registry.lut)) if dataset_name is None: dataset_name = 'array_data' f = h5py.File(filename) if group_name is not None: if group_name in f: g = f[group_name] else: g = f.create_group(group_name) else: g = f if dataset_name in g.keys(): d = g[dataset_name] # Overwrite without deleting if we can get away with it. if d.shape == self.shape and d.dtype == self.dtype: d[...] = self for k in d.attrs.keys(): del d.attrs[k] else: del f[dataset_name] d = g.create_dataset(dataset_name, data=self) else: d = g.create_dataset(dataset_name, data=self) for k, v in info.items(): d.attrs[k] = v f.close() @classmethod def from_hdf5(cls, filename, dataset_name=None, group_name=None): r"""Attempts read in and convert a dataset in an hdf5 file into a YTArray. Parameters ---------- filename: string The filename to of the hdf5 file. dataset_name: string The name of the dataset to read from. If the dataset has a units attribute, attempt to infer units as well. group_name: string An optional group to read the arrays from. If not specified, the arrays are datasets at the top level by default. 
""" import h5py from yt.extern.six.moves import cPickle as pickle if dataset_name is None: dataset_name = 'array_data' f = h5py.File(filename) if group_name is not None: g = f[group_name] else: g = f dataset = g[dataset_name] data = dataset[:] units = dataset.attrs.get('units', '') if 'unit_registry' in dataset.attrs.keys(): unit_lut = pickle.loads(dataset.attrs['unit_registry'].tostring()) else: unit_lut = None f.close() registry = UnitRegistry(lut=unit_lut, add_default_symbols=False) return cls(data, units, registry=registry) # # Start convenience methods # @property def value(self): """Get a copy of the array data as a numpy ndarray""" return np.array(self) v = value @property def ndview(self): """Get a view of the array data.""" return self.ndarray_view() d = ndview @property def unit_quantity(self): """Get a YTQuantity with the same unit as this array and a value of 1.0""" return YTQuantity(1.0, self.units) uq = unit_quantity @property def unit_array(self): """Get a YTArray filled with ones with the same unit and shape as this array""" return np.ones_like(self) ua = unit_array def __getitem__(self, item): ret = super(YTArray, self).__getitem__(item) if ret.shape == (): return YTQuantity(ret, self.units, bypass_validation=True) else: if hasattr(self, 'units'): ret.units = self.units return ret # # Start operation methods # if LooseVersion(np.__version__) < LooseVersion('1.13.0'): def __add__(self, right_object): """ Add this ytarray to the object on the right of the `+` operator. Must check for the correct (same dimension) units. """ ro = sanitize_units_add(self, right_object, "addition") return super(YTArray, self).__add__(ro) def __radd__(self, left_object): """ See __add__. """ lo = sanitize_units_add(self, left_object, "addition") return super(YTArray, self).__radd__(lo) def __iadd__(self, other): """ See __add__. """ oth = sanitize_units_add(self, other, "addition") np.add(self, oth, out=self) return self def __sub__(self, right_object): """ Subtract the object on the right of the `-` from this ytarray. Must check for the correct (same dimension) units. """ ro = sanitize_units_add(self, right_object, "subtraction") return super(YTArray, self).__sub__(ro) def __rsub__(self, left_object): """ See __sub__. """ lo = sanitize_units_add(self, left_object, "subtraction") return super(YTArray, self).__rsub__(lo) def __isub__(self, other): """ See __sub__. """ oth = sanitize_units_add(self, other, "subtraction") np.subtract(self, oth, out=self) return self def __neg__(self): """ Negate the data. """ return super(YTArray, self).__neg__() def __mul__(self, right_object): """ Multiply this YTArray by the object on the right of the `*` operator. The unit objects handle being multiplied. """ ro = sanitize_units_mul(self, right_object) return super(YTArray, self).__mul__(ro) def __rmul__(self, left_object): """ See __mul__. """ lo = sanitize_units_mul(self, left_object) return super(YTArray, self).__rmul__(lo) def __imul__(self, other): """ See __mul__. """ oth = sanitize_units_mul(self, other) np.multiply(self, oth, out=self) return self def __div__(self, right_object): """ Divide this YTArray by the object on the right of the `/` operator. """ ro = sanitize_units_mul(self, right_object) return super(YTArray, self).__div__(ro) def __rdiv__(self, left_object): """ See __div__. """ lo = sanitize_units_mul(self, left_object) return super(YTArray, self).__rdiv__(lo) def __idiv__(self, other): """ See __div__. 
""" oth = sanitize_units_mul(self, other) np.divide(self, oth, out=self) return self def __truediv__(self, right_object): ro = sanitize_units_mul(self, right_object) return super(YTArray, self).__truediv__(ro) def __rtruediv__(self, left_object): """ See __div__. """ lo = sanitize_units_mul(self, left_object) return super(YTArray, self).__rtruediv__(lo) def __itruediv__(self, other): """ See __div__. """ oth = sanitize_units_mul(self, other) np.true_divide(self, oth, out=self) return self def __floordiv__(self, right_object): ro = sanitize_units_mul(self, right_object) return super(YTArray, self).__floordiv__(ro) def __rfloordiv__(self, left_object): """ See __div__. """ lo = sanitize_units_mul(self, left_object) return super(YTArray, self).__rfloordiv__(lo) def __ifloordiv__(self, other): """ See __div__. """ oth = sanitize_units_mul(self, other) np.floor_divide(self, oth, out=self) return self def __or__(self, right_object): return super(YTArray, self).__or__(right_object) def __ror__(self, left_object): return super(YTArray, self).__ror__(left_object) def __ior__(self, other): np.bitwise_or(self, other, out=self) return self def __xor__(self, right_object): return super(YTArray, self).__xor__(right_object) def __rxor__(self, left_object): return super(YTArray, self).__rxor__(left_object) def __ixor__(self, other): np.bitwise_xor(self, other, out=self) return self def __and__(self, right_object): return super(YTArray, self).__and__(right_object) def __rand__(self, left_object): return super(YTArray, self).__rand__(left_object) def __iand__(self, other): np.bitwise_and(self, other, out=self) return self def __pow__(self, power): """ Raise this YTArray to some power. Parameters ---------- power : float or dimensionless YTArray. The pow value. """ if isinstance(power, YTArray): if not power.units.is_dimensionless: raise YTUnitOperationError('power', power.unit) # Work around a sympy issue (I think?) # # If I don't do this, super(YTArray, self).__pow__ returns a YTArray # with a unit attribute set to the sympy expression 1/1 rather than # a dimensionless Unit object. if self.units.is_dimensionless and power == -1: ret = super(YTArray, self).__pow__(power) return type(self)(ret, input_units='') return super(YTArray, self).__pow__(power) def __abs__(self): """ Return a YTArray with the abs of the data. """ return super(YTArray, self).__abs__() # # Start comparison operators. # def __lt__(self, other): """ Test if this is less than the object on the right. """ # converts if possible oth = validate_comparison_units(self, other, 'less_than') return super(YTArray, self).__lt__(oth) def __le__(self, other): """Test if this is less than or equal to the object on the right. """ oth = validate_comparison_units(self, other, 'less_than or equal') return super(YTArray, self).__le__(oth) def __eq__(self, other): """ Test if this is equal to the object on the right. """ # Check that other is a YTArray. if other is None: # self is a YTArray, so it can't be None. return False oth = validate_comparison_units(self, other, 'equal') return super(YTArray, self).__eq__(oth) def __ne__(self, other): """ Test if this is not equal to the object on the right. """ # Check that the other is a YTArray. if other is None: return True oth = validate_comparison_units(self, other, 'not equal') return super(YTArray, self).__ne__(oth) def __ge__(self, other): """ Test if this is greater than or equal to other. """ # Check that the other is a YTArray. 
oth = validate_comparison_units( self, other, 'greater than or equal') return super(YTArray, self).__ge__(oth) def __gt__(self, other): """ Test if this is greater than the object on the right. """ # Check that the other is a YTArray. oth = validate_comparison_units(self, other, 'greater than') return super(YTArray, self).__gt__(oth) # # End comparison operators # # # Begin reduction operators # @return_arr def prod(self, axis=None, dtype=None, out=None): if axis is not None: units = self.units**self.shape[axis] else: units = self.units**self.size return super(YTArray, self).prod(axis, dtype, out), units @return_arr def mean(self, axis=None, dtype=None, out=None): return super(YTArray, self).mean(axis, dtype, out), self.units @return_arr def sum(self, axis=None, dtype=None, out=None): return super(YTArray, self).sum(axis, dtype, out), self.units @return_arr def std(self, axis=None, dtype=None, out=None, ddof=0): return super(YTArray, self).std(axis, dtype, out, ddof), self.units def __array_wrap__(self, out_arr, context=None): ret = super(YTArray, self).__array_wrap__(out_arr, context) if isinstance(ret, YTQuantity) and ret.shape != (): ret = ret.view(YTArray) if context is None: if ret.shape == (): return ret[()] else: return ret ufunc = context[0] inputs = context[1] if ufunc in unary_operators: out_arr, inp, u = get_inp_u_unary(ufunc, inputs, out_arr) unit = self._ufunc_registry[context[0]](u) ret_class = type(self) elif ufunc in binary_operators: unit_operator = self._ufunc_registry[context[0]] inps, units, ret_class = get_inp_u_binary(ufunc, inputs) if unit_operator in (preserve_units, comparison_unit, arctan2_unit): inps, units = handle_comparison_units( inps, units, ufunc, ret_class, raise_error=True) unit = unit_operator(*units) if unit_operator in (multiply_units, divide_units): out_arr, out_arr, unit = handle_multiply_divide_units( unit, units, out_arr, out_arr) else: raise RuntimeError( "Support for the %s ufunc has not been added " "to YTArray." % str(context[0])) if unit is None: out_arr = np.array(out_arr, copy=False) return out_arr out_arr.units = unit if out_arr.size == 1: return YTQuantity(np.array(out_arr), unit) else: if ret_class is YTQuantity: # This happens if you do ndarray * YTQuantity. 
Explicitly # casting to YTArray avoids creating a YTQuantity with # size > 1 return YTArray(np.array(out_arr), unit) return ret_class(np.array(out_arr, copy=False), unit) else: # numpy version equal to or newer than 1.13 def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): func = getattr(ufunc, method) if 'out' in kwargs: out_orig = kwargs.pop('out') out = np.asarray(out_orig[0]) else: out = None if len(inputs) == 1: _, inp, u = get_inp_u_unary(ufunc, inputs) out_arr = func(np.asarray(inp), out=out, **kwargs) if ufunc in (multiply, divide) and method == 'reduce': power_sign = POWER_SIGN_MAPPING[ufunc] if 'axis' in kwargs and kwargs['axis'] is not None: unit = u**(power_sign*inp.shape[kwargs['axis']]) else: unit = u**(power_sign*inp.size) else: unit = self._ufunc_registry[ufunc](u) ret_class = type(self) elif len(inputs) == 2: unit_operator = self._ufunc_registry[ufunc] inps, units, ret_class = get_inp_u_binary(ufunc, inputs) if unit_operator in (comparison_unit, arctan2_unit): inps, units = handle_comparison_units( inps, units, ufunc, ret_class) elif unit_operator is preserve_units: inps, units = handle_preserve_units( inps, units, ufunc, ret_class) unit = unit_operator(*units) out_arr = func(np.asarray(inps[0]), np.asarray(inps[1]), out=out, **kwargs) if unit_operator in (multiply_units, divide_units): out, out_arr, unit = handle_multiply_divide_units( unit, units, out, out_arr) else: raise RuntimeError( "Support for the %s ufunc with %i inputs has not been" "added to YTArray." % (str(ufunc), len(inputs))) if unit is None: out_arr = np.array(out_arr, copy=False) elif ufunc in (modf, divmod_): out_arr = tuple((ret_class(o, unit) for o in out_arr)) elif out_arr.size == 1: out_arr = YTQuantity(np.asarray(out_arr), unit) else: if ret_class is YTQuantity: # This happens if you do ndarray * YTQuantity. Explicitly # casting to YTArray avoids creating a YTQuantity with # size > 1 out_arr = YTArray(np.asarray(out_arr), unit) else: out_arr = ret_class(np.asarray(out_arr), unit) if out is not None: out_orig[0].flat[:] = out.flat[:] if isinstance(out_orig[0], YTArray): out_orig[0].units = unit return out_arr def copy(self, order='C'): return type(self)(np.copy(np.asarray(self)), self.units) def __array_finalize__(self, obj): if obj is None and hasattr(self, 'units'): return self.units = getattr(obj, 'units', NULL_UNIT) def __pos__(self): """ Posify the data. """ # this needs to be defined for all numpy versions, see # numpy issue #9081 return type(self)(super(YTArray, self).__pos__(), self.units) @return_arr def dot(self, b, out=None): return super(YTArray, self).dot(b), self.units*b.units def __reduce__(self): """Pickle reduction method See the documentation for the standard library pickle module: http://docs.python.org/2/library/pickle.html Unit metadata is encoded in the zeroth element of third element of the returned tuple, itself a tuple used to restore the state of the ndarray. This is always defined for numpy arrays. """ np_ret = super(YTArray, self).__reduce__() obj_state = np_ret[2] unit_state = (((str(self.units), self.units.registry.lut),) + obj_state[:],) new_ret = np_ret[:2] + unit_state + np_ret[3:] return new_ret def __setstate__(self, state): """Pickle setstate method This is called inside pickle.read() and restores the unit data from the metadata extracted in __reduce__ and then serialized by pickle. 
""" super(YTArray, self).__setstate__(state[1:]) try: unit, lut = state[0] except TypeError: # this case happens when we try to load an old pickle file # created before we serialized the unit symbol lookup table # into the pickle file unit, lut = str(state[0]), default_unit_symbol_lut.copy() # need to fix up the lut if the pickle was saved prior to PR #1728 # when the pickle format changed if len(lut['m']) == 2: lut.update(default_unit_symbol_lut) for k, v in [(k, v) for k, v in lut.items() if len(v) == 2]: lut[k] = v + (0.0, r'\rm{' + k.replace('_', '\ ') + '}') registry = UnitRegistry(lut=lut, add_default_symbols=False) self.units = Unit(unit, registry=registry) def __deepcopy__(self, memodict=None): """copy.deepcopy implementation This is necessary for stdlib deepcopy of arrays and quantities. """ if memodict is None: memodict = {} ret = super(YTArray, self).__deepcopy__(memodict) return type(self)(ret, copy.deepcopy(self.units)) class YTQuantity(YTArray): """ A scalar associated with a unit. Parameters ---------- input_scalar : an integer or floating point scalar The scalar to attach units to input_units : String unit specification, unit symbol object, or astropy units The units of the quantity. Powers must be specified using python syntax (cm**3, not cm^3). registry : A UnitRegistry object The registry to create units from. If input_units is already associated with a unit registry and this is specified, this will be used instead of the registry associated with the unit object. dtype : data-type The dtype of the array data. Examples -------- >>> from yt import YTQuantity >>> a = YTQuantity(1, 'cm') >>> b = YTQuantity(2, 'm') >>> a + b 201.0 cm >>> b + a 2.01 m NumPy ufuncs will pass through units where appropriate. >>> import numpy as np >>> a = YTQuantity(12, 'g/cm**3') >>> np.abs(a) 12 g/cm**3 and strip them when it would be annoying to deal with them. >>> print(np.log10(a)) 1.07918124605 YTQuantity is tightly integrated with yt datasets: >>> import yt >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030') >>> a = ds.quan(5, 'code_length') >>> a.in_cgs() 1.543e+25 cm This is equivalent to: >>> b = YTQuantity(5, 'code_length', registry=ds.unit_registry) >>> np.all(a == b) True """ def __new__(cls, input_scalar, input_units=None, registry=None, dtype=np.float64, bypass_validation=False): if not isinstance(input_scalar, (numeric_type, np.number, np.ndarray)): raise RuntimeError("YTQuantity values must be numeric") ret = YTArray.__new__(cls, input_scalar, input_units, registry, dtype=dtype, bypass_validation=bypass_validation) if ret.size > 1: raise RuntimeError("YTQuantity instances must be scalars") return ret def __repr__(self): return str(self) def validate_numpy_wrapper_units(v, arrs): if not any(isinstance(a, YTArray) for a in arrs): return v if not all(isinstance(a, YTArray) for a in arrs): raise RuntimeError("Not all of your arrays are YTArrays.") a1 = arrs[0] if not all(a.units == a1.units for a in arrs[1:]): raise RuntimeError("Your arrays must have identical units.") v.units = a1.units return v def uconcatenate(arrs, axis=0): """Concatenate a sequence of arrays. This wrapper around numpy.concatenate preserves units. All input arrays must have the same units. See the documentation of numpy.concatenate for full details. 
Examples -------- >>> A = yt.YTArray([1, 2, 3], 'cm') >>> B = yt.YTArray([2, 3, 4], 'cm') >>> uconcatenate((A, B)) YTArray([ 1., 2., 3., 2., 3., 4.]) cm """ v = np.concatenate(arrs, axis=axis) v = validate_numpy_wrapper_units(v, arrs) return v def ucross(arr1, arr2, registry=None, axisa=-1, axisb=-1, axisc=-1, axis=None): """Applies the cross product to two YT arrays. This wrapper around numpy.cross preserves units. See the documentation of numpy.cross for full details. """ v = np.cross(arr1, arr2, axisa=axisa, axisb=axisb, axisc=axisc, axis=axis) units = arr1.units * arr2.units arr = YTArray(v, units, registry=registry) return arr def uintersect1d(arr1, arr2, assume_unique=False): """Find the sorted unique elements of the two input arrays. A wrapper around numpy.intersect1d that preserves units. All input arrays must have the same units. See the documentation of numpy.intersect1d for full details. Examples -------- >>> A = yt.YTArray([1, 2, 3], 'cm') >>> B = yt.YTArray([2, 3, 4], 'cm') >>> uintersect1d(A, B) YTArray([ 2., 3.]) cm """ v = np.intersect1d(arr1, arr2, assume_unique=assume_unique) v = validate_numpy_wrapper_units(v, [arr1, arr2]) return v def uunion1d(arr1, arr2): """Find the union of two arrays. A wrapper around numpy.union1d that preserves units. All input arrays must have the same units. See the documentation of numpy.union1d for full details. Examples -------- >>> A = yt.YTArray([1, 2, 3], 'cm') >>> B = yt.YTArray([2, 3, 4], 'cm') >>> uunion1d(A, B) YTArray([ 1., 2., 3., 4.]) cm """ v = np.union1d(arr1, arr2) v = validate_numpy_wrapper_units(v, [arr1, arr2]) return v def unorm(data, ord=None, axis=None, keepdims=False): """Matrix or vector norm that preserves units This is a wrapper around np.linalg.norm that preserves units. See the documentation for that function for descriptions of the keyword arguments. The keepdims argument is ignored if the version of numpy installed is older than numpy 1.10.0. """ if LooseVersion(np.__version__) < LooseVersion('1.10.0'): norm = np.linalg.norm(data, ord=ord, axis=axis) else: norm = np.linalg.norm(data, ord=ord, axis=axis, keepdims=keepdims) if norm.shape == (): return YTQuantity(norm, data.units) return YTArray(norm, data.units) def udot(op1, op2): """Matrix or vector dot product that preserves units This is a wrapper around np.dot that preserves units. """ dot = np.dot(op1.d, op2.d) units = op1.units*op2.units if dot.shape == (): return YTQuantity(dot, units) return YTArray(dot, units) def uvstack(arrs): """Stack arrays in sequence vertically (row wise) while preserving units This is a wrapper around np.vstack that preserves units. """ v = np.vstack(arrs) v = validate_numpy_wrapper_units(v, arrs) return v def uhstack(arrs): """Stack arrays in sequence horizontally (column wise) while preserving units This is a wrapper around np.hstack that preserves units. """ v = np.hstack(arrs) v = validate_numpy_wrapper_units(v, arrs) return v def ustack(arrs, axis=0): """Join a sequence of arrays along a new axis while preserving units The axis parameter specifies the index of the new axis in the dimensions of the result. For example, if ``axis=0`` it will be the first dimension and if ``axis=-1`` it will be the last dimension. This is a wrapper around np.stack that preserves units.
""" v = np.stack(arrs) v = validate_numpy_wrapper_units(v, arrs) return v def array_like_field(data, x, field): field = data._determine_fields(field)[0] if isinstance(field, tuple): finfo = data.ds._get_field_info(field[0],field[1]) else: finfo = data.ds._get_field_info(field) if finfo.sampling_type == 'particle': units = finfo.output_units else: units = finfo.units if isinstance(x, YTArray): arr = copy.deepcopy(x) arr.convert_to_units(units) return arr if isinstance(x, np.ndarray): return data.ds.arr(x, units) else: return data.ds.quan(x, units) def get_binary_op_return_class(cls1, cls2): if cls1 is cls2: return cls1 if cls1 in (np.ndarray, np.matrix, np.ma.masked_array) or issubclass(cls1, (numeric_type, np.number, list, tuple)): return cls2 if cls2 in (np.ndarray, np.matrix, np.ma.masked_array) or issubclass(cls2, (numeric_type, np.number, list, tuple)): return cls1 if issubclass(cls1, YTQuantity): return cls2 if issubclass(cls2, YTQuantity): return cls1 if issubclass(cls1, cls2): return cls1 if issubclass(cls2, cls1): return cls2 else: raise RuntimeError("Undefined operation for a YTArray subclass. " "Received operand types (%s) and (%s)" % (cls1, cls2)) def loadtxt(fname, dtype='float', delimiter='\t', usecols=None, comments='#'): r""" Load YTArrays with unit information from a text file. Each row in the text file must have the same number of values. Parameters ---------- fname : str Filename to read. dtype : data-type, optional Data-type of the resulting array; default: float. delimiter : str, optional The string used to separate values. By default, this is any whitespace. usecols : sequence, optional Which columns to read, with 0 being the first. For example, ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns. The default, None, results in all columns being read. comments : str, optional The character used to indicate the start of a comment; default: '#'. Examples -------- >>> temp, velx = yt.loadtxt("sphere.dat", usecols=(1,2), delimiter="\t") """ f = open(fname, 'r') next_one = False units = [] num_cols = -1 for line in f.readlines(): words = line.strip().split() if len(words) == 0: continue if line[0] == comments: if next_one: units = words[1:] if len(words) == 2 and words[1] == "Units": next_one = True else: # Here we catch the first line of numbers try: col_words = line.strip().split(delimiter) for word in col_words: float(word) num_cols = len(col_words) break except ValueError: mylog.warning("Unrecognized character at beginning of line: \"%s\"." % line[0]) f.close() if len(units) != num_cols: mylog.warning("Malformed or incomplete units header. Arrays will be " "dimensionless!") units = ["dimensionless"]*num_cols arrays = np.loadtxt(fname, dtype=dtype, comments=comments, delimiter=delimiter, converters=None, unpack=True, usecols=usecols, ndmin=0) if usecols is not None: units = [units[col] for col in usecols] mylog.info("Array units: %s" % ", ".join(units)) return tuple([YTArray(arr, unit) for arr, unit in zip(arrays, units)]) def savetxt(fname, arrays, fmt='%.18e', delimiter='\t', header='', footer='', comments='#'): r""" Write YTArrays with unit information to a text file. Parameters ---------- fname : str The file to write the YTArrays to. arrays : list of YTArrays or single YTArray The array(s) to write to the file. fmt : str or sequence of strs, optional A single format (%10.5f), or a sequence of formats. delimiter : str, optional String or character separating columns. 
header : str, optional String that will be written at the beginning of the file, before the unit header. footer : str, optional String that will be written at the end of the file. comments : str, optional String that will be prepended to the ``header`` and ``footer`` strings, to mark them as comments. Default: '# ', as expected by e.g. ``yt.loadtxt``. Examples -------- >>> sp = ds.sphere("c", (100,"kpc")) >>> a = sp["density"] >>> b = sp["temperature"] >>> c = sp["velocity_x"] >>> yt.savetxt("sphere.dat", [a,b,c], header='My sphere stuff', delimiter="\t") """ if not isinstance(arrays, list): arrays = [arrays] units = [] for array in arrays: if hasattr(array, "units"): units.append(str(array.units)) else: units.append("dimensionless") if header != '': header += '\n' header += " Units\n " + '\t'.join(units) np.savetxt(fname,
np.transpose(arrays)
numpy.transpose
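For context, the completion above hands np.savetxt one column per input array. A minimal self-contained sketch of the same pattern (toy values and a hypothetical output file name, not taken from the dataset row):

import numpy as np

# np.savetxt writes row by row, so a list of same-length 1-D arrays
# must be transposed to land one array per column in the file.
a = np.array([1.0, 2.0, 3.0])
b = np.array([10.0, 20.0, 30.0])
np.savetxt("columns.dat", np.transpose([a, b]), fmt="%.18e", delimiter="\t")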
"""Test the search module""" from collections.abc import Iterable, Sized from io import StringIO from itertools import chain, product from functools import partial import pickle import sys from types import GeneratorType import re import numpy as np import scipy.sparse as sp import pytest from sklearn.utils.fixes import sp_version from sklearn.utils._testing import assert_raises from sklearn.utils._testing import assert_warns from sklearn.utils._testing import assert_warns_message from sklearn.utils._testing import assert_raise_message from sklearn.utils._testing import assert_array_equal from sklearn.utils._testing import assert_array_almost_equal from sklearn.utils._testing import assert_allclose from sklearn.utils._testing import assert_almost_equal from sklearn.utils._testing import ignore_warnings from sklearn.utils._mocking import CheckingClassifier, MockDataFrame from scipy.stats import bernoulli, expon, uniform from sklearn.base import BaseEstimator, ClassifierMixin from sklearn.base import clone from sklearn.exceptions import NotFittedError from sklearn.datasets import make_classification from sklearn.datasets import make_blobs from sklearn.datasets import make_multilabel_classification from sklearn.model_selection import fit_grid_point from sklearn.model_selection import train_test_split from sklearn.model_selection import KFold from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import StratifiedShuffleSplit from sklearn.model_selection import LeaveOneGroupOut from sklearn.model_selection import LeavePGroupsOut from sklearn.model_selection import GroupKFold from sklearn.model_selection import GroupShuffleSplit from sklearn.model_selection import GridSearchCV from sklearn.model_selection import RandomizedSearchCV from sklearn.model_selection import ParameterGrid from sklearn.model_selection import ParameterSampler from sklearn.model_selection._search import BaseSearchCV from sklearn.model_selection._validation import FitFailedWarning from sklearn.svm import LinearSVC, SVC from sklearn.tree import DecisionTreeRegressor from sklearn.tree import DecisionTreeClassifier from sklearn.cluster import KMeans from sklearn.neighbors import KernelDensity from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import f1_score from sklearn.metrics import recall_score from sklearn.metrics import accuracy_score from sklearn.metrics import make_scorer from sklearn.metrics import roc_auc_score from sklearn.metrics.pairwise import euclidean_distances from sklearn.impute import SimpleImputer from sklearn.pipeline import Pipeline from sklearn.linear_model import Ridge, SGDClassifier, LinearRegression from sklearn.experimental import enable_hist_gradient_boosting # noqa from sklearn.ensemble import HistGradientBoostingClassifier from sklearn.model_selection.tests.common import OneTimeSplitter # Neither of the following two estimators inherit from BaseEstimator, # to test hyperparameter search on user-defined classifiers. class MockClassifier: """Dummy classifier to test the parameter search algorithms""" def __init__(self, foo_param=0): self.foo_param = foo_param def fit(self, X, Y): assert len(X) == len(Y) self.classes_ = np.unique(Y) return self def predict(self, T): return T.shape[0] def transform(self, X): return X + self.foo_param def inverse_transform(self, X): return X - self.foo_param predict_proba = predict predict_log_proba = predict decision_function = predict def score(self, X=None, Y=None): if self.foo_param > 1: score = 1. else: score = 0. 
return score def get_params(self, deep=False): return {'foo_param': self.foo_param} def set_params(self, **params): self.foo_param = params['foo_param'] return self class LinearSVCNoScore(LinearSVC): """An LinearSVC classifier that has no score method.""" @property def score(self): raise AttributeError X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) y = np.array([1, 1, 2, 2]) def assert_grid_iter_equals_getitem(grid): assert list(grid) == [grid[i] for i in range(len(grid))] @pytest.mark.parametrize("klass", [ParameterGrid, partial(ParameterSampler, n_iter=10)]) @pytest.mark.parametrize( "input, error_type, error_message", [(0, TypeError, r'Parameter .* is not a dict or a list \(0\)'), ([{'foo': [0]}, 0], TypeError, r'Parameter .* is not a dict \(0\)'), ({'foo': 0}, TypeError, "Parameter.* value is not iterable .*" r"\(key='foo', value=0\)")] ) def test_validate_parameter_input(klass, input, error_type, error_message): with pytest.raises(error_type, match=error_message): klass(input) def test_parameter_grid(): # Test basic properties of ParameterGrid. params1 = {"foo": [1, 2, 3]} grid1 = ParameterGrid(params1) assert isinstance(grid1, Iterable) assert isinstance(grid1, Sized) assert len(grid1) == 3 assert_grid_iter_equals_getitem(grid1) params2 = {"foo": [4, 2], "bar": ["ham", "spam", "eggs"]} grid2 = ParameterGrid(params2) assert len(grid2) == 6 # loop to assert we can iterate over the grid multiple times for i in range(2): # tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2) points = set(tuple(chain(*(sorted(p.items())))) for p in grid2) assert (points == set(("bar", x, "foo", y) for x, y in product(params2["bar"], params2["foo"]))) assert_grid_iter_equals_getitem(grid2) # Special case: empty grid (useful to get default estimator settings) empty = ParameterGrid({}) assert len(empty) == 1 assert list(empty) == [{}] assert_grid_iter_equals_getitem(empty) assert_raises(IndexError, lambda: empty[1]) has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}]) assert len(has_empty) == 4 assert list(has_empty) == [{'C': 1}, {'C': 10}, {}, {'C': .5}] assert_grid_iter_equals_getitem(has_empty) def test_grid_search(): # Test that the best estimator contains the right value for foo_param clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=3, verbose=3) # make sure it selects the smallest parameter in case of ties old_stdout = sys.stdout sys.stdout = StringIO() grid_search.fit(X, y) sys.stdout = old_stdout assert grid_search.best_estimator_.foo_param == 2 assert_array_equal(grid_search.cv_results_["param_foo_param"].data, [1, 2, 3]) # Smoke test the score etc: grid_search.score(X, y) grid_search.predict_proba(X) grid_search.decision_function(X) grid_search.transform(X) # Test exception handling on scoring grid_search.scoring = 'sklearn' assert_raises(ValueError, grid_search.fit, X, y) def test_grid_search_pipeline_steps(): # check that parameters that are estimators are cloned before fitting pipe = Pipeline([('regressor', LinearRegression())]) param_grid = {'regressor': [LinearRegression(), Ridge()]} grid_search = GridSearchCV(pipe, param_grid, cv=2) grid_search.fit(X, y) regressor_results = grid_search.cv_results_['param_regressor'] assert isinstance(regressor_results[0], LinearRegression) assert isinstance(regressor_results[1], Ridge) assert not hasattr(regressor_results[0], 'coef_') assert not hasattr(regressor_results[1], 'coef_') assert regressor_results[0] is not grid_search.best_estimator_ assert regressor_results[1] is not 
grid_search.best_estimator_ # check that we didn't modify the parameter grid that was passed assert not hasattr(param_grid['regressor'][0], 'coef_') assert not hasattr(param_grid['regressor'][1], 'coef_') @pytest.mark.parametrize("SearchCV", [GridSearchCV, RandomizedSearchCV]) def test_SearchCV_with_fit_params(SearchCV): X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = CheckingClassifier(expected_fit_params=['spam', 'eggs']) searcher = SearchCV( clf, {'foo_param': [1, 2, 3]}, cv=2, error_score="raise" ) # The CheckingClassifier generates an assertion error if # a parameter is missing or has length != len(X). err_msg = r"Expected fit parameter\(s\) \['eggs'\] not seen." with pytest.raises(AssertionError, match=err_msg): searcher.fit(X, y, spam=np.ones(10)) err_msg = "Fit parameter spam has length 1; expected" with pytest.raises(AssertionError, match=err_msg): searcher.fit(X, y, spam=np.ones(1), eggs=np.zeros(10)) searcher.fit(X, y, spam=np.ones(10), eggs=np.zeros(10)) @ignore_warnings def test_grid_search_no_score(): # Test grid-search on classifier that has no score function. clf = LinearSVC(random_state=0) X, y = make_blobs(random_state=0, centers=2) Cs = [.1, 1, 10] clf_no_score = LinearSVCNoScore(random_state=0) grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy') grid_search.fit(X, y) grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs}, scoring='accuracy') # smoketest grid search grid_search_no_score.fit(X, y) # check that best params are equal assert grid_search_no_score.best_params_ == grid_search.best_params_ # check that we can call score and that it gives the correct result assert grid_search.score(X, y) == grid_search_no_score.score(X, y) # giving no scoring function raises an error grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs}) assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit, [[1]]) def test_grid_search_score_method(): X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2, random_state=0) clf = LinearSVC(random_state=0) grid = {'C': [.1]} search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y) search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y) search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid, scoring='roc_auc' ).fit(X, y) search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y) # Check warning only occurs in situation where behavior changed: # estimator requires score method to compete with scoring parameter score_no_scoring = search_no_scoring.score(X, y) score_accuracy = search_accuracy.score(X, y) score_no_score_auc = search_no_score_method_auc.score(X, y) score_auc = search_auc.score(X, y) # ensure the test is sane assert score_auc < 1.0 assert score_accuracy < 1.0 assert score_auc != score_accuracy assert_almost_equal(score_accuracy, score_no_scoring) assert_almost_equal(score_auc, score_no_score_auc) def test_grid_search_groups(): # Check if ValueError (when groups is None) propagates to GridSearchCV # And also check if groups is correctly passed to the cv object rng = np.random.RandomState(0) X, y = make_classification(n_samples=15, n_classes=2, random_state=0) groups = rng.randint(0, 3, 15) clf = LinearSVC(random_state=0) grid = {'C': [1]} group_cvs = [LeaveOneGroupOut(), LeavePGroupsOut(2), GroupKFold(n_splits=3), GroupShuffleSplit()] for cv in group_cvs: gs = GridSearchCV(clf, grid, cv=cv) assert_raise_message(ValueError, "The 'groups' parameter should not be None.", gs.fit, X, y) gs.fit(X, y, groups=groups) non_group_cvs 
= [StratifiedKFold(), StratifiedShuffleSplit()] for cv in non_group_cvs: gs = GridSearchCV(clf, grid, cv=cv) # Should not raise an error gs.fit(X, y) def test_classes__property(): # Test that classes_ property matches best_estimator_.classes_ X =
np.arange(100)
numpy.arange
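The completed call mirrors the fixture built earlier in this test file (X = np.arange(100).reshape(10, 10)); a self-contained sketch of what it evaluates to:

import numpy as np

# np.arange(100) enumerates the integers 0..99; reshaping gives the
# 10x10 feature matrix paired with a balanced binary target.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
assert X.shape == (10, 10) and list(y).count(1) == 5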
import numpy as np import sys import os from PIL import Image from visu.helper_functions import save_image from scipy.spatial.transform import Rotation as R from helper import re_quat import copy import torch import k3d class Visualizer(): def __init__(self, p_visu, writer=None): if p_visu[-1] != '/': p_visu = p_visu + '/' self.p_visu = p_visu self.writer = writer if not os.path.exists(self.p_visu): os.makedirs(self.p_visu) def plot_estimated_pose(self, tag, epoch, img, points, trans=[[0, 0, 0]], rot_mat=[[1, 0, 0], [0, 1, 0], [0, 0, 1]], cam_cx=0, cam_cy=0, cam_fx=0, cam_fy=0, store=False, jupyter=False, w=2): """ tag := tensorboard tag epoch := tensorboard epoch store := true -> stores the image to standard path path := != None creates the path and stores to it path/tag.png img:= original_image, [width,height,RGB] points:= points of the object model [length,x,y,z] trans: [1,3] rot: [3,3] """ img_d = copy.deepcopy(img) points = np.dot(points, rot_mat.T) points = np.add(points, trans[0, :]) for i in range(0, points.shape[0]): p_x = points[i, 0] p_y = points[i, 1] p_z = points[i, 2] u = int(((p_x / p_z) * cam_fx) + cam_cx) v = int(((p_y / p_z) * cam_fy) + cam_cy) try: img_d[v - w:v + w + 1, u - w:u + w + 1, 0] = 0 img_d[v - w:v + w + 1, u - w:u + w + 1, 1] = 255 img_d[v - w:v + w + 1, u - w:u + w + 1, 2] = 0 except: #print("out of bounds") pass if jupyter: display(Image.fromarray(img_d)) if store: #store_ar = (img_d* 255).round().astype(np.uint8) #print("IMAGE D:" ,img_d,img_d.shape ) save_image(img_d, tag=str(epoch) + tag, p_store=self.p_visu) if self.writer is not None: self.writer.add_image(tag, img_d.astype( np.uint8), global_step=epoch, dataformats='HWC') def plot_bounding_box(self, tag, epoch, img, rmin=0, rmax=0, cmin=0, cmax=0, str_width=2, store=False, jupyter=False, b=None): """ tag := tensorboard tag epoch := tensorboard epoch store := true -> stores the image to standard path path := != None creates the path and stores to it path/tag.png img:= original_image, [width,height,RGB] """ if isinstance(b, dict): rmin = b['rmin'] rmax = b['rmax'] cmin = b['cmin'] cmax = b['cmax'] # ToDo check Input data img_d = np.array(copy.deepcopy(img)) c = [0, 0, 255] rmin_mi = max(0, rmin - str_width) rmin_ma = min(img_d.shape[0], rmin + str_width) rmax_mi = max(0, rmax - str_width) rmax_ma = min(img_d.shape[0], rmax + str_width) cmin_mi = max(0, cmin - str_width) cmin_ma = min(img_d.shape[1], cmin + str_width) cmax_mi = max(0, cmax - str_width) cmax_ma = min(img_d.shape[1], cmax + str_width) img_d[rmin_mi:rmin_ma, cmin:cmax, :] = c img_d[rmax_mi:rmax_ma, cmin:cmax, :] = c img_d[rmin:rmax, cmin_mi:cmin_ma, :] = c img_d[rmin:rmax, cmax_mi:cmax_ma, :] = c print("STORE", store) img_d = img_d.astype(np.uint8) if store: #store_ar = (img_d* 255).round().astype(np.uint8) save_image(img_d, tag=str(epoch) + tag, p_store=self.p_visu) if jupyter: display(Image.fromarray(img_d)) if self.writer is not None: self.writer.add_image(tag, img_d.astype( np.uint8), global_step=epoch, dataformats='HWC') def plot_pcd(x, point_size=0.005, c='g'): """ x: point_nr,3 """ if c == 'b': k = 245 elif c == 'g': k = 25811000 elif c == 'r': k = 11801000 elif c == 'black': k = 2580 else: k = 2580 colors = np.ones(x.shape[0]) * k plot = k3d.plot(name='points') plt_points = k3d.points(x, colors.astype(np.uint32), point_size=point_size) plot += plt_points plt_points.shader = '3d' plot.display() def plot_two_pcd(x, y, point_size=0.005, c1='g', c2='r'): if c1 == 'b': k = 245 elif c1 == 'g': k = 25811000 elif c1 == 'r':
k = 11801000 elif c1 == 'black': k = 2580 else: k = 2580 if c2 == 'b': k2 = 245 elif c2 == 'g': k2 = 25811000 elif c2 == 'r': k2 = 11801000 elif c2 == 'black': k2 = 2580 else: k2 = 2580 col1 = np.ones(x.shape[0]) * k col2 = np.ones(y.shape[0]) * k2 plot = k3d.plot(name='points') plt_points = k3d.points(x, col1.astype(np.uint32), point_size=point_size) plot += plt_points plt_points = k3d.points(y, col2.astype(np.uint32), point_size=point_size) plot += plt_points plt_points.shader = '3d' plot.display() class SequenceVisualizer(): def __init__(self, seq_data, images_path, output_path=None): self.seq_data = seq_data self.images_path = images_path self.output_path = output_path def plot_points_on_image(self, seq_no, frame_no, jupyter=False, store=False, pose_type='filtered'): seq_data = self.seq_data images_path = self.images_path output_path = self.output_path frame = seq_data[seq_no][frame_no] unique_desig = frame['dl_dict']['unique_desig'][0] if pose_type == 'ground_truth': # ground truth t = frame['dl_dict']['gt_trans'].reshape(1, 3) rot_quat = re_quat(copy.deepcopy( frame['dl_dict']['gt_rot_wxyz'][0]), 'wxyz') rot = R.from_quat(rot_quat).as_matrix() elif pose_type == 'filtered': # filter pred t = np.array(frame['filter_pred']['t']).reshape(1, 3) rot_quat = re_quat(copy.deepcopy( frame['filter_pred']['r_wxyz']), 'wxyz') rot = R.from_quat(rot_quat).as_matrix() elif pose_type == 'final_pred_obs': # final pred t = np.array(frame['final_pred_obs']['t']).reshape(1, 3) rot_quat = re_quat(copy.deepcopy( frame['final_pred_obs']['r_wxyz']), 'wxyz') rot = R.from_quat(rot_quat).as_matrix() else: raise Exception('Pose type not implemented.') w = 2 if type(unique_desig) != str: im = np.array(Image.open( images_path + unique_desig[0] + '-color.png')) # ycb else: im = np.array(Image.open( images_path + unique_desig + '.png')) # laval img_d = copy.deepcopy(im) dl_dict = frame['dl_dict'] points = copy.deepcopy( seq_data[seq_no][0]['dl_dict']['model_points'][0, :, :]) points =
np.dot(points, rot.T)
numpy.dot
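The completion applies the estimated rotation to every model point at once: right-multiplying an (N, 3) point array by rot.T is the row-wise form of rot @ p. A small sketch with a made-up 90-degree rotation:

import numpy as np

# points @ rot.T rotates each row vector p by rot (i.e. rot @ p).
rot = np.array([[0., -1., 0.],
                [1., 0., 0.],
                [0., 0., 1.]])  # 90 degrees about the z-axis
points = np.array([[1., 0., 0.],
                   [0., 1., 0.]])
rotated = np.dot(points, rot.T)
assert np.allclose(rotated, [[0., 1., 0.], [-1., 0., 0.]])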
# pvtrace is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # pvtrace is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import numpy as np from external.transformations import translation_matrix, rotation_matrix import external.transformations as tf from Trace import Photon from Geometry import Box, Cylinder, FinitePlane, transform_point, transform_direction, rotation_matrix_from_vector_alignment, norm from Materials import Spectrum def random_spherecial_vector(): # This method of calculating isotropic vectors is taken from GNU Scientific Library LOOP = True while LOOP: x = -1. + 2. * np.random.uniform() y = -1. + 2. * np.random.uniform() s = x**2 + y**2 if s <= 1.0: LOOP = False z = -1. + 2. * s a = 2 * np.sqrt(1 - s) x = a * x y = a * y return np.array([x,y,z]) class SimpleSource(object): """A light source that will generate photons of a single colour, direction and position.""" def __init__(self, position=[0,0,0], direction=[0,0,1], wavelength=555, use_random_polarisation=False): super(SimpleSource, self).__init__() self.position = position self.direction = direction self.wavelength = wavelength self.use_random_polarisation = use_random_polarisation self.throw = 0 self.source_id = "SimpleSource_" + str(id(self)) def photon(self): photon = Photon() photon.source = self.source_id photon.position = np.array(self.position) photon.direction = np.array(self.direction) photon.active = True photon.wavelength = self.wavelength # If use_polarisation is set generate a random polarisation vector of the photon if self.use_random_polarisation: # Randomise rotation angle around xy-plane, the transform from +z to the direction of the photon vec = random_spherecial_vector() vec[2] = 0. vec = norm(vec) R = rotation_matrix_from_vector_alignment(self.direction, [0,0,1]) photon.polarisation = transform_direction(vec, R) else: photon.polarisation = None photon.id = self.throw self.throw = self.throw + 1 return photon class Laser(object): """A light source that will generate photons of a single colour, direction and position.""" def __init__(self, position=[0,0,0], direction=[0,0,1], wavelength=555, polarisation=None): super(Laser, self).__init__() self.position = np.array(position) self.direction = np.array(direction) self.wavelength = wavelength assert polarisation != None, "Polarisation of the Laser is not set." 
self.polarisation = np.array(polarisation) self.throw = 0 self.source_id = "LaserSource_" + str(id(self)) def photon(self): photon = Photon() photon.source = self.source_id photon.position = np.array(self.position) photon.direction = np.array(self.direction) photon.active = True photon.wavelength = self.wavelength photon.polarisation = self.polarisation photon.id = self.throw self.throw = self.throw + 1 return photon class PlanarSource(object): """A box that emits photons from the top surface (normal), sampled from the spectrum.""" def __init__(self, spectrum=None, wavelength=555, direction=(0,0,1), length=0.05, width=0.05): super(PlanarSource, self).__init__() self.spectrum = spectrum self.wavelength = wavelength self.plane = FinitePlane(length=length, width=width) self.length = length self.width = width # direction is the direction that photons are fired out of the plane in the GLOBAL FRAME. # i.e. this is passed directly to the photon to set is's direction self.direction = direction self.throw = 0 self.source_id = "PlanarSource_" + str(id(self)) def translate(self, translation): self.plane.append_transform(tf.translation_matrix(translation)) def rotate(self, angle, axis): self.plane.append_transform(tf.rotation_matrix(angle, axis)) def photon(self): photon = Photon() photon.source = self.source_id photon.id = self.throw self.throw = self.throw + 1 # Create a point which is on the surface of the finite plane in it's local frame x = np.random.uniform(0., self.length) y = np.random.uniform(0., self.width) local_point = (x, y, 0.) # Transform the direciton photon.position = transform_point(local_point, self.plane.transform) photon.direction = self.direction photon.active = True if self.spectrum != None: photon.wavelength = self.spectrum.wavelength_at_probability(np.random.uniform()) else: photon.wavelength = self.wavelength return photon class LensSource(object): """ A source where photons generated in a plane are focused on a line with space tolerance given by variable "focussize". The focus line should be perpendicular to the plane normal and aligned with the z-axis. 
""" def __init__(self, spectrum = None, wavelength = 555, linepoint=(0,0,0), linedirection=(0,0,1), focussize = 0, planeorigin = (-1,-1,-1), planeextent = (-1,1,1)): super(LensSource, self).__init__() self.spectrum = spectrum self.wavelength = wavelength self.planeorigin = planeorigin self.planeextent = planeextent self.linepoint = np.array(linepoint) self.linedirection = np.array(linedirection) self.focussize = focussize self.throw = 0 self.source_id = "LensSource_" + str(id(self)) def photon(self): photon = Photon() photon.source = self.source_id photon.id = self.throw self.throw = self.throw + 1 # Position x = np.random.uniform(self.planeorigin[0],self.planeextent[0]) y = np.random.uniform(self.planeorigin[1],self.planeextent[1]) z = np.random.uniform(self.planeorigin[2],self.planeextent[2]) photon.position = np.array((x,y,z)) # Direction focuspoint = np.array((0.,0.,0.)) focuspoint[0] = self.linepoint[0] + np.random.uniform(-self.focussize,self.focussize) focuspoint[1] = self.linepoint[1] + np.random.uniform(-self.focussize,self.focussize) focuspoint[2] = photon.position[2] direction = focuspoint - photon.position modulus = (direction[0]**2+direction[1]**2+direction[2]**2)**0.5 photon.direction = direction/modulus # Wavelength if self.spectrum != None: photon.wavelength = self.spectrum.wavelength_at_probability(np.random.uniform()) else: photon.wavelength = self.wavelength return photon class LensSourceAngle(object): """ A source where photons generated in a plane are focused on a line with space tolerance given by variable "focussize". The focus line should be perpendicular to the plane normal and aligned with the z-axis. For this lense an additional z-boost is added (Angle of incidence in z-direction). """ def __init__(self, spectrum = None, wavelength = 555, linepoint=(0,0,0), linedirection=(0,0,1), angle = 0, focussize = 0, planeorigin = (-1,-1,-1), planeextent = (-1,1,1)): super(LensSourceAngle, self).__init__() self.spectrum = spectrum self.wavelength = wavelength self.planeorigin = planeorigin self.planeextent = planeextent self.linepoint = np.array(linepoint) self.linedirection = np.array(linedirection) self.focussize = focussize self.angle = angle self.throw = 0 self.source_id = "LensSourceAngle_" + str(id(self)) def photon(self): photon = Photon() photon.id = self.throw self.throw = self.throw + 1 # Position x = np.random.uniform(self.planeorigin[0],self.planeextent[0]) y = np.random.uniform(self.planeorigin[1],self.planeextent[1]) boost = y*np.tan(self.angle) z = np.random.uniform(self.planeorigin[2],self.planeextent[2]) - boost photon.position = np.array((x,y,z)) # Direction focuspoint = np.array((0.,0.,0.)) focuspoint[0] = self.linepoint[0] + np.random.uniform(-self.focussize,self.focussize) focuspoint[1] = self.linepoint[1] + np.random.uniform(-self.focussize,self.focussize) focuspoint[2] = photon.position[2] + boost direction = focuspoint - photon.position modulus = (direction[0]**2+direction[1]**2+direction[2]**2)**0.5 photon.direction = direction/modulus # Wavelength if self.spectrum != None: photon.wavelength = self.spectrum.wavelength_at_probability(np.random.uniform()) else: photon.wavelength = self.wavelength return photon class CylindricalSource(object): """ A source for photons emitted in a random direction and position inside a cylinder(radius, length) """ def __init__(self, spectrum = None, wavelength = 555, radius = 1, length = 10): super(CylindricalSource, self).__init__() self.spectrum = spectrum self.wavelength = wavelength self.shape = Cylinder(radius = 
radius, length = length) self.radius = radius self.length = length self.throw = 0 self.source_id = "CylindricalSource_" + str(id(self)) def translate(self, translation): self.shape.append_transform(tf.translation_matrix(translation)) def rotate(self, angle, axis): self.shape.append_transform(tf.rotation_matrix(angle, axis)) def photon(self): photon = Photon() photon.source = self.source_id photon.id = self.throw self.throw = self.throw + 1 # Position of emission phi = np.random.uniform(0., 2*np.pi) r = np.random.uniform(0.,self.radius) x = r*np.cos(phi) y = r*np.sin(phi) z = np.random.uniform(0.,self.length) local_center = (x,y,z) photon.position = transform_point(local_center, self.shape.transform) # Direction of emission (no need to transform if meant to be isotropic) phi = np.random.uniform(0.,2*np.pi) theta = np.random.uniform(0.,np.pi) x = np.cos(phi)*np.sin(theta) y = np.sin(phi)*np.sin(theta) z = np.cos(theta) local_direction = (x,y,z) photon.direction = local_direction # Set wavelength of photon if self.spectrum != None: photon.wavelength = self.spectrum.wavelength_at_probability(np.random.uniform()) else: photon.wavelength = self.wavelength # Further initialisation photon.active = True return photon class PointSource(object): """ A point source that emits randomly in solid angle specified by phimin, ..., thetamax """ def __init__(self, spectrum = None, wavelength = 555, center = (0.,0.,0.), phimin = 0, phimax = 2*np.pi, thetamin = 0, thetamax = np.pi): super(PointSource, self).__init__() self.spectrum = spectrum self.wavelength = wavelength self.center = center self.phimin = phimin self.phimax = phimax self.thetamin = thetamin self.thetamax = thetamax self.throw = 0 self.source_id = "PointSource_" + str(id(self)) def photon(self): photon = Photon() photon.source = self.source_id photon.id = self.throw self.throw = self.throw + 1 phi = np.random.uniform(self.phimin, self.phimax) theta = np.random.uniform(self.thetamin, self.thetamax) x = np.cos(phi)*np.sin(theta) y = np.sin(phi)*
np.sin(theta)
numpy.sin
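The completion finishes the spherical-to-Cartesian conversion for the emission direction in PointSource.photon; the resulting direction vector is always unit length:

import numpy as np

# (phi, theta) -> Cartesian unit vector, as in PointSource.photon.
phi = np.random.uniform(0., 2 * np.pi)
theta = np.random.uniform(0., np.pi)
direction = np.array([np.cos(phi) * np.sin(theta),
                      np.sin(phi) * np.sin(theta),
                      np.cos(theta)])
assert np.isclose(np.linalg.norm(direction), 1.0)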
import numpy as np import tensorflow as tf H = 2 N = 2 M = 3 BS = 10 def my_softmax(arr): max_elements = np.reshape(np.max(arr, axis = 2), (BS, N, 1)) arr = arr - max_elements exp_array = np.exp(arr) print(exp_array) sum_array = np.reshape(np.sum(exp_array, axis=2), (BS, N, 1)) return exp_array /sum_array def masked_softmax(logits, mask, dim): """ Takes masked softmax over given dimension of logits. Inputs: logits: Numpy array. We want to take softmax over dimension dim. mask: Numpy array of same shape as logits. Has 1s where there's real data in logits, 0 where there's padding dim: int. dimension over which to take softmax Returns: masked_logits: Numpy array same shape as logits. This is the same as logits, but with 1e30 subtracted (i.e. very large negative number) in the padding locations. prob_dist: Numpy array same shape as logits. The result of taking softmax over masked_logits in given dimension. Should be 0 in padding locations. Should sum to 1 over given dimension. """ exp_mask = (1 - tf.cast(mask, 'float64')) * (-1e30) # -large where there's padding, 0 elsewhere print(exp_mask) masked_logits = tf.add(logits, exp_mask) # where there's padding, set logits to -large prob_dist = tf.nn.softmax(masked_logits, dim) return masked_logits, prob_dist def test_build_similarity(contexts, questions): w_sim_1 = tf.get_variable('w_sim_1', initializer=w_1) # 2 * H w_sim_2 = tf.get_variable('w_sim_2', initializer=w_2) # 2 * self.hidden_size w_sim_3 = tf.get_variable('w_sim_3', initializer=w_3) # 2 * self.hidden_size q_tile = tf.tile(tf.expand_dims(questions, 0), [N, 1, 1, 1]) # N x BS x M x 2H q_tile = tf.transpose(q_tile, (1, 0, 3, 2)) # BS x N x 2H x M contexts = tf.expand_dims(contexts, -1) # BS x N x 2H x 1 result = (contexts * q_tile) # BS x N x 2H x M tf.assert_equal(tf.shape(result), [BS, N, 2 * H, M]) result = tf.transpose(result, (0, 1, 3, 2)) # BS x N x M x 2H result = tf.reshape(result, (-1, N * M, 2 * H)) # BS x (NxM) x 2H tf.assert_equal(tf.shape(result), [BS, N*M, 2*H]) # w_sim_1 = tf.tile(tf.expand_dims(w_sim_1, 0), [BS, 1]) # w_sim_2 = tf.tile(tf.expand_dims(w_sim_2, 0), [BS, 1]) # w_sim_3 = tf.tile(tf.expand_dims(w_sim_3, 0), [BS, 1]) term1 = tf.matmul(tf.reshape(contexts, (BS * N, 2*H)), tf.expand_dims(w_sim_1, -1)) # BS x N term1 = tf.reshape(term1, (-1, N)) term2 = tf.matmul(tf.reshape(questions, (BS * M, 2*H)), tf.expand_dims(w_sim_2, -1)) # BS x M term2 = tf.reshape(term2, (-1, M)) term3 = tf.matmul(tf.reshape(result, (BS * N * M, 2* H)), tf.expand_dims(w_sim_3, -1)) term3 = tf.reshape(term3, (-1, N, M)) # BS x N x M S = tf.reshape(term1,(-1, N, 1)) + term3 + tf.reshape(term2, (-1, 1, M)) return S def test_build_sim_mask(): context_mask = np.array([True, True]) # BS x N question_mask = np.array([True, True, False]) # BS x M context_mask = np.tile(context_mask, [BS, 1]) question_mask = np.tile(question_mask, [BS, 1]) context_mask = tf.get_variable('context_mask', initializer=context_mask) question_mask = tf.get_variable('question_mask', initializer=question_mask) context_mask = tf.expand_dims(context_mask, -1) # BS x N x 1 question_mask = tf.expand_dims(question_mask, -1) # BS x M x 1 question_mask = tf.transpose(question_mask, (0, 2, 1)) # BS x 1 x M sim_mask = tf.matmul(tf.cast(context_mask, dtype=tf.int32), tf.cast(question_mask, dtype=tf.int32)) # BS x N x M return sim_mask def test_build_c2q(S, S_mask, questions): _, alpha = masked_softmax(S, S_mask, 2) # BS x N x M return tf.matmul(alpha, questions) def test_build_q2c(S, S_mask, contexts): # S = BS x N x M # contexts = BS
x N x 2H m = tf.reduce_max(S * tf.cast(S_mask, dtype=tf.float64), axis=2) # BS x N beta = tf.expand_dims(tf.nn.softmax(m), -1) # BS x N x 1 beta = tf.transpose(beta, (0, 2, 1)) q2c = tf.matmul(beta, contexts) return m, beta, q2c def test_concatenation(c2q, q2c): q2c = tf.tile(q2c, (1, N, 1)) output = tf.concat([c2q, q2c], axis=2) tf.assert_equal(tf.shape(output), [BS, N, 4*H]) return output if __name__ == "__main__": w_1 = np.array([1., 2., 3., 4.]) w_2 = np.array([5., 6., 7., 8.]) w_3 = np.array([13., 12., 11., 10.]) c = np.array([[[1., 2., 3., 4.], [5., 6., 7., 8.]]]) # BS x N x 2H q = np.array([[[1., 2., 3., 0.], [5., 6., 7., 4.], [8., 9., 10., 11.]]]) # BS x M x 2H c = np.tile(c, [BS, 1, 1]) q =
np.tile(q, [BS, 1, 1])
numpy.tile
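The completion replicates the single hand-written example along the batch dimension so the shapes match the (BS, N, 2H) and (BS, M, 2H) comments; a minimal sketch:

import numpy as np

BS = 10
q = np.array([[[1., 2., 3., 0.],
               [5., 6., 7., 4.],
               [8., 9., 10., 11.]]])  # shape (1, M=3, 2H=4)
q = np.tile(q, [BS, 1, 1])            # repeat BS times along axis 0
assert q.shape == (10, 3, 4)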
import argparse import json import numpy as np import pandas as pd import os from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report,f1_score from keras.models import Sequential from keras.layers import Dense, Dropout from keras import backend as K from keras.utils.vis_utils import plot_model from sklearn.externals import joblib import time def f1(y_true, y_pred): def recall(y_true, y_pred): """Recall metric. Only computes a batch-wise average of recall. Computes the recall, a metric for multi-label classification of how many relevant items are selected. """ true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) possible_positives = K.sum(K.round(K.clip(y_true, 0, 1))) recall = true_positives / (possible_positives + K.epsilon()) return recall def precision(y_true, y_pred): """Precision metric. Only computes a batch-wise average of precision. Computes the precision, a metric for multi-label classification of how many selected items are relevant. """ true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1))) precision = true_positives / (predicted_positives + K.epsilon()) return precision precision = precision(y_true, y_pred) recall = recall(y_true, y_pred) return 2*((precision*recall)/(precision+recall+K.epsilon())) def get_embeddings(sentences_list,layer_json): ''' :param sentences_list: the path o the sentences.txt :param layer_json: the path of the json file that contains the embeddings of the sentences :return: Dictionary with key each sentence of the sentences_list and as value the embedding ''' sentences = dict()#dict with key the index of each line of the sentences_list.txt and as value the sentence embeddings = dict()##dict with key the index of each sentence and as value the its embedding sentence_emb = dict()#key:sentence,value:its embedding with open(sentences_list,'r') as file: for index,line in enumerate(file): sentences[index] = line.strip() with open(layer_json, 'r',encoding='utf-8') as f: for line in f: embeddings[json.loads(line)['linex_index']] = np.asarray(json.loads(line)['features']) for key,value in sentences.items(): sentence_emb[value] = embeddings[key] return sentence_emb def train_classifier(sentences_list,layer_json,dataset_csv,filename): ''' :param sentences_list: the path o the sentences.txt :param layer_json: the path of the json file that contains the embeddings of the sentences :param dataset_csv: the path of the dataset :param filename: The path of the pickle file that the model will be stored :return: ''' dataset = pd.read_csv(dataset_csv) bert_dict = get_embeddings(sentences_list,layer_json) length = list() sentence_emb = list() previous_emb = list() next_list = list() section_list = list() label = list() errors = 0 for row in dataset.iterrows(): sentence = row[1][0].strip() previous = row[1][1].strip() nexts = row[1][2].strip() section = row[1][3].strip() if sentence in bert_dict: sentence_emb.append(bert_dict[sentence]) else: sentence_emb.append(np.zeros(768)) print(sentence) errors += 1 if previous in bert_dict: previous_emb.append(bert_dict[previous]) else: previous_emb.append(np.zeros(768)) if nexts in bert_dict: next_list.append(bert_dict[nexts]) else: next_list.append(np.zeros(768)) if section in bert_dict: section_list.append(bert_dict[section]) else: section_list.append(np.zeros(768)) length.append(row[1][4]) label.append(row[1][5]) sentence_emb = np.asarray(sentence_emb) 
print(sentence_emb.shape) next_emb = np.asarray(next_list) print(next_emb.shape) previous_emb = np.asarray(previous_emb) print(previous_emb.shape) section_emb = np.asarray(section_list) print(sentence_emb.shape) length = np.asarray(length) print(length.shape) label = np.asarray(label) print(errors) features = np.concatenate([sentence_emb, previous_emb, next_emb,section_emb], axis=1) features = np.column_stack([features, length]) # np.append(features,length,axis=1) print(features.shape) X_train, X_val, y_train, y_val = train_test_split(features, label, test_size=0.33, random_state=42) log = LogisticRegression(random_state=0, solver='newton-cg', max_iter=1000, C=0.1) log.fit(X_train, y_train) #save the model _ = joblib.dump(log, filename, compress=9) predictions = log.predict(X_val) print("###########################################") print("Results using embeddings from the",layer_json,"file") print(classification_report(y_val, predictions)) print("F1 score using Logistic Regression:",f1_score(y_val, predictions)) print("###########################################") #train a DNN f1_results = list() for i in range(3): model = Sequential() model.add(Dense(64, activation='relu', trainable=True)) model.add(Dense(128, activation='relu', trainable=True)) model.add(Dropout(0.30)) model.add(Dense(64, activation='relu', trainable=True)) model.add(Dropout(0.25)) model.add(Dense(64, activation='relu', trainable=True)) model.add(Dropout(0.35)) model.add(Dense(1, activation='sigmoid')) # compile network model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=[f1]) # fit network model.fit(X_train, y_train, epochs=100, batch_size=64) loss, f_1 = model.evaluate(X_val, y_val, verbose=1) print('\nTest F1: %f' % (f_1 * 100)) f1_results.append(f_1) model = None print("###########################################") print("Results using embeddings from the", layer_json, "file") # evaluate print(np.mean(f1_results)) print("###########################################") def parameter_tuning_LR(sentences_list,layer_json,dataset_csv): ''' :param sentences_list: the path o the sentences.txt :param layer_json: the path of the json file that contains the embeddings of the sentences :param dataset_csv: the path of the dataset :return: ''' dataset = pd.read_csv(dataset_csv) bert_dict = get_embeddings(sentences_list,layer_json) length = list() sentence_emb = list() previous_emb = list() next_list = list() section_list = list() label = list() errors = 0 for row in dataset.iterrows(): sentence = row[1][0].strip() previous = row[1][1].strip() nexts = row[1][2].strip() section = row[1][3].strip() if sentence in bert_dict: sentence_emb.append(bert_dict[sentence]) else: sentence_emb.append(np.zeros(768)) print(sentence) errors += 1 if previous in bert_dict: previous_emb.append(bert_dict[previous]) else: previous_emb.append(np.zeros(768)) if nexts in bert_dict: next_list.append(bert_dict[nexts]) else: next_list.append(np.zeros(768)) if section in bert_dict: section_list.append(bert_dict[section]) else: section_list.append(np.zeros(768)) length.append(row[1][4]) label.append(row[1][5]) sentence_emb = np.asarray(sentence_emb) print(sentence_emb.shape) next_emb = np.asarray(next_list) print(next_emb.shape) previous_emb = np.asarray(previous_emb) print(previous_emb.shape) section_emb = np.asarray(section_list) print(sentence_emb.shape) length = np.asarray(length) print(length.shape) label = np.asarray(label) print(errors) features = np.concatenate([sentence_emb, previous_emb, next_emb,section_emb], axis=1) features = 
np.column_stack([features, length]) print(features.shape) X_train, X_val, y_train, y_val = train_test_split(features, label, test_size=0.33, random_state=42) C = [0.1,1,2,5,10] solver = ['newton-cg','saga','sag'] best_params = dict() best_score = 0.0 for c in C: for s in solver: start = time.time() log = LogisticRegression(random_state=0, solver=s, max_iter=1000, C=c) log.fit(X_train, y_train) predictions = log.predict(X_val) print("###########################################") print("LR with C =",c,'and solver = ',s) print("Results using embeddings from the", layer_json, "file") print(classification_report(y_val, predictions)) f1 = f1_score(y_val, predictions) if f1 > best_score: best_score = f1 best_params['c'] = c best_params['solver'] = s print("F1 score using Logistic Regression:",f1) print("###########################################") end = time.time() running_time = end - start print("Running time:"+str(running_time)) def visualize_DNN(file_to_save): ''' Save the DNN architecture to a png file. Better use the Visulize_DNN.ipynd :param file_to_save: the png file that the architecture of the DNN will be saved. :return: None ''' model = Sequential() model.add(Dense(64, activation='relu', trainable=True)) model.add(Dense(128, activation='relu', trainable=True)) model.add(Dropout(0.30)) model.add(Dense(64, activation='relu', trainable=True)) model.add(Dropout(0.25)) model.add(Dense(64, activation='relu', trainable=True)) model.add(Dropout(0.35)) model.add(Dense(1, activation='sigmoid')) plot_model(model, to_file=file_to_save, show_shapes=True) def save_model(sentences_list,layer_json,dataset_csv,pkl): dataset = pd.read_csv(dataset_csv) bert_dict = get_embeddings(sentences_list, layer_json) length = list() sentence_emb = list() previous_emb = list() next_list = list() section_list = list() label = list() errors = 0 for row in dataset.iterrows(): sentence = row[1][0].strip() previous = row[1][1].strip() nexts = row[1][2].strip() section = row[1][3].strip() if sentence in bert_dict: sentence_emb.append(bert_dict[sentence]) else: sentence_emb.append(np.zeros(768)) print(sentence) errors += 1 if previous in bert_dict: previous_emb.append(bert_dict[previous]) else: previous_emb.append(np.zeros(768)) if nexts in bert_dict: next_list.append(bert_dict[nexts]) else: next_list.append(np.zeros(768)) if section in bert_dict: section_list.append(bert_dict[section]) else: section_list.append(np.zeros(768)) length.append(row[1][4]) label.append(row[1][5]) sentence_emb =
np.asarray(sentence_emb)
numpy.asarray
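The completion converts the accumulated Python list of 768-dimensional embeddings into a single 2-D array so it can be concatenated with the other feature blocks; a toy sketch:

import numpy as np

# A list of fixed-length vectors stacks into an (n_samples, 768) array.
sentence_emb = [np.zeros(768), np.ones(768)]  # toy embeddings
sentence_emb = np.asarray(sentence_emb)
assert sentence_emb.shape == (2, 768)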
"""Routines for numerical differentiation.""" from __future__ import division import numpy as np from numpy.linalg import norm from scipy.sparse.linalg import LinearOperator from ..sparse import issparse, csc_matrix, csr_matrix, coo_matrix, find from ._group_columns import group_dense, group_sparse EPS = np.finfo(np.float64).eps def _adjust_scheme_to_bounds(x0, h, num_steps, scheme, lb, ub): """Adjust final difference scheme to the presence of bounds. Parameters ---------- x0 : ndarray, shape (n,) Point at which we wish to estimate derivative. h : ndarray, shape (n,) Desired finite difference steps. num_steps : int Number of `h` steps in one direction required to implement finite difference scheme. For example, 2 means that we need to evaluate f(x0 + 2 * h) or f(x0 - 2 * h) scheme : {'1-sided', '2-sided'} Whether steps in one or both directions are required. In other words '1-sided' applies to forward and backward schemes, '2-sided' applies to center schemes. lb : ndarray, shape (n,) Lower bounds on independent variables. ub : ndarray, shape (n,) Upper bounds on independent variables. Returns ------- h_adjusted : ndarray, shape (n,) Adjusted step sizes. Step size decreases only if a sign flip or switching to one-sided scheme doesn't allow to take a full step. use_one_sided : ndarray of bool, shape (n,) Whether to switch to one-sided scheme. Informative only for ``scheme='2-sided'``. """ if scheme == '1-sided': use_one_sided = np.ones_like(h, dtype=bool) elif scheme == '2-sided': h = np.abs(h) use_one_sided = np.zeros_like(h, dtype=bool) else: raise ValueError("`scheme` must be '1-sided' or '2-sided'.") if np.all((lb == -np.inf) & (ub == np.inf)): return h, use_one_sided h_total = h * num_steps h_adjusted = h.copy() lower_dist = x0 - lb upper_dist = ub - x0 if scheme == '1-sided': x = x0 + h_total violated = (x < lb) | (x > ub) fitting = np.abs(h_total) <= np.maximum(lower_dist, upper_dist) h_adjusted[violated & fitting] *= -1 forward = (upper_dist >= lower_dist) & ~fitting h_adjusted[forward] = upper_dist[forward] / num_steps backward = (upper_dist < lower_dist) & ~fitting h_adjusted[backward] = -lower_dist[backward] / num_steps elif scheme == '2-sided': central = (lower_dist >= h_total) & (upper_dist >= h_total) forward = (upper_dist >= lower_dist) & ~central h_adjusted[forward] = np.minimum( h[forward], 0.5 * upper_dist[forward] / num_steps) use_one_sided[forward] = True backward = (upper_dist < lower_dist) & ~central h_adjusted[backward] = -np.minimum( h[backward], 0.5 * lower_dist[backward] / num_steps) use_one_sided[backward] = True min_dist = np.minimum(upper_dist, lower_dist) / num_steps adjusted_central = (~central & (np.abs(h_adjusted) <= min_dist)) h_adjusted[adjusted_central] = min_dist[adjusted_central] use_one_sided[adjusted_central] = False return h_adjusted, use_one_sided relative_step = {"2-point": EPS**0.5, "3-point": EPS**(1/3), "cs": EPS**0.5} def _compute_absolute_step(rel_step, x0, method): if rel_step is None: rel_step = relative_step[method] sign_x0 = (x0 >= 0).astype(float) * 2 - 1 return rel_step * sign_x0 * np.maximum(1.0, np.abs(x0)) def _prepare_bounds(bounds, x0): lb, ub = [np.asarray(b, dtype=float) for b in bounds] if lb.ndim == 0: lb = np.resize(lb, x0.shape) if ub.ndim == 0: ub = np.resize(ub, x0.shape) return lb, ub def group_columns(A, order=0): """Group columns of a 2-D matrix for sparse finite differencing [1]_. Two columns are in the same group if in each row at least one of them has zero. A greedy sequential algorithm is used to construct groups. 
Parameters ---------- A : array_like or sparse matrix, shape (m, n) Matrix of which to group columns. order : int, iterable of int with shape (n,) or None Permutation array which defines the order of columns enumeration. If int or None, a random permutation is used with `order` used as a random seed. Default is 0, that is use a random permutation but guarantee repeatability. Returns ------- groups : ndarray of int, shape (n,) Contains values from 0 to n_groups-1, where n_groups is the number of found groups. Each value ``groups[i]`` is an index of a group to which ith column assigned. The procedure was helpful only if n_groups is significantly less than n. References ---------- .. [1] <NAME>, <NAME>, and <NAME>, "On the estimation of sparse Jacobian matrices", Journal of the Institute of Mathematics and its Applications, 13 (1974), pp. 117-120. """ if issparse(A): A = csc_matrix(A) else: A = np.atleast_2d(A) A = (A != 0).astype(np.int32) if A.ndim != 2: raise ValueError("`A` must be 2-dimensional.") m, n = A.shape if order is None or np.isscalar(order): rng = np.random.RandomState(order) order = rng.permutation(n) else: order = np.asarray(order) if order.shape != (n,): raise ValueError("`order` has incorrect shape.") A = A[:, order] if issparse(A): groups = group_sparse(m, n, A.indices, A.indptr) else: groups = group_dense(m, n, A) groups[order] = groups.copy() return groups def approx_derivative(fun, x0, method='3-point', rel_step=None, f0=None, bounds=(-np.inf, np.inf), sparsity=None, as_linear_operator=False, args=(), kwargs={}): """Compute finite difference approximation of the derivatives of a vector-valued function. If a function maps from R^n to R^m, its derivatives form m-by-n matrix called the Jacobian, where an element (i, j) is a partial derivative of f[i] with respect to x[j]. Parameters ---------- fun : callable Function of which to estimate the derivatives. The argument x passed to this function is ndarray of shape (n,) (never a scalar even if n=1). It must return 1-D array_like of shape (m,) or a scalar. x0 : array_like of shape (n,) or float Point at which to estimate the derivatives. Float will be converted to a 1-D array. method : {'3-point', '2-point', 'cs'}, optional Finite difference method to use: - '2-point' - use the first order accuracy forward or backward difference. - '3-point' - use central difference in interior points and the second order accuracy forward or backward difference near the boundary. - 'cs' - use a complex-step finite difference scheme. This assumes that the user function is real-valued and can be analytically continued to the complex plane. Otherwise, produces bogus results. rel_step : None or array_like, optional Relative step size to use. The absolute step size is computed as ``h = rel_step * sign(x0) * max(1, abs(x0))``, possibly adjusted to fit into the bounds. For ``method='3-point'`` the sign of `h` is ignored. If None (default) then step is selected automatically, see Notes. f0 : None or array_like, optional If not None it is assumed to be equal to ``fun(x0)``, in this case the ``fun(x0)`` is not called. Default is None. bounds : tuple of array_like, optional Lower and upper bounds on independent variables. Defaults to no bounds. Each bound must match the size of `x0` or be a scalar, in the latter case the bound will be the same for all variables. Use it to limit the range of function evaluation. Bounds checking is not implemented when `as_linear_operator` is True. 
sparsity : {None, array_like, sparse matrix, 2-tuple}, optional Defines a sparsity structure of the Jacobian matrix. If the Jacobian matrix is known to have only few non-zero elements in each row, then it's possible to estimate several of its columns by a single function evaluation [3]_. To perform such economical computations two ingredients are required: * structure : array_like or sparse matrix of shape (m, n). A zero element means that a corresponding element of the Jacobian identically equals zero. * groups : array_like of shape (n,). A column grouping for a given sparsity structure, use `group_columns` to obtain it. A single array or a sparse matrix is interpreted as a sparsity structure, and groups are computed inside the function. A tuple is interpreted as (structure, groups). If None (default), a standard dense differencing will be used. Note that sparse differencing makes sense only for large Jacobian matrices where each row contains few non-zero elements. as_linear_operator : bool, optional When True the function returns an `scipy.sparse.linalg.LinearOperator`. Otherwise it returns a dense array or a sparse matrix depending on `sparsity`. The linear operator provides an efficient way of computing ``J.dot(p)`` for any vector ``p`` of shape (n,), but does not allow direct access to individual elements of the matrix. By default `as_linear_operator` is False. args, kwargs : tuple and dict, optional Additional arguments passed to `fun`. Both empty by default. The calling signature is ``fun(x, *args, **kwargs)``. Returns ------- J : {ndarray, sparse matrix, LinearOperator} Finite difference approximation of the Jacobian matrix. If `as_linear_operator` is True returns a LinearOperator with shape (m, n). Otherwise it returns a dense array or sparse matrix depending on how `sparsity` is defined. If `sparsity` is None then an ndarray with shape (m, n) is returned. If `sparsity` is not None returns a csr_matrix with shape (m, n). For sparse matrices and linear operators it is always returned as a 2-D structure; for ndarrays, if m=1 it is returned as a 1-D gradient array with shape (n,). See Also -------- check_derivative : Check correctness of a function computing derivatives. Notes ----- If `rel_step` is not provided, it is assigned to ``EPS**(1/s)``, where EPS is machine epsilon for float64 numbers, s=2 for '2-point' method and s=3 for '3-point' method. Such a relative step approximately minimizes a sum of truncation and round-off errors, see [1]_. A finite difference scheme for '3-point' method is selected automatically. The well-known central difference scheme is used for points sufficiently far from the boundary, and a 3-point forward or backward scheme is used for points near the boundary. Both schemes have the second-order accuracy in terms of Taylor expansion. Refer to [2]_ for the formulas of 3-point forward and backward difference schemes. For dense differencing when m=1 the Jacobian is returned with a shape (n,); on the other hand when n=1 the Jacobian is returned with a shape (m, 1). Our motivation is the following: a) It handles a case of gradient computation (m=1) in a conventional way. b) It clearly separates these two different cases. c) In all cases np.atleast_2d can be called to get a 2-D Jacobian with correct dimensions. References ---------- .. [1] W. H. Press et al. "Numerical Recipes. The Art of Scientific Computing. 3rd edition", sec. 5.7. .. 
[2] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of sparse Jacobian matrices", Journal of the Institute of Mathematics and its Applications, 13 (1974), pp. 117-120. .. [3] B. Fornberg, "Generation of Finite Difference Formulas on Arbitrarily Spaced Grids", Mathematics of Computation 51, 1988. Examples -------- >>> import numpy as np >>> from scipy.optimize import approx_derivative >>> >>> def f(x, c1, c2): ... return np.array([x[0] * np.sin(c1 * x[1]), ... x[0] * np.cos(c2 * x[1])]) ... >>> x0 = np.array([1.0, 0.5 * np.pi]) >>> approx_derivative(f, x0, args=(1, 2)) array([[ 1., 0.], [-1., 0.]]) Bounds can be used to limit the region of function evaluation. In the example below we compute the left and right derivatives at the point 1.0. >>> def g(x): ... return x**2 if x >= 1 else x ... >>> x0 = 1.0 >>> approx_derivative(g, x0, bounds=(-np.inf, 1.0)) array([ 1.]) >>> approx_derivative(g, x0, bounds=(1.0, np.inf)) array([ 2.]) """ if method not in ['2-point', '3-point', 'cs']: raise ValueError("Unknown method '%s'. " % method) x0 = np.atleast_1d(x0) if x0.ndim > 1: raise ValueError("`x0` must have at most 1 dimension.") lb, ub = _prepare_bounds(bounds, x0) if lb.shape != x0.shape or ub.shape != x0.shape: raise ValueError("Inconsistent shapes between bounds and `x0`.") if as_linear_operator and not (np.all(np.isinf(lb)) and np.all(np.isinf(ub))): raise ValueError("Bounds not supported when " "`as_linear_operator` is True.") def fun_wrapped(x): f = np.atleast_1d(fun(x, *args, **kwargs)) if f.ndim > 1: raise RuntimeError("`fun` return value has " "more than 1 dimension.") return f if f0 is None: f0 = fun_wrapped(x0) else: f0 = np.atleast_1d(f0) if f0.ndim > 1: raise ValueError("`f0` passed has more than 1 dimension.") if np.any((x0 < lb) | (x0 > ub)): raise ValueError("`x0` violates bound constraints.") if as_linear_operator: if rel_step is None: rel_step = relative_step[method] return _linear_operator_difference(fun_wrapped, x0, f0, rel_step, method) else: h = _compute_absolute_step(rel_step, x0, method) if method == '2-point': h, use_one_sided = _adjust_scheme_to_bounds( x0, h, 1, '1-sided', lb, ub) elif method == '3-point': h, use_one_sided = _adjust_scheme_to_bounds( x0, h, 1, '2-sided', lb, ub) elif method == 'cs': use_one_sided = False if sparsity is None: return _dense_difference(fun_wrapped, x0, f0, h, use_one_sided, method) else: if not issparse(sparsity) and len(sparsity) == 2: structure, groups = sparsity else: structure = sparsity groups = group_columns(sparsity) if issparse(structure): structure = csc_matrix(structure) else: structure = np.atleast_2d(structure) groups = np.atleast_1d(groups) return _sparse_difference(fun_wrapped, x0, f0, h, use_one_sided, structure, groups, method) def _linear_operator_difference(fun, x0, f0, h, method): m = f0.size n = x0.size if method == '2-point': def matvec(p): if np.array_equal(p, np.zeros_like(p)): return np.zeros(m) dx = h / norm(p) x = x0 + dx*p df = fun(x) - f0 return df / dx elif method == '3-point': def matvec(p): if np.array_equal(p, np.zeros_like(p)): return np.zeros(m) dx = 2*h / norm(p) x1 = x0 - (dx/2)*p x2 = x0 + (dx/2)*p f1 = fun(x1) f2 = fun(x2) df = f2 - f1 return df / dx elif method == 'cs': def matvec(p): if np.array_equal(p, np.zeros_like(p)): return np.zeros(m) dx = h / norm(p) x = x0 + dx*p*1.j f1 = fun(x) df = f1.imag return df / dx else: raise RuntimeError("Never be here.") return LinearOperator((m, n), matvec) def _dense_difference(fun, x0, f0, h, use_one_sided, method): m = f0.size n = x0.size J_transposed = np.empty((n, m)) 
h_vecs = np.diag(h) for i in range(h.size): if method == '2-point': x = x0 + h_vecs[i] dx = x[i] - x0[i] # Recompute dx as exactly representable number. df = fun(x) - f0 elif method == '3-point' and use_one_sided[i]: x1 = x0 + h_vecs[i] x2 = x0 + 2 * h_vecs[i] dx = x2[i] - x0[i] f1 = fun(x1) f2 = fun(x2) df = -3.0 * f0 + 4 * f1 - f2 elif method == '3-point' and not use_one_sided[i]: x1 = x0 - h_vecs[i] x2 = x0 + h_vecs[i] dx = x2[i] - x1[i] f1 = fun(x1) f2 = fun(x2) df = f2 - f1 elif method == 'cs': f1 = fun(x0 + h_vecs[i]*1.j) df = f1.imag dx = h_vecs[i, i] else: raise RuntimeError("Never be here.") J_transposed[i] = df / dx if m == 1: J_transposed = np.ravel(J_transposed) return J_transposed.T def _sparse_difference(fun, x0, f0, h, use_one_sided, structure, groups, method): m = f0.size n = x0.size row_indices = [] col_indices = [] fractions = [] n_groups = np.max(groups) + 1 for group in range(n_groups): # Perturb variables which are in the same group simultaneously. e = np.equal(group, groups) h_vec = h * e if method == '2-point': x = x0 + h_vec dx = x - x0 df = fun(x) - f0 # The result is written to columns which correspond to perturbed # variables. cols, = np.nonzero(e) # Find all non-zero elements in selected columns of Jacobian. i, j, _ = find(structure[:, cols]) # Restore column indices in the full array. j = cols[j] elif method == '3-point': # Here we do conceptually the same but separate one-sided # and two-sided schemes. x1 = x0.copy() x2 = x0.copy() mask_1 = use_one_sided & e x1[mask_1] += h_vec[mask_1] x2[mask_1] += 2 * h_vec[mask_1] mask_2 = ~use_one_sided & e x1[mask_2] -= h_vec[mask_2] x2[mask_2] += h_vec[mask_2] dx = np.zeros(n) dx[mask_1] = x2[mask_1] - x0[mask_1] dx[mask_2] = x2[mask_2] - x1[mask_2] f1 = fun(x1) f2 = fun(x2) cols, = np.nonzero(e) i, j, _ = find(structure[:, cols]) j = cols[j] mask = use_one_sided[j] df = np.empty(m) rows = i[mask] df[rows] = -3 * f0[rows] + 4 * f1[rows] - f2[rows] rows = i[~mask] df[rows] = f2[rows] - f1[rows] elif method == 'cs': f1 = fun(x0 + h_vec*1.j) df = f1.imag dx = h_vec cols, =
np.nonzero(e)
numpy.nonzero
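A minimal usage sketch of the sparse differencing machinery documented in this row, assuming the private import path scipy.optimize._numdiff (these helpers are not re-exported in the public scipy.optimize namespace); the tridiagonal test function f below is purely illustrative. For a banded Jacobian, the greedy column grouping lets approx_derivative recover all entries with a number of extra function evaluations proportional to the number of groups rather than to n.

import numpy as np
from scipy.optimize._numdiff import approx_derivative, group_columns

def f(x):
    # Tridiagonal coupling: f[i] depends only on x[i-1], x[i] and x[i+1].
    y = x**2
    y[:-1] += x[1:]
    y[1:] += x[:-1]
    return y

n = 6
x0 = np.arange(n, dtype=float)
# Sparsity structure: non-zeros only on the three central diagonals.
structure = np.eye(n) + np.eye(n, k=1) + np.eye(n, k=-1)
groups = group_columns(structure)   # e.g. [0 1 2 0 1 2]: 3 groups suffice
J = approx_derivative(f, x0, sparsity=(structure, groups))
print(J.toarray())                  # returned as a csr_matrix when sparsity is given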
""" YTArray class. """ from __future__ import print_function #----------------------------------------------------------------------------- # Copyright (c) 2013, yt Development Team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. #----------------------------------------------------------------------------- import copy import numpy as np from distutils.version import LooseVersion from functools import wraps from numpy import \ add, subtract, multiply, divide, logaddexp, logaddexp2, true_divide, \ floor_divide, negative, power, remainder, mod, absolute, rint, \ sign, conj, exp, exp2, log, log2, log10, expm1, log1p, sqrt, square, \ reciprocal, sin, cos, tan, arcsin, arccos, arctan, arctan2, \ hypot, sinh, cosh, tanh, arcsinh, arccosh, arctanh, deg2rad, rad2deg, \ bitwise_and, bitwise_or, bitwise_xor, invert, left_shift, right_shift, \ greater, greater_equal, less, less_equal, not_equal, equal, logical_and, \ logical_or, logical_xor, logical_not, maximum, minimum, fmax, fmin, \ isreal, iscomplex, isfinite, isinf, isnan, signbit, copysign, nextafter, \ modf, ldexp, frexp, fmod, floor, ceil, trunc, fabs, spacing try: # numpy 1.13 or newer from numpy import positive, divmod as divmod_, isnat, heaviside except ImportError: positive, divmod_, isnat, heaviside = (None,)*4 from yt.units.unit_object import Unit, UnitParseError from yt.units.unit_registry import UnitRegistry from yt.units.dimensions import \ angle, \ current_mks, \ dimensionless, \ em_dimensions from yt.utilities.exceptions import \ YTUnitOperationError, YTUnitConversionError, \ YTUfuncUnitError, YTIterableUnitCoercionError, \ YTInvalidUnitEquivalence, YTEquivalentDimsError from yt.utilities.lru_cache import lru_cache from numbers import Number as numeric_type from yt.utilities.on_demand_imports import _astropy from sympy import Rational from yt.units.unit_lookup_table import \ default_unit_symbol_lut from yt.units.equivalencies import equivalence_registry from yt.utilities.logger import ytLogger as mylog from .pint_conversions import convert_pint_units NULL_UNIT = Unit() POWER_SIGN_MAPPING = {multiply: 1, divide: -1} # redefine this here to avoid a circular import from yt.funcs def iterable(obj): try: len(obj) except: return False return True def return_arr(func): @wraps(func) def wrapped(*args, **kwargs): ret, units = func(*args, **kwargs) if ret.shape == (): return YTQuantity(ret, units) else: # This could be a subclass, so don't call YTArray directly. 
return type(args[0])(ret, units) return wrapped @lru_cache(maxsize=128, typed=False) def sqrt_unit(unit): return unit**0.5 @lru_cache(maxsize=128, typed=False) def multiply_units(unit1, unit2): return unit1 * unit2 def preserve_units(unit1, unit2=None): return unit1 @lru_cache(maxsize=128, typed=False) def power_unit(unit, power): return unit**power @lru_cache(maxsize=128, typed=False) def square_unit(unit): return unit*unit @lru_cache(maxsize=128, typed=False) def divide_units(unit1, unit2): return unit1/unit2 @lru_cache(maxsize=128, typed=False) def reciprocal_unit(unit): return unit**-1 def passthrough_unit(unit, unit2=None): return unit def return_without_unit(unit, unit2=None): return None def arctan2_unit(unit1, unit2): return NULL_UNIT def comparison_unit(unit1, unit2=None): return None def invert_units(unit): raise TypeError( "Bit-twiddling operators are not defined for YTArray instances") def bitop_units(unit1, unit2): raise TypeError( "Bit-twiddling operators are not defined for YTArray instances") def get_inp_u_unary(ufunc, inputs, out_arr=None): inp = inputs[0] u = getattr(inp, 'units', None) if u is None: u = NULL_UNIT if u.dimensions is angle and ufunc in trigonometric_operators: inp = inp.in_units('radian').v if out_arr is not None: out_arr = ufunc(inp).view(np.ndarray) return out_arr, inp, u def get_inp_u_binary(ufunc, inputs): inp1 = coerce_iterable_units(inputs[0]) inp2 = coerce_iterable_units(inputs[1]) unit1 = getattr(inp1, 'units', None) unit2 = getattr(inp2, 'units', None) ret_class = get_binary_op_return_class(type(inp1), type(inp2)) if unit1 is None: unit1 = Unit(registry=getattr(unit2, 'registry', None)) if unit2 is None and ufunc is not power: unit2 = Unit(registry=getattr(unit1, 'registry', None)) elif ufunc is power: unit2 = inp2 if isinstance(unit2, np.ndarray): if isinstance(unit2, YTArray): if unit2.units.is_dimensionless: pass else: raise YTUnitOperationError(ufunc, unit1, unit2) unit2 = 1.0 return (inp1, inp2), (unit1, unit2), ret_class def handle_preserve_units(inps, units, ufunc, ret_class): if units[0] != units[1]: any_nonzero = [np.any(inps[0]), np.any(inps[1])] if any_nonzero[0] == np.bool_(False): units = (units[1], units[1]) elif any_nonzero[1] == np.bool_(False): units = (units[0], units[0]) else: if not units[0].same_dimensions_as(units[1]): raise YTUnitOperationError(ufunc, *units) inps = (inps[0], ret_class(inps[1]).to( ret_class(inps[0]).units)) return inps, units def handle_comparison_units(inps, units, ufunc, ret_class, raise_error=False): if units[0] != units[1]: u1d = units[0].is_dimensionless u2d = units[1].is_dimensionless any_nonzero = [np.any(inps[0]), np.any(inps[1])] if any_nonzero[0] == np.bool_(False): units = (units[1], units[1]) elif any_nonzero[1] == np.bool_(False): units = (units[0], units[0]) elif not any([u1d, u2d]): if not units[0].same_dimensions_as(units[1]): raise YTUnitOperationError(ufunc, *units) else: if raise_error: raise YTUfuncUnitError(ufunc, *units) inps = (inps[0], ret_class(inps[1]).to( ret_class(inps[0]).units)) return inps, units def handle_multiply_divide_units(unit, units, out, out_arr): if unit.is_dimensionless and unit.base_value != 1.0: if not units[0].is_dimensionless: if units[0].dimensions == units[1].dimensions: out_arr = np.multiply(out_arr.view(np.ndarray), unit.base_value, out=out) unit = Unit(registry=unit.registry) return out, out_arr, unit def coerce_iterable_units(input_object): if isinstance(input_object, np.ndarray): return input_object if iterable(input_object): if any([isinstance(o, 
YTArray) for o in input_object]): ff = getattr(input_object[0], 'units', NULL_UNIT, ) if any([ff != getattr(_, 'units', NULL_UNIT) for _ in input_object]): raise YTIterableUnitCoercionError(input_object) # This will create a copy of the data in the iterable. return YTArray(input_object) return input_object else: return input_object def sanitize_units_mul(this_object, other_object): inp = coerce_iterable_units(this_object) ret = coerce_iterable_units(other_object) # If the other object is a YTArray and has the same dimensions as the object # under consideration, convert so we don't mix units with the same # dimensions. if isinstance(ret, YTArray): if inp.units.same_dimensions_as(ret.units): ret.in_units(inp.units) return ret def sanitize_units_add(this_object, other_object, op_string): inp = coerce_iterable_units(this_object) ret = coerce_iterable_units(other_object) # Make sure the other object is a YTArray before we use the `units` # attribute. if isinstance(ret, YTArray): if not inp.units.same_dimensions_as(ret.units): # handle special case of adding or subtracting with zero or # array filled with zero if not np.any(other_object): return ret.view(np.ndarray) elif not np.any(this_object): return ret raise YTUnitOperationError(op_string, inp.units, ret.units) ret = ret.in_units(inp.units) else: # If the other object is not a YTArray, then one of the arrays must be # dimensionless or filled with zeros if not inp.units.is_dimensionless and np.any(ret): raise YTUnitOperationError(op_string, inp.units, dimensionless) return ret def validate_comparison_units(this, other, op_string): # Check that other is a YTArray. if hasattr(other, 'units'): if this.units.expr is other.units.expr: if this.units.base_value == other.units.base_value: return other if not this.units.same_dimensions_as(other.units): raise YTUnitOperationError(op_string, this.units, other.units) return other.in_units(this.units) return other @lru_cache(maxsize=128, typed=False) def _unit_repr_check_same(my_units, other_units): """ Takes a Unit object, or string of known unit symbol, and check that it is compatible with this quantity. Returns Unit object. """ # let Unit() handle units arg if it's not already a Unit obj. 
if not isinstance(other_units, Unit): other_units = Unit(other_units, registry=my_units.registry) equiv_dims = em_dimensions.get(my_units.dimensions, None) if equiv_dims == other_units.dimensions: if current_mks in equiv_dims.free_symbols: base = "SI" else: base = "CGS" raise YTEquivalentDimsError(my_units, other_units, base) if not my_units.same_dimensions_as(other_units): raise YTUnitConversionError( my_units, my_units.dimensions, other_units, other_units.dimensions) return other_units unary_operators = ( negative, absolute, rint, sign, conj, exp, exp2, log, log2, log10, expm1, log1p, sqrt, square, reciprocal, sin, cos, tan, arcsin, arccos, arctan, sinh, cosh, tanh, arcsinh, arccosh, arctanh, deg2rad, rad2deg, invert, logical_not, isreal, iscomplex, isfinite, isinf, isnan, signbit, floor, ceil, trunc, modf, frexp, fabs, spacing, positive, isnat, ) binary_operators = ( add, subtract, multiply, divide, logaddexp, logaddexp2, true_divide, power, remainder, mod, arctan2, hypot, bitwise_and, bitwise_or, bitwise_xor, left_shift, right_shift, greater, greater_equal, less, less_equal, not_equal, equal, logical_and, logical_or, logical_xor, maximum, minimum, fmax, fmin, copysign, nextafter, ldexp, fmod, divmod_, heaviside ) trigonometric_operators = ( sin, cos, tan, ) class YTArray(np.ndarray): """ An ndarray subclass that attaches a symbolic unit object to the array data. Parameters ---------- input_array : :obj:`!iterable` A tuple, list, or array to attach units to input_units : String unit specification, unit symbol object, or astropy units The units of the array. Powers must be specified using python syntax (cm**3, not cm^3). registry : ~yt.units.unit_registry.UnitRegistry The registry to create units from. If input_units is already associated with a unit registry and this is specified, this will be used instead of the registry associated with the unit object. dtype : data-type The dtype of the array data. Defaults to the dtype of the input data, or, if none is found, uses np.float64 bypass_validation : boolean If True, all input validation is skipped. Using this option may produce corrupted, invalid units or array data, but can lead to significant speedups in cases where the input validation logic adds significant overhead. If set, input_units *must* be a valid unit object. Defaults to False. Examples -------- >>> from yt import YTArray >>> a = YTArray([1, 2, 3], 'cm') >>> b = YTArray([4, 5, 6], 'm') >>> a + b YTArray([ 401., 502., 603.]) cm >>> b + a YTArray([ 4.01, 5.02, 6.03]) m NumPy ufuncs will pass through units where appropriate. >>> import numpy as np >>> a = YTArray(np.arange(8) - 4, 'g/cm**3') >>> np.abs(a) YTArray([4, 3, 2, 1, 0, 1, 2, 3]) g/cm**3 and strip them when it would be annoying to deal with them. >>> np.log10(a) array([ -inf, 0. 
, 0.30103 , 0.47712125, 0.60205999, 0.69897 , 0.77815125, 0.84509804]) YTArray is tightly integrated with yt datasets: >>> import yt >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030') >>> a = ds.arr(np.ones(5), 'code_length') >>> a.in_cgs() YTArray([ 3.08600000e+24, 3.08600000e+24, 3.08600000e+24, 3.08600000e+24, 3.08600000e+24]) cm This is equivalent to: >>> b = YTArray(np.ones(5), 'code_length', registry=ds.unit_registry) >>> np.all(a == b) True """ _ufunc_registry = { add: preserve_units, subtract: preserve_units, multiply: multiply_units, divide: divide_units, logaddexp: return_without_unit, logaddexp2: return_without_unit, true_divide: divide_units, floor_divide: divide_units, negative: passthrough_unit, power: power_unit, remainder: preserve_units, mod: preserve_units, fmod: preserve_units, absolute: passthrough_unit, fabs: passthrough_unit, rint: return_without_unit, sign: return_without_unit, conj: passthrough_unit, exp: return_without_unit, exp2: return_without_unit, log: return_without_unit, log2: return_without_unit, log10: return_without_unit, expm1: return_without_unit, log1p: return_without_unit, sqrt: sqrt_unit, square: square_unit, reciprocal: reciprocal_unit, sin: return_without_unit, cos: return_without_unit, tan: return_without_unit, sinh: return_without_unit, cosh: return_without_unit, tanh: return_without_unit, arcsin: return_without_unit, arccos: return_without_unit, arctan: return_without_unit, arctan2: arctan2_unit, arcsinh: return_without_unit, arccosh: return_without_unit, arctanh: return_without_unit, hypot: preserve_units, deg2rad: return_without_unit, rad2deg: return_without_unit, bitwise_and: bitop_units, bitwise_or: bitop_units, bitwise_xor: bitop_units, invert: invert_units, left_shift: bitop_units, right_shift: bitop_units, greater: comparison_unit, greater_equal: comparison_unit, less: comparison_unit, less_equal: comparison_unit, not_equal: comparison_unit, equal: comparison_unit, logical_and: comparison_unit, logical_or: comparison_unit, logical_xor: comparison_unit, logical_not: return_without_unit, maximum: preserve_units, minimum: preserve_units, fmax: preserve_units, fmin: preserve_units, isreal: return_without_unit, iscomplex: return_without_unit, isfinite: return_without_unit, isinf: return_without_unit, isnan: return_without_unit, signbit: return_without_unit, copysign: passthrough_unit, nextafter: preserve_units, modf: passthrough_unit, ldexp: bitop_units, frexp: return_without_unit, floor: passthrough_unit, ceil: passthrough_unit, trunc: passthrough_unit, spacing: passthrough_unit, positive: passthrough_unit, divmod_: passthrough_unit, isnat: return_without_unit, heaviside: preserve_units, } __array_priority__ = 2.0 def __new__(cls, input_array, input_units=None, registry=None, dtype=None, bypass_validation=False): if dtype is None: dtype = getattr(input_array, 'dtype', np.float64) if bypass_validation is True: obj = np.asarray(input_array, dtype=dtype).view(cls) obj.units = input_units if registry is not None: obj.units.registry = registry return obj if input_array is NotImplemented: return input_array.view(cls) if registry is None and isinstance(input_units, (str, bytes)): if input_units.startswith('code_'): raise UnitParseError( "Code units used without referring to a dataset. 
\n" "Perhaps you meant to do something like this instead: \n" "ds.arr(%s, \"%s\")" % (input_array, input_units) ) if isinstance(input_array, YTArray): ret = input_array.view(cls) if input_units is None: if registry is None: ret.units = input_array.units else: units = Unit(str(input_array.units), registry=registry) ret.units = units elif isinstance(input_units, Unit): ret.units = input_units else: ret.units = Unit(input_units, registry=registry) return ret elif isinstance(input_array, np.ndarray): pass elif iterable(input_array) and input_array: if isinstance(input_array[0], YTArray): return YTArray(np.array(input_array, dtype=dtype), input_array[0].units, registry=registry) # Input array is an already formed ndarray instance # We first cast to be our class type obj = np.asarray(input_array, dtype=dtype).view(cls) # Check units type if input_units is None: # Nothing provided. Make dimensionless... units = Unit() elif isinstance(input_units, Unit): if registry and registry is not input_units.registry: units = Unit(str(input_units), registry=registry) else: units = input_units else: # units kwarg set, but it's not a Unit object. # don't handle all the cases here, let the Unit class handle if # it's a str. units = Unit(input_units, registry=registry) # Attach the units obj.units = units return obj def __repr__(self): """ """ return super(YTArray, self).__repr__()+' '+self.units.__repr__() def __str__(self): """ """ return str(self.view(np.ndarray)) + ' ' + str(self.units) # # Start unit conversion methods # def convert_to_units(self, units): """ Convert the array and units to the given units. Parameters ---------- units : Unit object or str The units you want to convert to. """ new_units = _unit_repr_check_same(self.units, units) (conversion_factor, offset) = self.units.get_conversion_factor(new_units) self.units = new_units values = self.d values *= conversion_factor if offset: np.subtract(self, offset*self.uq, self) return self def convert_to_base(self, unit_system="cgs"): """ Convert the array and units to the equivalent base units in the specified unit system. Parameters ---------- unit_system : string, optional The unit system to be used in the conversion. If not specified, the default base units of cgs are used. Examples -------- >>> E = YTQuantity(2.5, "erg/s") >>> E.convert_to_base(unit_system="galactic") """ return self.convert_to_units(self.units.get_base_equivalent(unit_system)) def convert_to_cgs(self): """ Convert the array and units to the equivalent cgs units. """ return self.convert_to_units(self.units.get_cgs_equivalent()) def convert_to_mks(self): """ Convert the array and units to the equivalent mks units. """ return self.convert_to_units(self.units.get_mks_equivalent()) def in_units(self, units, equivalence=None, **kwargs): """ Creates a copy of this array with the data in the supplied units, and returns it. Optionally, an equivalence can be specified to convert to an equivalent quantity which is not in the same dimensions. .. note:: All additional keyword arguments are passed to the equivalency, which should be used if that particular equivalency requires them. Parameters ---------- units : Unit object or string The units you want to get a new quantity in. equivalence : string, optional The equivalence you wish to use. To see which equivalencies are supported for this unitful quantity, try the :meth:`list_equivalencies` method. 
Default: None Returns ------- YTArray """ if equivalence is None: new_units = _unit_repr_check_same(self.units, units) (conversion_factor, offset) = self.units.get_conversion_factor(new_units) new_array = type(self)(self.ndview * conversion_factor, new_units) if offset: np.subtract(new_array, offset*new_array.uq, new_array) return new_array else: return self.to_equivalent(units, equivalence, **kwargs) def to(self, units, equivalence=None, **kwargs): """ An alias for YTArray.in_units(). See the docstrings of that function for details. """ return self.in_units(units, equivalence=equivalence, **kwargs) def to_value(self, units=None, equivalence=None, **kwargs): """ Creates a copy of this array with the data in the supplied units, and returns it without units. Output is therefore a bare NumPy array. Optionally, an equivalence can be specified to convert to an equivalent quantity which is not in the same dimensions. .. note:: All additional keyword arguments are passed to the equivalency, which should be used if that particular equivalency requires them. Parameters ---------- units : Unit object or string, optional The units you want to get the bare quantity in. If not specified, the value will be returned in the current units. equivalence : string, optional The equivalence you wish to use. To see which equivalencies are supported for this unitful quantity, try the :meth:`list_equivalencies` method. Default: None Returns ------- NumPy array """ if units is None: v = self.value else: v = self.in_units(units, equivalence=equivalence, **kwargs).value if isinstance(self, YTQuantity): return float(v) else: return v def in_base(self, unit_system="cgs"): """ Creates a copy of this array with the data in the specified unit system, and returns it in that system's base units. Parameters ---------- unit_system : string, optional The unit system to be used in the conversion. If not specified, the default base units of cgs are used. Examples -------- >>> E = YTQuantity(2.5, "erg/s") >>> E_new = E.in_base(unit_system="galactic") """ return self.in_units(self.units.get_base_equivalent(unit_system)) def in_cgs(self): """ Creates a copy of this array with the data in the equivalent cgs units, and returns it. Returns ------- Quantity object with data converted to cgs units. """ return self.in_units(self.units.get_cgs_equivalent()) def in_mks(self): """ Creates a copy of this array with the data in the equivalent mks units, and returns it. Returns ------- Quantity object with data converted to mks units. """ return self.in_units(self.units.get_mks_equivalent()) def to_equivalent(self, unit, equiv, **kwargs): """ Convert a YTArray or YTQuantity to an equivalent, e.g., something that is related by only a constant factor but not in the same units. Parameters ---------- unit : string The unit that you wish to convert to. equiv : string The equivalence you wish to use. To see which equivalencies are supported for this unitful quantity, try the :meth:`list_equivalencies` method. 
Examples -------- >>> a = yt.YTArray(1.0e7,"K") >>> a.to_equivalent("keV", "thermal") """ conv_unit = Unit(unit, registry=self.units.registry) if self.units.same_dimensions_as(conv_unit): return self.in_units(conv_unit) this_equiv = equivalence_registry[equiv]() oneway_or_equivalent = ( conv_unit.has_equivalent(equiv) or this_equiv._one_way) if self.has_equivalent(equiv) and oneway_or_equivalent: new_arr = this_equiv.convert( self, conv_unit.dimensions, **kwargs) if isinstance(new_arr, tuple): try: return type(self)(new_arr[0], new_arr[1]).in_units(unit) except YTUnitConversionError: raise YTInvalidUnitEquivalence(equiv, self.units, unit) else: return new_arr.in_units(unit) else: raise YTInvalidUnitEquivalence(equiv, self.units, unit) def list_equivalencies(self): """ Lists the possible equivalencies associated with this YTArray or YTQuantity. """ self.units.list_equivalencies() def has_equivalent(self, equiv): """ Check to see if this YTArray or YTQuantity has an equivalent unit in *equiv*. """ return self.units.has_equivalent(equiv) def ndarray_view(self): """ Returns a view into the array, but as an ndarray rather than ytarray. Returns ------- View of this array's data. """ return self.view(np.ndarray) def to_ndarray(self): """ Creates a copy of this array with the unit information stripped """ return np.array(self) @classmethod def from_astropy(cls, arr, unit_registry=None): """ Convert an AstroPy "Quantity" to a YTArray or YTQuantity. Parameters ---------- arr : AstroPy Quantity The Quantity to convert from. unit_registry : yt UnitRegistry, optional A yt unit registry to use in the conversion. If one is not supplied, the default one will be used. """ # Converting from AstroPy Quantity u = arr.unit ap_units = [] for base, exponent in zip(u.bases, u.powers): unit_str = base.to_string() # we have to do this because AstroPy is silly and defines # hour as "h" if unit_str == "h": unit_str = "hr" ap_units.append("%s**(%s)" % (unit_str, Rational(exponent))) ap_units = "*".join(ap_units) if isinstance(arr.value, np.ndarray): return YTArray(arr.value, ap_units, registry=unit_registry) else: return YTQuantity(arr.value, ap_units, registry=unit_registry) def to_astropy(self, **kwargs): """ Creates a new AstroPy quantity with the same unit information. """ if _astropy.units is None: raise ImportError("You don't have AstroPy installed, so you can't convert to " + "an AstroPy quantity.") return self.value*_astropy.units.Unit(str(self.units), **kwargs) @classmethod def from_pint(cls, arr, unit_registry=None): """ Convert a Pint "Quantity" to a YTArray or YTQuantity. Parameters ---------- arr : Pint Quantity The Quantity to convert from. unit_registry : yt UnitRegistry, optional A yt unit registry to use in the conversion. If one is not supplied, the default one will be used. Examples -------- >>> from pint import UnitRegistry >>> import numpy as np >>> ureg = UnitRegistry() >>> a = np.random.random(10) >>> b = ureg.Quantity(a, "erg/cm**3") >>> c = yt.YTArray.from_pint(b) """ p_units = [] for base, exponent in arr._units.items(): bs = convert_pint_units(base) p_units.append("%s**(%s)" % (bs, Rational(exponent))) p_units = "*".join(p_units) if isinstance(arr.magnitude, np.ndarray): return YTArray(arr.magnitude, p_units, registry=unit_registry) else: return YTQuantity(arr.magnitude, p_units, registry=unit_registry) def to_pint(self, unit_registry=None): """ Convert a YTArray or YTQuantity to a Pint Quantity. Parameters ---------- arr : YTArray or YTQuantity The unitful quantity to convert from. 
unit_registry : Pint UnitRegistry, optional The Pint UnitRegistry to use in the conversion. If one is not supplied, the default one will be used. NOTE: This is not the same as a yt UnitRegistry object. Examples -------- >>> a = YTQuantity(4.0, "cm**2/s") >>> b = a.to_pint() """ from pint import UnitRegistry if unit_registry is None: unit_registry = UnitRegistry() powers_dict = self.units.expr.as_powers_dict() units = [] for unit, pow in powers_dict.items(): # we have to do this because Pint doesn't recognize # "yr" as "year" if str(unit).endswith("yr") and len(str(unit)) in [2,3]: unit = str(unit).replace("yr","year") units.append("%s**(%s)" % (unit, Rational(pow))) units = "*".join(units) return unit_registry.Quantity(self.value, units) # # End unit conversion methods # def write_hdf5(self, filename, dataset_name=None, info=None, group_name=None): r"""Writes a YTArray to an hdf5 file. Parameters ---------- filename: string The filename to create and write a dataset to dataset_name: string The name of the dataset to create in the file. info: dictionary A dictionary of supplementary info to write to append as attributes to the dataset. group_name: string An optional group to write the arrays to. If not specified, the arrays are datasets at the top level by default. Examples -------- >>> a = YTArray([1,2,3], 'cm') >>> myinfo = {'field':'dinosaurs', 'type':'field_data'} >>> a.write_hdf5('test_array_data.h5', dataset_name='dinosaurs', ... info=myinfo) """ from yt.utilities.on_demand_imports import _h5py as h5py from yt.extern.six.moves import cPickle as pickle if info is None: info = {} info['units'] = str(self.units) info['unit_registry'] = np.void(pickle.dumps(self.units.registry.lut)) if dataset_name is None: dataset_name = 'array_data' f = h5py.File(filename) if group_name is not None: if group_name in f: g = f[group_name] else: g = f.create_group(group_name) else: g = f if dataset_name in g.keys(): d = g[dataset_name] # Overwrite without deleting if we can get away with it. if d.shape == self.shape and d.dtype == self.dtype: d[...] = self for k in d.attrs.keys(): del d.attrs[k] else: del f[dataset_name] d = g.create_dataset(dataset_name, data=self) else: d = g.create_dataset(dataset_name, data=self) for k, v in info.items(): d.attrs[k] = v f.close() @classmethod def from_hdf5(cls, filename, dataset_name=None, group_name=None): r"""Attempts to read in and convert a dataset in an hdf5 file into a YTArray. Parameters ---------- filename: string The filename of the hdf5 file. dataset_name: string The name of the dataset to read from. If the dataset has a units attribute, attempt to infer units as well. group_name: string An optional group to read the arrays from. If not specified, the arrays are datasets at the top level by default. 
""" import h5py from yt.extern.six.moves import cPickle as pickle if dataset_name is None: dataset_name = 'array_data' f = h5py.File(filename) if group_name is not None: g = f[group_name] else: g = f dataset = g[dataset_name] data = dataset[:] units = dataset.attrs.get('units', '') if 'unit_registry' in dataset.attrs.keys(): unit_lut = pickle.loads(dataset.attrs['unit_registry'].tostring()) else: unit_lut = None f.close() registry = UnitRegistry(lut=unit_lut, add_default_symbols=False) return cls(data, units, registry=registry) # # Start convenience methods # @property def value(self): """Get a copy of the array data as a numpy ndarray""" return np.array(self) v = value @property def ndview(self): """Get a view of the array data.""" return self.ndarray_view() d = ndview @property def unit_quantity(self): """Get a YTQuantity with the same unit as this array and a value of 1.0""" return YTQuantity(1.0, self.units) uq = unit_quantity @property def unit_array(self): """Get a YTArray filled with ones with the same unit and shape as this array""" return np.ones_like(self) ua = unit_array def __getitem__(self, item): ret = super(YTArray, self).__getitem__(item) if ret.shape == (): return YTQuantity(ret, self.units, bypass_validation=True) else: if hasattr(self, 'units'): ret.units = self.units return ret # # Start operation methods # if LooseVersion(np.__version__) < LooseVersion('1.13.0'): def __add__(self, right_object): """ Add this ytarray to the object on the right of the `+` operator. Must check for the correct (same dimension) units. """ ro = sanitize_units_add(self, right_object, "addition") return super(YTArray, self).__add__(ro) def __radd__(self, left_object): """ See __add__. """ lo = sanitize_units_add(self, left_object, "addition") return super(YTArray, self).__radd__(lo) def __iadd__(self, other): """ See __add__. """ oth = sanitize_units_add(self, other, "addition") np.add(self, oth, out=self) return self def __sub__(self, right_object): """ Subtract the object on the right of the `-` from this ytarray. Must check for the correct (same dimension) units. """ ro = sanitize_units_add(self, right_object, "subtraction") return super(YTArray, self).__sub__(ro) def __rsub__(self, left_object): """ See __sub__. """ lo = sanitize_units_add(self, left_object, "subtraction") return super(YTArray, self).__rsub__(lo) def __isub__(self, other): """ See __sub__. """ oth = sanitize_units_add(self, other, "subtraction") np.subtract(self, oth, out=self) return self def __neg__(self): """ Negate the data. """ return super(YTArray, self).__neg__() def __mul__(self, right_object): """ Multiply this YTArray by the object on the right of the `*` operator. The unit objects handle being multiplied. """ ro = sanitize_units_mul(self, right_object) return super(YTArray, self).__mul__(ro) def __rmul__(self, left_object): """ See __mul__. """ lo = sanitize_units_mul(self, left_object) return super(YTArray, self).__rmul__(lo) def __imul__(self, other): """ See __mul__. """ oth = sanitize_units_mul(self, other) np.multiply(self, oth, out=self) return self def __div__(self, right_object): """ Divide this YTArray by the object on the right of the `/` operator. """ ro = sanitize_units_mul(self, right_object) return super(YTArray, self).__div__(ro) def __rdiv__(self, left_object): """ See __div__. """ lo = sanitize_units_mul(self, left_object) return super(YTArray, self).__rdiv__(lo) def __idiv__(self, other): """ See __div__. 
""" oth = sanitize_units_mul(self, other) np.divide(self, oth, out=self) return self def __truediv__(self, right_object): ro = sanitize_units_mul(self, right_object) return super(YTArray, self).__truediv__(ro) def __rtruediv__(self, left_object): """ See __div__. """ lo = sanitize_units_mul(self, left_object) return super(YTArray, self).__rtruediv__(lo) def __itruediv__(self, other): """ See __div__. """ oth = sanitize_units_mul(self, other) np.true_divide(self, oth, out=self) return self def __floordiv__(self, right_object): ro = sanitize_units_mul(self, right_object) return super(YTArray, self).__floordiv__(ro) def __rfloordiv__(self, left_object): """ See __div__. """ lo = sanitize_units_mul(self, left_object) return super(YTArray, self).__rfloordiv__(lo) def __ifloordiv__(self, other): """ See __div__. """ oth = sanitize_units_mul(self, other) np.floor_divide(self, oth, out=self) return self def __or__(self, right_object): return super(YTArray, self).__or__(right_object) def __ror__(self, left_object): return super(YTArray, self).__ror__(left_object) def __ior__(self, other): np.bitwise_or(self, other, out=self) return self def __xor__(self, right_object): return super(YTArray, self).__xor__(right_object) def __rxor__(self, left_object): return super(YTArray, self).__rxor__(left_object) def __ixor__(self, other): np.bitwise_xor(self, other, out=self) return self def __and__(self, right_object): return super(YTArray, self).__and__(right_object) def __rand__(self, left_object): return super(YTArray, self).__rand__(left_object) def __iand__(self, other): np.bitwise_and(self, other, out=self) return self def __pow__(self, power): """ Raise this YTArray to some power. Parameters ---------- power : float or dimensionless YTArray. The pow value. """ if isinstance(power, YTArray): if not power.units.is_dimensionless: raise YTUnitOperationError('power', power.unit) # Work around a sympy issue (I think?) # # If I don't do this, super(YTArray, self).__pow__ returns a YTArray # with a unit attribute set to the sympy expression 1/1 rather than # a dimensionless Unit object. if self.units.is_dimensionless and power == -1: ret = super(YTArray, self).__pow__(power) return type(self)(ret, input_units='') return super(YTArray, self).__pow__(power) def __abs__(self): """ Return a YTArray with the abs of the data. """ return super(YTArray, self).__abs__() # # Start comparison operators. # def __lt__(self, other): """ Test if this is less than the object on the right. """ # converts if possible oth = validate_comparison_units(self, other, 'less_than') return super(YTArray, self).__lt__(oth) def __le__(self, other): """Test if this is less than or equal to the object on the right. """ oth = validate_comparison_units(self, other, 'less_than or equal') return super(YTArray, self).__le__(oth) def __eq__(self, other): """ Test if this is equal to the object on the right. """ # Check that other is a YTArray. if other is None: # self is a YTArray, so it can't be None. return False oth = validate_comparison_units(self, other, 'equal') return super(YTArray, self).__eq__(oth) def __ne__(self, other): """ Test if this is not equal to the object on the right. """ # Check that the other is a YTArray. if other is None: return True oth = validate_comparison_units(self, other, 'not equal') return super(YTArray, self).__ne__(oth) def __ge__(self, other): """ Test if this is greater than or equal to other. """ # Check that the other is a YTArray. 
oth = validate_comparison_units( self, other, 'greater than or equal') return super(YTArray, self).__ge__(oth) def __gt__(self, other): """ Test if this is greater than the object on the right. """ # Check that the other is a YTArray. oth = validate_comparison_units(self, other, 'greater than') return super(YTArray, self).__gt__(oth) # # End comparison operators # # # Begin reduction operators # @return_arr def prod(self, axis=None, dtype=None, out=None): if axis is not None: units = self.units**self.shape[axis] else: units = self.units**self.size return super(YTArray, self).prod(axis, dtype, out), units @return_arr def mean(self, axis=None, dtype=None, out=None): return super(YTArray, self).mean(axis, dtype, out), self.units @return_arr def sum(self, axis=None, dtype=None, out=None): return super(YTArray, self).sum(axis, dtype, out), self.units @return_arr def std(self, axis=None, dtype=None, out=None, ddof=0): return super(YTArray, self).std(axis, dtype, out, ddof), self.units def __array_wrap__(self, out_arr, context=None): ret = super(YTArray, self).__array_wrap__(out_arr, context) if isinstance(ret, YTQuantity) and ret.shape != (): ret = ret.view(YTArray) if context is None: if ret.shape == (): return ret[()] else: return ret ufunc = context[0] inputs = context[1] if ufunc in unary_operators: out_arr, inp, u = get_inp_u_unary(ufunc, inputs, out_arr) unit = self._ufunc_registry[context[0]](u) ret_class = type(self) elif ufunc in binary_operators: unit_operator = self._ufunc_registry[context[0]] inps, units, ret_class = get_inp_u_binary(ufunc, inputs) if unit_operator in (preserve_units, comparison_unit, arctan2_unit): inps, units = handle_comparison_units( inps, units, ufunc, ret_class, raise_error=True) unit = unit_operator(*units) if unit_operator in (multiply_units, divide_units): out_arr, out_arr, unit = handle_multiply_divide_units( unit, units, out_arr, out_arr) else: raise RuntimeError( "Support for the %s ufunc has not been added " "to YTArray." % str(context[0])) if unit is None: out_arr =
np.array(out_arr, copy=False)
numpy.array
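A short usage sketch of the unit semantics implemented in this row, assuming a yt installation whose yt.units.yt_array module matches the code above; the values in the comments follow from the conversion rules shown (e.g. addition converts the right-hand operand into the left operand's units via sanitize_units_add).

import numpy as np
from yt.units.yt_array import YTArray, YTQuantity

a = YTArray([1.0, 2.0, 3.0], 'km')
b = YTArray([100.0, 200.0, 300.0], 'm')
print(a + b)             # right operand converted to 'km': [1.1 2.2 3.3] km
print((a * b).units)     # multiply_units composes the units: km*m
print(a.in_units('cm'))  # explicit conversion: [100000. 200000. 300000.] cm
print(np.sqrt(a).units)  # sqrt_unit halves the unit exponent: sqrt(km)
q = YTQuantity(2.5, 'erg/s')
print(q.in_mks())        # mks equivalent: 2.5e-07 W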
# ________ # / # \ / # \ / # \/ import random import textwrap import emd_mean import AdvEMDpy import emd_basis import emd_utils import numpy as np import pandas as pd import cvxpy as cvx import seaborn as sns import matplotlib.pyplot as plt from scipy.integrate import odeint from scipy.ndimage import gaussian_filter from emd_utils import time_extension, Utility from scipy.interpolate import CubicSpline from emd_hilbert import Hilbert, hilbert_spectrum from emd_preprocess import Preprocess from emd_mean import Fluctuation from AdvEMDpy import EMD # alternate packages from PyEMD import EMD as pyemd0215 import emd as emd040 sns.set(style='darkgrid') pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001) pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time) pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series) # plot 0 - addition fig = plt.figure(figsize=(9, 4)) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('First Iteration of Sifting Algorithm') plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1) plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()], c='r', label=r'$M(t_i)$', zorder=2) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4) plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()], c='c', label=r'$m(t_j)$', zorder=3) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5) plt.yticks(ticks=[-2, -1, 0, 1, 2]) plt.xticks(ticks=[0, np.pi, 2 * np.pi], labels=[r'0', r'$\pi$', r'$2\pi$']) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/pseudo_algorithm.png') plt.show() knots = np.arange(12) time = np.linspace(0, 11, 1101) basis = emd_basis.Basis(time=time, time_series=time) b_spline_basis = basis.cubic_b_spline(knots) chsi_basis = basis.chsi_basis(knots) # plot 1 plt.title('Non-Natural Cubic B-Spline Bases at Boundary') plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $') plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $') plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $') plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $') plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $') plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $']) plt.xlim(4.4, 6.6) plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.legend(loc='upper left') plt.savefig('jss_figures/boundary_bases.png') plt.show() # plot 1a - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) knots_uniform = np.linspace(0, 2 * np.pi, 51) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0] fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) 
plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Uniform Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Uniform Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Uniform Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots_uniform)): axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_uniform.png') plt.show() # plot 1b - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=1, verbose=False) fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Statically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Statically Optimised Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Statically Optimised Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots)): axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_1.png') plt.show() # plot 1c - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=2, verbose=False) fig, 
axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Dynamically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Dynamically Optimised Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Dynamically Optimised Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots[i])): axs[i].plot(knots[i][j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_2.png') plt.show() # plot 1d - addition window = 81 fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Filtering Demonstration') axs[1].set_title('Zoomed Region') preprocess_time = pseudo_alg_time.copy() np.random.seed(1) random.seed(1) preprocess_time_series = pseudo_alg_time_series + np.random.normal(0, 0.1, len(preprocess_time)) for i in random.sample(range(1000), 500): preprocess_time_series[i] += np.random.normal(0, 1) preprocess = Preprocess(time=preprocess_time, time_series=preprocess_time_series) axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[0].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[0].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize filter', 12)) axs[0].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize interpolation filter', 14)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) 
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[1].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[1].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize filter', 12)) axs[1].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize interpolation filter', 14)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_filter.png') plt.show() # plot 1e - addition fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Smoothing Demonstration') axs[1].set_title('Zoomed Region') axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[0].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) downsampled_and_decimated = preprocess.downsample() axs[0].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 11)) downsampled = preprocess.downsample(decimate=False) axs[0].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[1].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) 
axs[1].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 13)) axs[1].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.06, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.06, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_smooth.png') plt.show() # plot 2 fig, axs = plt.subplots(1, 2, sharey=True) axs[0].set_title('Cubic B-Spline Bases') axs[0].plot(time, b_spline_basis[2, :].T, '--', label='Basis 1') axs[0].plot(time, b_spline_basis[3, :].T, '--', label='Basis 2') axs[0].plot(time, b_spline_basis[4, :].T, '--', label='Basis 3') axs[0].plot(time, b_spline_basis[5, :].T, '--', label='Basis 4') axs[0].legend(loc='upper left') axs[0].plot(5 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].plot(6 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].set_xticks([5, 6]) axs[0].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[0].set_xlim(4.5, 6.5) axs[1].set_title('Cubic Hermite Spline Bases') axs[1].plot(time, chsi_basis[10, :].T, '--') axs[1].plot(time, chsi_basis[11, :].T, '--') axs[1].plot(time, chsi_basis[12, :].T, '--') axs[1].plot(time, chsi_basis[13, :].T, '--') axs[1].plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].set_xticks([5, 6]) axs[1].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[1].set_xlim(4.5, 6.5) plt.savefig('jss_figures/comparing_bases.png') plt.show() # plot 3 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_dash = maxima_y[-1] * np.ones_like(max_dash_time) min_dash_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_dash = minima_y[-1] * np.ones_like(min_dash_time) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) max_discard = maxima_y[-1] max_discard_time = minima_x[-1] - maxima_x[-1] + minima_x[-1] max_discard_dash_time = np.linspace(max_discard_time - width, max_discard_time + width, 101) max_discard_dash = max_discard * np.ones_like(max_discard_dash_time) dash_2_time = np.linspace(minima_x[-1], max_discard_time, 101) dash_2 = np.linspace(minima_y[-1], max_discard, 101) end_point_time = time[-1] end_point = time_series[-1] time_reflect = np.linspace((5 - a) * np.pi, (5 + a) * np.pi, 101) time_series_reflect = np.flip(np.cos(np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101)) + np.cos(5 * np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101))) time_series_anti_reflect = time_series_reflect[0] - time_series_reflect utils = emd_utils.Utility(time=time, time_series=time_series_anti_reflect) anti_max_bool = utils.max_bool_func_1st_order_fd() anti_max_point_time = time_reflect[anti_max_bool] 
anti_max_point = time_series_anti_reflect[anti_max_bool] utils = emd_utils.Utility(time=time, time_series=time_series_reflect) no_anchor_max_time = time_reflect[utils.max_bool_func_1st_order_fd()] no_anchor_max = time_series_reflect[utils.max_bool_func_1st_order_fd()] point_1 = 5.4 length_distance = np.linspace(maxima_y[-1], minima_y[-1], 101) length_distance_time = point_1 * np.pi * np.ones_like(length_distance) length_time = np.linspace(point_1 * np.pi - width, point_1 * np.pi + width, 101) length_top = maxima_y[-1] * np.ones_like(length_time) length_bottom = minima_y[-1] * np.ones_like(length_time) point_2 = 5.2 length_distance_2 = np.linspace(time_series[-1], minima_y[-1], 101) length_distance_time_2 = point_2 * np.pi * np.ones_like(length_distance_2) length_time_2 = np.linspace(point_2 * np.pi - width, point_2 * np.pi + width, 101) length_top_2 = time_series[-1] * np.ones_like(length_time_2) length_bottom_2 = minima_y[-1] * np.ones_like(length_time_2) symmetry_axis_1_time = minima_x[-1] * np.ones(101) symmetry_axis_2_time = time[-1] * np.ones(101) symmetry_axis = np.linspace(-2, 2, 101) end_time = np.linspace(time[-1] - width, time[-1] + width, 101) end_signal = time_series[-1] * np.ones_like(end_time) anti_symmetric_time = np.linspace(time[-1] - 0.5, time[-1] + 0.5, 101) anti_symmetric_signal = time_series[-1] * np.ones_like(anti_symmetric_time) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.plot(time, time_series, linewidth=2, label='Signal') plt.title('Symmetry Edge Effects Example') plt.plot(time_reflect, time_series_reflect, 'g--', linewidth=2, label=textwrap.fill('Symmetric signal', 10)) plt.plot(time_reflect[:51], time_series_anti_reflect[:51], '--', c='purple', linewidth=2, label=textwrap.fill('Anti-symmetric signal', 10)) plt.plot(max_dash_time, max_dash, 'k-') plt.plot(min_dash_time, min_dash, 'k-') plt.plot(dash_1_time, dash_1, 'k--') plt.plot(dash_2_time, dash_2, 'k--') plt.plot(length_distance_time, length_distance, 'k--') plt.plot(length_distance_time_2, length_distance_2, 'k--') plt.plot(length_time, length_top, 'k-') plt.plot(length_time, length_bottom, 'k-') plt.plot(length_time_2, length_top_2, 'k-') plt.plot(length_time_2, length_bottom_2, 'k-') plt.plot(end_time, end_signal, 'k-') plt.plot(symmetry_axis_1_time, symmetry_axis, 'r--', zorder=1) plt.plot(anti_symmetric_time, anti_symmetric_signal, 'r--', zorder=1) plt.plot(symmetry_axis_2_time, symmetry_axis, 'r--', label=textwrap.fill('Axes of symmetry', 10), zorder=1) plt.text(5.1 * np.pi, -0.7, r'$\beta$L') plt.text(5.34 * np.pi, -0.05, 'L') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(max_discard_time, max_discard, c='purple', zorder=4, label=textwrap.fill('Symmetric Discard maxima', 10)) plt.scatter(end_point_time, end_point, c='orange', zorder=4, label=textwrap.fill('Symmetric Anchor maxima', 10)) plt.scatter(anti_max_point_time, anti_max_point, c='green', zorder=4, label=textwrap.fill('Anti-Symmetric maxima', 10)) plt.scatter(no_anchor_max_time, no_anchor_max, c='gray', zorder=4, label=textwrap.fill('Symmetric maxima', 10)) plt.xlim(3.9 * np.pi, 5.5 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_symmetry_anti.png') plt.show()
# plot 4 a = 0.21 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_1 = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_dash_2 = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_dash_time_1 = maxima_x[-1] * np.ones_like(max_dash_1) max_dash_time_2 = maxima_x[-2] * np.ones_like(max_dash_1) min_dash_1 = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_dash_2 = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_dash_time_1 = minima_x[-1] * np.ones_like(min_dash_1) min_dash_time_2 = minima_x[-2] * np.ones_like(min_dash_1) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) dash_2_time = np.linspace(maxima_x[-1], minima_x[-2], 101) dash_2 = np.linspace(maxima_y[-1], minima_y[-2], 101) s1 = (minima_y[-2] - maxima_y[-1]) / (minima_x[-2] - maxima_x[-1]) slope_based_maximum_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) slope_based_maximum = minima_y[-1] + (slope_based_maximum_time - minima_x[-1]) * s1 max_dash_time_3 = slope_based_maximum_time * np.ones_like(max_dash_1) max_dash_3 = np.linspace(slope_based_maximum - width, slope_based_maximum + width, 101) dash_3_time = np.linspace(minima_x[-1], slope_based_maximum_time, 101) dash_3 = np.linspace(minima_y[-1], slope_based_maximum, 101) s2 = (minima_y[-1] - maxima_y[-1]) / (minima_x[-1] - maxima_x[-1]) slope_based_minimum_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) slope_based_minimum = slope_based_maximum - (slope_based_maximum_time - slope_based_minimum_time) * s2 min_dash_time_3 = slope_based_minimum_time * np.ones_like(min_dash_1) min_dash_3 = np.linspace(slope_based_minimum - width, slope_based_minimum + width, 101) dash_4_time = np.linspace(slope_based_maximum_time, slope_based_minimum_time) dash_4 = np.linspace(slope_based_maximum, slope_based_minimum) maxima_dash = np.linspace(2.5 - width, 2.5 + width, 101) maxima_dash_time_1 = maxima_x[-2] * np.ones_like(maxima_dash) maxima_dash_time_2 = maxima_x[-1] * np.ones_like(maxima_dash) maxima_dash_time_3 = slope_based_maximum_time * np.ones_like(maxima_dash) maxima_line_dash_time = np.linspace(maxima_x[-2], slope_based_maximum_time, 101) maxima_line_dash = 2.5 * np.ones_like(maxima_line_dash_time) minima_dash = np.linspace(-3.4 - width, -3.4 + width, 101) minima_dash_time_1 = minima_x[-2] * np.ones_like(minima_dash) minima_dash_time_2 = minima_x[-1] * np.ones_like(minima_dash) minima_dash_time_3 = slope_based_minimum_time * np.ones_like(minima_dash) minima_line_dash_time = np.linspace(minima_x[-2], slope_based_minimum_time, 101) minima_line_dash = -3.4 * np.ones_like(minima_line_dash_time) # slightly edit signal to make difference between slope-based method and improved slope-based method more clear time_series[time >= minima_x[-1]] = 1.5 * (time_series[time >= minima_x[-1]] - time_series[time == minima_x[-1]]) + \ time_series[time == minima_x[-1]] improved_slope_based_maximum_time = time[-1] improved_slope_based_maximum = time_series[-1] improved_slope_based_minimum_time = slope_based_minimum_time improved_slope_based_minimum = improved_slope_based_maximum + s2 * (improved_slope_based_minimum_time - improved_slope_based_maximum_time) 
min_dash_4 = np.linspace(improved_slope_based_minimum - width, improved_slope_based_minimum + width, 101) min_dash_time_4 = improved_slope_based_minimum_time * np.ones_like(min_dash_4) dash_final_time = np.linspace(improved_slope_based_maximum_time, improved_slope_based_minimum_time, 101) dash_final = np.linspace(improved_slope_based_maximum, improved_slope_based_minimum, 101) ax = plt.subplot(111) figure_size = plt.gcf().get_size_inches() factor = 0.9 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) plt.plot(time, time_series, linewidth=2, label='Signal') plt.title('Slope-Based Edge Effects Example') plt.plot(max_dash_time_1, max_dash_1, 'k-') plt.plot(max_dash_time_2, max_dash_2, 'k-') plt.plot(max_dash_time_3, max_dash_3, 'k-') plt.plot(min_dash_time_1, min_dash_1, 'k-') plt.plot(min_dash_time_2, min_dash_2, 'k-') plt.plot(min_dash_time_3, min_dash_3, 'k-') plt.plot(min_dash_time_4, min_dash_4, 'k-') plt.plot(maxima_dash_time_1, maxima_dash, 'k-') plt.plot(maxima_dash_time_2, maxima_dash, 'k-') plt.plot(maxima_dash_time_3, maxima_dash, 'k-') plt.plot(minima_dash_time_1, minima_dash, 'k-') plt.plot(minima_dash_time_2, minima_dash, 'k-') plt.plot(minima_dash_time_3, minima_dash, 'k-') plt.text(4.34 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$') plt.text(4.74 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$') plt.text(4.12 * np.pi, 2, r'$\Delta{t^{max}_{M}}$') plt.text(4.50 * np.pi, 2, r'$\Delta{t^{max}_{M}}$') plt.text(4.30 * np.pi, 0.35, r'$s_1$') plt.text(4.43 * np.pi, -0.20, r'$s_2$') plt.text(4.30 * np.pi + (minima_x[-1] - minima_x[-2]), 0.35 + (minima_y[-1] - minima_y[-2]), r'$s_1$') plt.text(4.43 * np.pi + (slope_based_minimum_time - minima_x[-1]), -0.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$') plt.text(4.50 * np.pi + (slope_based_minimum_time - minima_x[-1]), 1.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$') plt.plot(minima_line_dash_time, minima_line_dash, 'k--') plt.plot(maxima_line_dash_time, maxima_line_dash, 'k--') plt.plot(dash_1_time, dash_1, 'k--') plt.plot(dash_2_time, dash_2, 'k--') plt.plot(dash_3_time, dash_3, 'k--') plt.plot(dash_4_time, dash_4, 'k--') plt.plot(dash_final_time, dash_final, 'k--') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(slope_based_maximum_time, slope_based_maximum, c='orange', zorder=4, label=textwrap.fill('Slope-based maximum', 11)) plt.scatter(slope_based_minimum_time, slope_based_minimum, c='purple', zorder=4, label=textwrap.fill('Slope-based minimum', 11)) plt.scatter(improved_slope_based_maximum_time, improved_slope_based_maximum, c='deeppink', zorder=4, label=textwrap.fill('Improved slope-based maximum', 11)) plt.scatter(improved_slope_based_minimum_time, improved_slope_based_minimum, c='dodgerblue', zorder=4, label=textwrap.fill('Improved slope-based minimum', 11)) plt.xlim(3.9 * np.pi, 5.5 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-3, -2, -1, 0, 1, 2), ('-3', '-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_slope_based.png') plt.show() # plot 5 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x =
time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] A2 = np.abs(maxima_y[-2] - minima_y[-2]) / 2 A1 = np.abs(maxima_y[-1] - minima_y[-1]) / 2 P2 = 2 * np.abs(maxima_x[-2] - minima_x[-2]) P1 = 2 * np.abs(maxima_x[-1] - minima_x[-1]) Huang_time = (P1 / P2) * (time[time >= maxima_x[-2]] - time[time == maxima_x[-2]]) + maxima_x[-1] Huang_wave = (A1 / A2) * (time_series[time >= maxima_x[-2]] - time_series[time == maxima_x[-2]]) + maxima_y[-1] Coughlin_time = Huang_time Coughlin_wave = A1 * np.cos(2 * np.pi * (1 / P1) * (Coughlin_time - Coughlin_time[0])) Average_max_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) Average_max = (maxima_y[-2] + maxima_y[-1]) / 2 Average_min_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) Average_min = (minima_y[-2] + minima_y[-1]) / 2 utils_Huang = emd_utils.Utility(time=time, time_series=Huang_wave) Huang_max_bool = utils_Huang.max_bool_func_1st_order_fd() Huang_min_bool = utils_Huang.min_bool_func_1st_order_fd() utils_Coughlin = emd_utils.Utility(time=time, time_series=Coughlin_wave) Coughlin_max_bool = utils_Coughlin.max_bool_func_1st_order_fd() Coughlin_min_bool = utils_Coughlin.min_bool_func_1st_order_fd() Huang_max_time = Huang_time[Huang_max_bool] Huang_max = Huang_wave[Huang_max_bool] Huang_min_time = Huang_time[Huang_min_bool] Huang_min = Huang_wave[Huang_min_bool] Coughlin_max_time = Coughlin_time[Coughlin_max_bool] Coughlin_max = Coughlin_wave[Coughlin_max_bool] Coughlin_min_time = Coughlin_time[Coughlin_min_bool] Coughlin_min = Coughlin_wave[Coughlin_min_bool] max_2_x_time = np.linspace(maxima_x[-2] - width, maxima_x[-2] + width, 101) max_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) max_2_x = maxima_y[-2] * np.ones_like(max_2_x_time) min_2_x_time = np.linspace(minima_x[-2] - width, minima_x[-2] + width, 101) min_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) min_2_x = minima_y[-2] * np.ones_like(min_2_x_time) dash_max_min_2_x = np.linspace(minima_y[-2], maxima_y[-2], 101) dash_max_min_2_x_time = 5.3 * np.pi * np.ones_like(dash_max_min_2_x) max_2_y = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) max_2_y_time = maxima_x[-2] * np.ones_like(max_2_y) min_2_y = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) min_2_y_time = minima_x[-2] * np.ones_like(min_2_y) dash_max_min_2_y_time = np.linspace(minima_x[-2], maxima_x[-2], 101) dash_max_min_2_y = -1.8 * np.ones_like(dash_max_min_2_y_time) max_1_x_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) max_1_x = maxima_y[-1] * np.ones_like(max_1_x_time) min_1_x_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_1_x_time_side =
np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101)
numpy.linspace
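The one-line completion above finishes the assignment left dangling at the end of the preceding script: np.linspace returns a fixed number of evenly spaced samples over a closed interval, here the short horizontal dash centred on 5.4*pi that the figure uses as a tick mark. A minimal self-contained sketch of the call pattern, with width assumed to equal the dash half-length set near the top of that script:

import numpy as np

width = 0.2  # assumed, as set at the top of the plotting script
min_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101)
assert min_1_x_time_side.shape == (101,)
# both endpoints are included, and the middle sample sits at the centre of the dash
assert np.isclose(min_1_x_time_side[50], 5.4 * np.pi)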
import numpy as np from typing import Tuple, Union, Optional from autoarray.structures.arrays.two_d import array_2d_util from autoarray.geometry import geometry_util from autoarray import numba_util from autoarray.mask import mask_2d_util @numba_util.jit() def grid_2d_centre_from(grid_2d_slim: np.ndarray) -> Tuple[float, float]: """ Returns the centre of a grid from a 1D grid. Parameters ---------- grid_2d_slim The 1D grid of values which are mapped to a 2D array. Returns ------- (float, float) The (y,x) central coordinates of the grid. """ centre_y = (np.max(grid_2d_slim[:, 0]) + np.min(grid_2d_slim[:, 0])) / 2.0 centre_x = (np.max(grid_2d_slim[:, 1]) + np.min(grid_2d_slim[:, 1])) / 2.0 return centre_y, centre_x @numba_util.jit() def grid_2d_slim_via_mask_from( mask_2d: np.ndarray, pixel_scales: Union[float, Tuple[float, float]], sub_size: int, origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided into a finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes the (y,x) scaled coordinates at the centre of every sub-pixel defined by this 2D mask array. The sub-grid is returned in an array of shape (total_unmasked_pixels*sub_size**2, 2). y coordinates are stored in the 0 index of the second dimension, x coordinates in the 1 index. Masked coordinates are therefore removed and not included in the slimmed grid. Grid2D are defined from the top-left corner, where the first unmasked sub-pixel corresponds to index 0. Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth. Parameters ---------- mask_2d A 2D array of bools, where `False` values are unmasked and therefore included as part of the calculated sub-grid. pixel_scales The (y,x) scaled units to pixel units conversion factor of the 2D mask array. sub_size The size of the sub-grid that each pixel of the 2D mask array is divided into. origin : (float, float) The (y,x) origin of the 2D array, which the sub-grid is shifted around. Returns ------- ndarray A slimmed sub grid of (y,x) scaled coordinates at the centre of every unmasked pixel on the 2D mask array. The sub grid array has dimensions (total_unmasked_pixels*sub_size**2, 2).
Examples -------- mask = np.array([[True, False, True], [False, False, False] [True, False, True]]) grid_slim = grid_2d_slim_via_mask_from(mask=mask, pixel_scales=(0.5, 0.5), sub_size=1, origin=(0.0, 0.0)) """ total_sub_pixels = mask_2d_util.total_sub_pixels_2d_from(mask_2d, sub_size) grid_slim = np.zeros(shape=(total_sub_pixels, 2)) centres_scaled = geometry_util.central_scaled_coordinate_2d_from( shape_native=mask_2d.shape, pixel_scales=pixel_scales, origin=origin ) sub_index = 0 y_sub_half = pixel_scales[0] / 2 y_sub_step = pixel_scales[0] / (sub_size) x_sub_half = pixel_scales[1] / 2 x_sub_step = pixel_scales[1] / (sub_size) for y in range(mask_2d.shape[0]): for x in range(mask_2d.shape[1]): if not mask_2d[y, x]: y_scaled = (y - centres_scaled[0]) * pixel_scales[0] x_scaled = (x - centres_scaled[1]) * pixel_scales[1] for y1 in range(sub_size): for x1 in range(sub_size): grid_slim[sub_index, 0] = -( y_scaled - y_sub_half + y1 * y_sub_step + (y_sub_step / 2.0) ) grid_slim[sub_index, 1] = ( x_scaled - x_sub_half + x1 * x_sub_step + (x_sub_step / 2.0) ) sub_index += 1 return grid_slim def grid_2d_via_mask_from( mask_2d: np.ndarray, pixel_scales: Union[float, Tuple[float, float]], sub_size: int, origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided into a finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes the (y,x) scaled coordinates at the centre of every sub-pixel defined by this 2D mask array. The sub-grid is returned in its native dimensions with shape (total_y_pixels*sub_size, total_x_pixels*sub_size). y coordinates are stored in the 0 index of the second dimension, x coordinates in the 1 index. Masked pixels are given values (0.0, 0.0). Grids are defined from the top-left corner, where the first unmasked sub-pixel corresponds to index 0. Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth. Parameters ---------- mask_2d A 2D array of bools, where `False` values are unmasked and therefore included as part of the calculated sub-grid. pixel_scales The (y,x) scaled units to pixel units conversion factor of the 2D mask array. sub_size The size of the sub-grid that each pixel of the 2D mask array is divided into. origin : (float, flloat) The (y,x) origin of the 2D array, which the sub-grid is shifted around. Returns ------- ndarray A sub grid of (y,x) scaled coordinates at the centre of every pixel unmasked pixel on the 2D mask array. The sub grid array has dimensions (total_y_pixels*sub_size, total_x_pixels*sub_size). 
Examples -------- mask = np.array([[True, False, True], [False, False, False] [True, False, True]]) grid_2d = grid_2d_via_mask_from(mask=mask, pixel_scales=(0.5, 0.5), sub_size=1, origin=(0.0, 0.0)) """ grid_2d_slim = grid_2d_slim_via_mask_from( mask_2d=mask_2d, pixel_scales=pixel_scales, sub_size=sub_size, origin=origin ) return grid_2d_native_from( grid_2d_slim=grid_2d_slim, mask_2d=mask_2d, sub_size=sub_size ) def grid_2d_slim_via_shape_native_from( shape_native: Tuple[int, int], pixel_scales: Union[float, Tuple[float, float]], sub_size: int, origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided into a finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes the (y,x) scaled coordinates at the centre of every sub-pixel defined by this 2D mask array. The sub-grid is returned in its slimmed dimensions with shape (total_pixels**2*sub_size**2, 2). y coordinates are stored in the 0 index of the second dimension, x coordinates in the 1 index. Grid2D are defined from the top-left corner, where the first sub-pixel corresponds to index [0,0]. Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth. Parameters ---------- shape_native The (y,x) shape of the 2D array the sub-grid of coordinates is computed for. pixel_scales The (y,x) scaled units to pixel units conversion factor of the 2D mask array. sub_size The size of the sub-grid that each pixel of the 2D mask array is divided into. origin The (y,x) origin of the 2D array, which the sub-grid is shifted around. Returns ------- ndarray A sub grid of (y,x) scaled coordinates at the centre of every pixel unmasked pixel on the 2D mask array. The sub grid is slimmed and has dimensions (total_unmasked_pixels*sub_size**2, 2). Examples -------- mask = np.array([[True, False, True], [False, False, False] [True, False, True]]) grid_2d_slim = grid_2d_slim_via_shape_native_from(shape_native=(3,3), pixel_scales=(0.5, 0.5), sub_size=2, origin=(0.0, 0.0)) """ return grid_2d_slim_via_mask_from( mask_2d=np.full(fill_value=False, shape=shape_native), pixel_scales=pixel_scales, sub_size=sub_size, origin=origin, ) def grid_2d_via_shape_native_from( shape_native: Tuple[int, int], pixel_scales: Union[float, Tuple[float, float]], sub_size: int, origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided into a finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes the (y,x) scaled coordinates at the centre of every sub-pixel defined by this 2D mask array. The sub-grid is returned in its native dimensions with shape (total_y_pixels*sub_size, total_x_pixels*sub_size). y coordinates are stored in the 0 index of the second dimension, x coordinates in the 1 index. Grids are defined from the top-left corner, where the first sub-pixel corresponds to index [0,0]. Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth. Parameters ---------- shape_native The (y,x) shape of the 2D array the sub-grid of coordinates is computed for. 
pixel_scales The (y,x) scaled units to pixel units conversion factor of the 2D mask array. sub_size The size of the sub-grid that each pixel of the 2D mask array is divided into. origin : (float, flloat) The (y,x) origin of the 2D array, which the sub-grid is shifted around. Returns ------- ndarray A sub grid of (y,x) scaled coordinates at the centre of every pixel unmasked pixel on the 2D mask array. The sub grid array has dimensions (total_y_pixels*sub_size, total_x_pixels*sub_size). Examples -------- grid_2d = grid_2d_via_shape_native_from(shape_native=(3, 3), pixel_scales=(1.0, 1.0), sub_size=2, origin=(0.0, 0.0)) """ return grid_2d_via_mask_from( mask_2d=np.full(fill_value=False, shape=shape_native), pixel_scales=pixel_scales, sub_size=sub_size, origin=origin, ) @numba_util.jit() def grid_scaled_2d_slim_radial_projected_from( extent: np.ndarray, centre: Tuple[float, float], pixel_scales: Union[float, Tuple[float, float]], sub_size: int, shape_slim: Optional[int] = 0, ) -> np.ndarray: """ Determine a projected radial grid of points from a 2D region of coordinates defined by an extent [xmin, xmax, ymin, ymax] and with a (y,x) centre. This functions operates as follows: 1) Given the region defined by the extent [xmin, xmax, ymin, ymax], the algorithm finds the longest 1D distance of the 4 paths from the (y,x) centre to the edge of the region (e.g. following the positive / negative y and x axes). 2) Use the pixel-scale corresponding to the direction chosen (e.g. if the positive x-axis was the longest, the pixel_scale in the x dimension is used). 3) Determine the number of pixels between the centre and the edge of the region using the longest path between the two chosen above. 4) Create a (y,x) grid of radial points where all points are at the centre's y value = 0.0 and the x values iterate from the centre in increasing steps of the pixel-scale. 5) Rotate these radial coordinates by the input `angle` clockwise. A schematric is shown below: ------------------- | | |<- - - - ->x | x = centre | | <-> = longest radial path from centre to extent edge | | ------------------- Using the centre x above, this function finds the longest radial path to the edge of the extent window. The returned `grid_radii` represents a radial set of points that in 1D sample the 2D grid outwards from its centre. This grid stores the radial coordinates as (y,x) values (where all y values are the same) as opposed to a 1D data structure so that it can be used in functions which require that a 2D grid structure is input. Parameters ---------- extent The extent of the grid the radii grid is computed using, with format [xmin, xmax, ymin, ymax] centre : (float, flloat) The (y,x) central coordinate which the radial grid is traced outwards from. pixel_scales The (y,x) scaled units to pixel units conversion factor of the 2D mask array. sub_size The size of the sub-grid that each pixel of the 2D mask array is divided into. shape_slim Manually choose the shape of the 1D projected grid that is returned. If 0, the border based on the 2D grid is used (due to numba None cannot be used as a default value). Returns ------- ndarray A radial set of points sampling the longest distance from the centre to the edge of the extent in along the positive x-axis. 
""" distance_to_positive_x = extent[1] - centre[1] distance_to_positive_y = extent[3] - centre[0] distance_to_negative_x = centre[1] - extent[0] distance_to_negative_y = centre[0] - extent[2] scaled_distance = max( [ distance_to_positive_x, distance_to_positive_y, distance_to_negative_x, distance_to_negative_y, ] ) if (scaled_distance == distance_to_positive_y) or ( scaled_distance == distance_to_negative_y ): pixel_scale = pixel_scales[0] else: pixel_scale = pixel_scales[1] if shape_slim == 0: shape_slim = sub_size * int((scaled_distance / pixel_scale)) + 1 grid_scaled_2d_slim_radii = np.zeros((shape_slim, 2)) grid_scaled_2d_slim_radii[:, 0] += centre[0] radii = centre[1] for slim_index in range(shape_slim): grid_scaled_2d_slim_radii[slim_index, 1] = radii radii += pixel_scale / sub_size return grid_scaled_2d_slim_radii @numba_util.jit() def grid_pixels_2d_slim_from( grid_scaled_2d_slim: np.ndarray, shape_native: Tuple[int, int], pixel_scales: Union[float, Tuple[float, float]], origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ Convert a slimmed grid of 2d (y,x) scaled coordinates to a slimmed grid of 2d (y,x) pixel coordinate values. Pixel coordinates are returned as floats such that they include the decimal offset from each pixel's top-left corner relative to the input scaled coordinate. The input and output grids are both slimmed and therefore shape (total_pixels, 2). The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to the highest (most positive) y scaled coordinate and lowest (most negative) x scaled coordinate on the gird. The scaled grid is defined by an origin and coordinates are shifted to this origin before computing their 1D grid pixel coordinate values. Parameters ---------- grid_scaled_2d_slim: np.ndarray The slimmed grid of 2D (y,x) coordinates in scaled units which are converted to pixel value coordinates. shape_native The (y,x) shape of the original 2D array the scaled coordinates were computed on. pixel_scales The (y,x) scaled units to pixel units conversion factor of the original 2D array. origin : (float, flloat) The (y,x) origin of the grid, which the scaled grid is shifted to. Returns ------- ndarray A slimmed grid of 2D (y,x) pixel-value coordinates with dimensions (total_pixels, 2). Examples -------- grid_scaled_2d_slim = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]]) grid_pixels_2d_slim = grid_scaled_2d_slim_from(grid_scaled_2d_slim=grid_scaled_2d_slim, shape=(2,2), pixel_scales=(0.5, 0.5), origin=(0.0, 0.0)) """ grid_pixels_2d_slim = np.zeros((grid_scaled_2d_slim.shape[0], 2)) centres_scaled = geometry_util.central_scaled_coordinate_2d_from( shape_native=shape_native, pixel_scales=pixel_scales, origin=origin ) for slim_index in range(grid_scaled_2d_slim.shape[0]): grid_pixels_2d_slim[slim_index, 0] = ( (-grid_scaled_2d_slim[slim_index, 0] / pixel_scales[0]) + centres_scaled[0] + 0.5 ) grid_pixels_2d_slim[slim_index, 1] = ( (grid_scaled_2d_slim[slim_index, 1] / pixel_scales[1]) + centres_scaled[1] + 0.5 ) return grid_pixels_2d_slim @numba_util.jit() def grid_pixel_centres_2d_slim_from( grid_scaled_2d_slim: np.ndarray, shape_native: Tuple[int, int], pixel_scales: Union[float, Tuple[float, float]], origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ Convert a slimmed grid of 2D (y,x) scaled coordinates to a slimmed grid of 2D (y,x) pixel values. Pixel coordinates are returned as integers such that they map directly to the pixel they are contained within. 
The input and output grids are both slimmed and therefore shape (total_pixels, 2). The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to the highest (most positive) y scaled coordinate and lowest (most negative) x scaled coordinate on the grid. The scaled coordinate grid is defined by the class attribute origin, and coordinates are shifted to this origin before computing their 1D grid pixel indexes. Parameters ---------- grid_scaled_2d_slim: np.ndarray The slimmed grid of 2D (y,x) coordinates in scaled units which is converted to pixel indexes. shape_native The (y,x) shape of the original 2D array the scaled coordinates were computed on. pixel_scales The (y,x) scaled units to pixel units conversion factor of the original 2D array. origin : (float, float) The (y,x) origin of the grid, which the scaled grid is shifted to. Returns ------- ndarray A slimmed grid of 2D (y,x) pixel indexes with dimensions (total_pixels, 2). Examples -------- grid_scaled_2d_slim = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]]) grid_pixels_2d_slim = grid_pixel_centres_2d_slim_from(grid_scaled_2d_slim=grid_scaled_2d_slim, shape=(2,2), pixel_scales=(0.5, 0.5), origin=(0.0, 0.0)) """ grid_pixels_2d_slim = np.zeros((grid_scaled_2d_slim.shape[0], 2)) centres_scaled = geometry_util.central_scaled_coordinate_2d_from( shape_native=shape_native, pixel_scales=pixel_scales, origin=origin ) for slim_index in range(grid_scaled_2d_slim.shape[0]): grid_pixels_2d_slim[slim_index, 0] = int( (-grid_scaled_2d_slim[slim_index, 0] / pixel_scales[0]) + centres_scaled[0] + 0.5 ) grid_pixels_2d_slim[slim_index, 1] = int( (grid_scaled_2d_slim[slim_index, 1] / pixel_scales[1]) + centres_scaled[1] + 0.5 ) return grid_pixels_2d_slim @numba_util.jit() def grid_pixel_indexes_2d_slim_from( grid_scaled_2d_slim: np.ndarray, shape_native: Tuple[int, int], pixel_scales: Union[float, Tuple[float, float]], origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ Convert a slimmed grid of 2D (y,x) scaled coordinates to a slimmed grid of pixel indexes. Pixel coordinates are returned as integers such that they are the pixel from the top-left of the 2D grid going right and then downwards. The input and output grids are both slimmed and have shapes (total_pixels, 2) and (total_pixels,). For example: The pixel at the top-left, whose native index is [0,0], corresponds to slimmed pixel index 0. The fifth pixel on the top row, whose native index is [0,4], corresponds to slimmed pixel index 4. The first pixel on the second row, whose native index is [1,0], has slimmed pixel index 10 if a row has 10 pixels. The scaled coordinate grid is defined by the class attribute origin, and coordinates are shifted to this origin before computing their 1D grid pixel indexes. The input grid is of shape (total_pixels, 2) and the output grid of shape (total_pixels,). Parameters ---------- grid_scaled_2d_slim: np.ndarray The slimmed grid of 2D (y,x) coordinates in scaled units which is converted to slimmed pixel indexes. shape_native The (y,x) shape of the original 2D array the scaled coordinates were computed on. pixel_scales The (y,x) scaled units to pixel units conversion factor of the original 2D array. origin : (float, float) The (y,x) origin of the grid, which the scaled grid is shifted to. Returns ------- ndarray A grid of slimmed pixel indexes with dimensions (total_pixels,).
Examples -------- grid_scaled_2d_slim = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]]) grid_pixel_indexes_2d_slim = grid_pixel_indexes_2d_slim_from(grid_scaled_2d_slim=grid_scaled_2d_slim, shape=(2,2), pixel_scales=(0.5, 0.5), origin=(0.0, 0.0)) """ grid_pixels_2d_slim = grid_pixel_centres_2d_slim_from( grid_scaled_2d_slim=grid_scaled_2d_slim, shape_native=shape_native, pixel_scales=pixel_scales, origin=origin, ) grid_pixel_indexes_2d_slim = np.zeros(grid_pixels_2d_slim.shape[0]) for slim_index in range(grid_pixels_2d_slim.shape[0]): grid_pixel_indexes_2d_slim[slim_index] = int( grid_pixels_2d_slim[slim_index, 0] * shape_native[1] + grid_pixels_2d_slim[slim_index, 1] ) return grid_pixel_indexes_2d_slim @numba_util.jit() def grid_scaled_2d_slim_from( grid_pixels_2d_slim: np.ndarray, shape_native: Tuple[int, int], pixel_scales: Union[float, Tuple[float, float]], origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ Convert a slimmed grid of 2D (y,x) pixel coordinates to a slimmed grid of 2D (y,x) scaled values. The input and output grids are both slimmed and therefore shape (total_pixels, 2). The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to the highest (most positive) y scaled coordinate and lowest (most negative) x scaled coordinate on the gird. The scaled coordinate origin is defined by the class attribute origin, and coordinates are shifted to this origin after computing their values from the 1D grid pixel indexes. Parameters ---------- grid_pixels_2d_slim: np.ndarray The slimmed grid of (y,x) coordinates in pixel values which is converted to scaled coordinates. shape_native The (y,x) shape of the original 2D array the scaled coordinates were computed on. pixel_scales The (y,x) scaled units to pixel units conversion factor of the original 2D array. origin : (float, flloat) The (y,x) origin of the grid, which the scaled grid is shifted. Returns ------- ndarray A slimmed grid of 2d scaled coordinates with dimensions (total_pixels, 2). Examples -------- grid_pixels_2d_slim = np.array([[0,0], [0,1], [1,0], [1,1]) grid_pixels_2d_slim = grid_scaled_2d_slim_from(grid_pixels_2d_slim=grid_pixels_2d_slim, shape=(2,2), pixel_scales=(0.5, 0.5), origin=(0.0, 0.0)) """ grid_scaled_2d_slim = np.zeros((grid_pixels_2d_slim.shape[0], 2)) centres_scaled = geometry_util.central_scaled_coordinate_2d_from( shape_native=shape_native, pixel_scales=pixel_scales, origin=origin ) for slim_index in range(grid_scaled_2d_slim.shape[0]): grid_scaled_2d_slim[slim_index, 0] = ( -(grid_pixels_2d_slim[slim_index, 0] - centres_scaled[0] - 0.5) * pixel_scales[0] ) grid_scaled_2d_slim[slim_index, 1] = ( grid_pixels_2d_slim[slim_index, 1] - centres_scaled[1] - 0.5 ) * pixel_scales[1] return grid_scaled_2d_slim @numba_util.jit() def grid_pixel_centres_2d_from( grid_scaled_2d: np.ndarray, shape_native: Tuple[int, int], pixel_scales: Union[float, Tuple[float, float]], origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ Convert a native grid of 2D (y,x) scaled coordinates to a native grid of 2D (y,x) pixel values. Pixel coordinates are returned as integers such that they map directly to the pixel they are contained within. The input and output grids are both native resolution and therefore have shape (y_pixels, x_pixels, 2). The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to the highest (most positive) y scaled coordinate and lowest (most negative) x scaled coordinate on the gird. 
The scaled coordinate grid is defined by the class attribute origin, and coordinates are shifted to this origin before computing their 1D grid pixel indexes. Parameters ---------- grid_scaled_2d: np.ndarray The native grid of 2D (y,x) coordinates in scaled units which is converted to pixel indexes. shape_native The (y,x) shape of the original 2D array the scaled coordinates were computed on. pixel_scales The (y,x) scaled units to pixel units conversion factor of the original 2D array. origin : (float, flloat) The (y,x) origin of the grid, which the scaled grid is shifted Returns ------- ndarray A native grid of 2D (y,x) pixel indexes with dimensions (y_pixels, x_pixels, 2). Examples -------- grid_scaled_2d = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]]) grid_pixel_centres_2d = grid_pixel_centres_2d_from(grid_scaled_2d=grid_scaled_2d, shape=(2,2), pixel_scales=(0.5, 0.5), origin=(0.0, 0.0)) """ grid_pixels_2d = np.zeros((grid_scaled_2d.shape[0], grid_scaled_2d.shape[1], 2)) centres_scaled = geometry_util.central_scaled_coordinate_2d_from( shape_native=shape_native, pixel_scales=pixel_scales, origin=origin ) for y in range(grid_scaled_2d.shape[0]): for x in range(grid_scaled_2d.shape[1]): grid_pixels_2d[y, x, 0] = int( (-grid_scaled_2d[y, x, 0] / pixel_scales[0]) + centres_scaled[0] + 0.5 ) grid_pixels_2d[y, x, 1] = int( (grid_scaled_2d[y, x, 1] / pixel_scales[1]) + centres_scaled[1] + 0.5 ) return grid_pixels_2d @numba_util.jit() def relocated_grid_via_jit_from(grid, border_grid): """ Relocate the coordinates of a grid to its border if they are outside the border, where the border is defined as all pixels at the edge of the grid's mask (see *mask._border_1d_indexes*). This is performed as follows: 1: Use the mean value of the grid's y and x coordinates to determine the origin of the grid. 2: Compute the radial distance of every grid coordinate from the origin. 3: For every coordinate, find its nearest pixel in the border. 4: Determine if it is outside the border, by comparing its radial distance from the origin to its paired border pixel's radial distance. 5: If its radial distance is larger, use the ratio of radial distances to move the coordinate to the border (if its inside the border, do nothing). The method can be used on uniform or irregular grids, however for irregular grids the border of the 'image-plane' mask is used to define border pixels. Parameters ---------- grid : Grid2D The grid (uniform or irregular) whose pixels are to be relocated to the border edge if outside it. border_grid : Grid2D The grid of border (y,x) coordinates. """ grid_relocated = np.zeros(grid.shape) grid_relocated[:, :] = grid[:, :] border_origin = np.zeros(2) border_origin[0] = np.mean(border_grid[:, 0]) border_origin[1] = np.mean(border_grid[:, 1]) border_grid_radii = np.sqrt( np.add( np.square(np.subtract(border_grid[:, 0], border_origin[0])), np.square(
np.subtract(border_grid[:, 1], border_origin[1])
numpy.subtract
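np.subtract is the ufunc form of the - operator; the jitted border-relocation routine above spells the arithmetic out as explicit ufunc calls (np.subtract, np.square, np.add, np.sqrt) applied to whole coordinate columns at once. A small self-contained sketch of the radial-distance computation the completion finishes, using a made-up square border grid rather than real autoarray data:

import numpy as np

border_grid = np.array([[1.0, 0.0], [0.0, 1.0], [-1.0, 0.0], [0.0, -1.0]])  # illustrative (y,x) border points
border_origin = np.array([np.mean(border_grid[:, 0]), np.mean(border_grid[:, 1])])

# sqrt((y - y0)^2 + (x - x0)^2), elementwise over the whole column, as in the function above
border_grid_radii = np.sqrt(
    np.add(
        np.square(np.subtract(border_grid[:, 0], border_origin[0])),
        np.square(np.subtract(border_grid[:, 1], border_origin[1])),
    )
)
assert np.allclose(border_grid_radii, 1.0)  # all four points lie at unit radius from the origin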
import gym import numpy as np from itertools import product import matplotlib.pyplot as plt def print_policy(Q, env): """ This is a helper function to print a nice policy from the Q function""" moves = [u'←', u'↓',u'→', u'↑'] if not hasattr(env, 'desc'): env = env.env dims = env.desc.shape policy = np.chararray(dims, unicode=True) policy[:] = ' ' for s in range(len(Q)): idx = np.unravel_index(s, dims) policy[idx] = moves[np.argmax(Q[s])] if env.desc[idx] in ['H', 'G']: policy[idx] = u'·' print('\n'.join([''.join([u'{:2}'.format(item) for item in row]) for row in policy])) def plot_V(Q, env): """ This is a helper function to plot the state values from the Q function""" fig = plt.figure() if not hasattr(env, 'desc'): env = env.env dims = env.desc.shape V = np.zeros(dims) for s in range(len(Q)): idx = np.unravel_index(s, dims) V[idx] = np.max(Q[s]) if env.desc[idx] in ['H', 'G']: V[idx] = 0. plt.imshow(V, origin='upper', extent=[0,dims[0],0,dims[1]], vmin=.0, vmax=.6, cmap=plt.cm.RdYlGn, interpolation='none') for x, y in product(range(dims[0]), range(dims[1])): plt.text(y+0.5, dims[0]-x-0.5, '{:.3f}'.format(V[x,y]), horizontalalignment='center', verticalalignment='center') plt.xticks([]) plt.yticks([]) def plot_Q(Q, env): """ This is a helper function to plot the Q function """ from matplotlib import colors, patches fig = plt.figure() ax = fig.gca() if not hasattr(env, 'desc'): env = env.env dims = env.desc.shape up = np.array([[0, 1], [0.5, 0.5], [1,1]]) down = np.array([[0, 0], [0.5, 0.5], [1,0]]) left = np.array([[0, 0], [0.5, 0.5], [0,1]]) right = np.array([[1, 0], [0.5, 0.5], [1,1]]) tri = [left, down, right, up] pos = [[0.2, 0.5], [0.5, 0.2], [0.8, 0.5], [0.5, 0.8]] cmap = plt.cm.RdYlGn norm = colors.Normalize(vmin=.0,vmax=.6) ax.imshow(np.zeros(dims), origin='upper', extent=[0,dims[0],0,dims[1]], vmin=.0, vmax=.6, cmap=cmap) ax.grid(which='major', color='black', linestyle='-', linewidth=2) for s in range(len(Q)): idx =
np.unravel_index(s, dims)
numpy.unravel_index
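np.unravel_index is what lets print_policy and plot_V address the flat state index s as a (row, column) cell of the 2D env.desc layout. A short sketch with an assumed 4x4 FrozenLake-style grid shape:

import numpy as np

dims = (4, 4)  # assumed grid shape, e.g. the 4x4 FrozenLake map
row, col = np.unravel_index(6, dims)  # flat state 6, row-major order
assert (row, col) == (1, 2)
assert np.ravel_multi_index((row, col), dims) == 6  # the inverse mapping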
"""Test the search module""" from collections.abc import Iterable, Sized from io import StringIO from itertools import chain, product from functools import partial import pickle import sys from types import GeneratorType import re import numpy as np import scipy.sparse as sp import pytest from sklearn.utils.fixes import sp_version from sklearn.utils._testing import assert_raises from sklearn.utils._testing import assert_warns from sklearn.utils._testing import assert_warns_message from sklearn.utils._testing import assert_raise_message from sklearn.utils._testing import assert_array_equal from sklearn.utils._testing import assert_array_almost_equal from sklearn.utils._testing import assert_allclose from sklearn.utils._testing import assert_almost_equal from sklearn.utils._testing import ignore_warnings from sklearn.utils._mocking import CheckingClassifier, MockDataFrame from scipy.stats import bernoulli, expon, uniform from sklearn.base import BaseEstimator, ClassifierMixin from sklearn.base import clone from sklearn.exceptions import NotFittedError from sklearn.datasets import make_classification from sklearn.datasets import make_blobs from sklearn.datasets import make_multilabel_classification from sklearn.model_selection import fit_grid_point from sklearn.model_selection import train_test_split from sklearn.model_selection import KFold from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import StratifiedShuffleSplit from sklearn.model_selection import LeaveOneGroupOut from sklearn.model_selection import LeavePGroupsOut from sklearn.model_selection import GroupKFold from sklearn.model_selection import GroupShuffleSplit from sklearn.model_selection import GridSearchCV from sklearn.model_selection import RandomizedSearchCV from sklearn.model_selection import ParameterGrid from sklearn.model_selection import ParameterSampler from sklearn.model_selection._search import BaseSearchCV from sklearn.model_selection._validation import FitFailedWarning from sklearn.svm import LinearSVC, SVC from sklearn.tree import DecisionTreeRegressor from sklearn.tree import DecisionTreeClassifier from sklearn.cluster import KMeans from sklearn.neighbors import KernelDensity from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import f1_score from sklearn.metrics import recall_score from sklearn.metrics import accuracy_score from sklearn.metrics import make_scorer from sklearn.metrics import roc_auc_score from sklearn.metrics.pairwise import euclidean_distances from sklearn.impute import SimpleImputer from sklearn.pipeline import Pipeline from sklearn.linear_model import Ridge, SGDClassifier, LinearRegression from sklearn.experimental import enable_hist_gradient_boosting # noqa from sklearn.ensemble import HistGradientBoostingClassifier from sklearn.model_selection.tests.common import OneTimeSplitter # Neither of the following two estimators inherit from BaseEstimator, # to test hyperparameter search on user-defined classifiers. class MockClassifier: """Dummy classifier to test the parameter search algorithms""" def __init__(self, foo_param=0): self.foo_param = foo_param def fit(self, X, Y): assert len(X) == len(Y) self.classes_ = np.unique(Y) return self def predict(self, T): return T.shape[0] def transform(self, X): return X + self.foo_param def inverse_transform(self, X): return X - self.foo_param predict_proba = predict predict_log_proba = predict decision_function = predict def score(self, X=None, Y=None): if self.foo_param > 1: score = 1. else: score = 0. 
return score def get_params(self, deep=False): return {'foo_param': self.foo_param} def set_params(self, **params): self.foo_param = params['foo_param'] return self class LinearSVCNoScore(LinearSVC): """An LinearSVC classifier that has no score method.""" @property def score(self): raise AttributeError X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) y = np.array([1, 1, 2, 2]) def assert_grid_iter_equals_getitem(grid): assert list(grid) == [grid[i] for i in range(len(grid))] @pytest.mark.parametrize("klass", [ParameterGrid, partial(ParameterSampler, n_iter=10)]) @pytest.mark.parametrize( "input, error_type, error_message", [(0, TypeError, r'Parameter .* is not a dict or a list \(0\)'), ([{'foo': [0]}, 0], TypeError, r'Parameter .* is not a dict \(0\)'), ({'foo': 0}, TypeError, "Parameter.* value is not iterable .*" r"\(key='foo', value=0\)")] ) def test_validate_parameter_input(klass, input, error_type, error_message): with pytest.raises(error_type, match=error_message): klass(input) def test_parameter_grid(): # Test basic properties of ParameterGrid. params1 = {"foo": [1, 2, 3]} grid1 = ParameterGrid(params1) assert isinstance(grid1, Iterable) assert isinstance(grid1, Sized) assert len(grid1) == 3 assert_grid_iter_equals_getitem(grid1) params2 = {"foo": [4, 2], "bar": ["ham", "spam", "eggs"]} grid2 = ParameterGrid(params2) assert len(grid2) == 6 # loop to assert we can iterate over the grid multiple times for i in range(2): # tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2) points = set(tuple(chain(*(sorted(p.items())))) for p in grid2) assert (points == set(("bar", x, "foo", y) for x, y in product(params2["bar"], params2["foo"]))) assert_grid_iter_equals_getitem(grid2) # Special case: empty grid (useful to get default estimator settings) empty = ParameterGrid({}) assert len(empty) == 1 assert list(empty) == [{}] assert_grid_iter_equals_getitem(empty) assert_raises(IndexError, lambda: empty[1]) has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}]) assert len(has_empty) == 4 assert list(has_empty) == [{'C': 1}, {'C': 10}, {}, {'C': .5}] assert_grid_iter_equals_getitem(has_empty) def test_grid_search(): # Test that the best estimator contains the right value for foo_param clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=3, verbose=3) # make sure it selects the smallest parameter in case of ties old_stdout = sys.stdout sys.stdout = StringIO() grid_search.fit(X, y) sys.stdout = old_stdout assert grid_search.best_estimator_.foo_param == 2 assert_array_equal(grid_search.cv_results_["param_foo_param"].data, [1, 2, 3]) # Smoke test the score etc: grid_search.score(X, y) grid_search.predict_proba(X) grid_search.decision_function(X) grid_search.transform(X) # Test exception handling on scoring grid_search.scoring = 'sklearn' assert_raises(ValueError, grid_search.fit, X, y) def test_grid_search_pipeline_steps(): # check that parameters that are estimators are cloned before fitting pipe = Pipeline([('regressor', LinearRegression())]) param_grid = {'regressor': [LinearRegression(), Ridge()]} grid_search = GridSearchCV(pipe, param_grid, cv=2) grid_search.fit(X, y) regressor_results = grid_search.cv_results_['param_regressor'] assert isinstance(regressor_results[0], LinearRegression) assert isinstance(regressor_results[1], Ridge) assert not hasattr(regressor_results[0], 'coef_') assert not hasattr(regressor_results[1], 'coef_') assert regressor_results[0] is not grid_search.best_estimator_ assert regressor_results[1] is not 
grid_search.best_estimator_ # check that we didn't modify the parameter grid that was passed assert not hasattr(param_grid['regressor'][0], 'coef_') assert not hasattr(param_grid['regressor'][1], 'coef_') @pytest.mark.parametrize("SearchCV", [GridSearchCV, RandomizedSearchCV]) def test_SearchCV_with_fit_params(SearchCV): X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = CheckingClassifier(expected_fit_params=['spam', 'eggs']) searcher = SearchCV( clf, {'foo_param': [1, 2, 3]}, cv=2, error_score="raise" ) # The CheckingClassifier generates an assertion error if # a parameter is missing or has length != len(X). err_msg = r"Expected fit parameter\(s\) \['eggs'\] not seen." with pytest.raises(AssertionError, match=err_msg): searcher.fit(X, y, spam=np.ones(10)) err_msg = "Fit parameter spam has length 1; expected" with pytest.raises(AssertionError, match=err_msg): searcher.fit(X, y, spam=
np.ones(1)
numpy.ones
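np.ones here supplies the deliberately wrong-length fit parameter: CheckingClassifier asserts that each fit parameter has one entry per sample, so a length-1 array triggers the "has length 1; expected" message the test matches. A sketch of the shape contract being exercised, standalone and with illustrative values only:

import numpy as np

X = np.arange(100).reshape(10, 10)
spam_ok = np.ones(10)  # one value per sample: satisfies the length check
spam_bad = np.ones(1)  # wrong length: what the test passes to trigger the error
assert len(spam_ok) == len(X)
assert len(spam_bad) != len(X)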
# coding: utf-8 # Licensed under a 3-clause BSD style license - see LICENSE.rst """ Test the Logarithmic Units and Quantities """ from __future__ import (absolute_import, unicode_literals, division, print_function) from ...extern import six from ...extern.six.moves import zip import pickle import itertools import pytest import numpy as np from numpy.testing.utils import assert_allclose from ...tests.helper import assert_quantity_allclose from ... import units as u, constants as c lu_units = [u.dex, u.mag, u.decibel] lu_subclasses = [u.DexUnit, u.MagUnit, u.DecibelUnit] lq_subclasses = [u.Dex, u.Magnitude, u.Decibel] pu_sample = (u.dimensionless_unscaled, u.m, u.g/u.s**2, u.Jy) class TestLogUnitCreation(object): def test_logarithmic_units(self): """Check logarithmic units are set up correctly.""" assert u.dB.to(u.dex) == 0.1 assert u.dex.to(u.mag) == -2.5 assert u.mag.to(u.dB) == -4 @pytest.mark.parametrize('lu_unit, lu_cls', zip(lu_units, lu_subclasses)) def test_callable_units(self, lu_unit, lu_cls): assert isinstance(lu_unit, u.UnitBase) assert callable(lu_unit) assert lu_unit._function_unit_class is lu_cls @pytest.mark.parametrize('lu_unit', lu_units) def test_equality_to_normal_unit_for_dimensionless(self, lu_unit): lu = lu_unit() assert lu == lu._default_function_unit # eg, MagUnit() == u.mag assert lu._default_function_unit == lu # and u.mag == MagUnit() @pytest.mark.parametrize('lu_unit, physical_unit', itertools.product(lu_units, pu_sample)) def test_call_units(self, lu_unit, physical_unit): """Create a LogUnit subclass using the callable unit and physical unit, and do basic check that output is right.""" lu1 = lu_unit(physical_unit) assert lu1.physical_unit == physical_unit assert lu1.function_unit == lu1._default_function_unit def test_call_invalid_unit(self): with pytest.raises(TypeError): u.mag([]) with pytest.raises(ValueError): u.mag(u.mag()) @pytest.mark.parametrize('lu_cls, physical_unit', itertools.product( lu_subclasses + [u.LogUnit], pu_sample)) def test_subclass_creation(self, lu_cls, physical_unit): """Create a LogUnit subclass object for given physical unit, and do basic check that output is right.""" lu1 = lu_cls(physical_unit) assert lu1.physical_unit == physical_unit assert lu1.function_unit == lu1._default_function_unit lu2 = lu_cls(physical_unit, function_unit=2*lu1._default_function_unit) assert lu2.physical_unit == physical_unit assert lu2.function_unit == u.Unit(2*lu2._default_function_unit) with pytest.raises(ValueError): lu_cls(physical_unit, u.m) def test_predefined_magnitudes(): assert_quantity_allclose((-21.1*u.STmag).physical, 1.*u.erg/u.cm**2/u.s/u.AA) assert_quantity_allclose((-48.6*u.ABmag).physical, 1.*u.erg/u.cm**2/u.s/u.Hz) assert_quantity_allclose((0*u.M_bol).physical, c.L_bol0) assert_quantity_allclose((0*u.m_bol).physical, c.L_bol0/(4.*np.pi*(10.*c.pc)**2)) def test_predefined_reinitialisation(): assert u.mag('ST') == u.STmag assert u.mag('AB') == u.ABmag assert u.mag('Bol') == u.M_bol assert u.mag('bol') == u.m_bol def test_predefined_string_roundtrip(): """Ensure roundtripping; see #5015""" with u.magnitude_zero_points.enable(): assert u.Unit(u.STmag.to_string()) == u.STmag assert u.Unit(u.ABmag.to_string()) == u.ABmag assert u.Unit(u.M_bol.to_string()) == u.M_bol assert u.Unit(u.m_bol.to_string()) == u.m_bol def test_inequality(): """Check __ne__ works (regresssion for #5342).""" lu1 = u.mag(u.Jy) lu2 = u.dex(u.Jy) lu3 = u.mag(u.Jy**2) lu4 = lu3 - lu1 assert lu1 != lu2 assert lu1 != lu3 assert lu1 == lu4 class TestLogUnitStrings(object): def 
test_str(self): """Do some spot checks that str, repr, etc. work as expected.""" lu1 = u.mag(u.Jy) assert str(lu1) == 'mag(Jy)' assert repr(lu1) == 'Unit("mag(Jy)")' assert lu1.to_string('generic') == 'mag(Jy)' with pytest.raises(ValueError): lu1.to_string('fits') lu2 = u.dex() assert str(lu2) == 'dex' assert repr(lu2) == 'Unit("dex(1)")' assert lu2.to_string() == 'dex(1)' lu3 = u.MagUnit(u.Jy, function_unit=2*u.mag) assert str(lu3) == '2 mag(Jy)' assert repr(lu3) == 'MagUnit("Jy", unit="2 mag")' assert lu3.to_string() == '2 mag(Jy)' lu4 = u.mag(u.ct) assert lu4.to_string('generic') == 'mag(ct)' assert lu4.to_string('latex') == ('$\\mathrm{mag}$$\\mathrm{\\left( ' '\\mathrm{ct} \\right)}$') assert lu4._repr_latex_() == lu4.to_string('latex') class TestLogUnitConversion(object): @pytest.mark.parametrize('lu_unit, physical_unit', itertools.product(lu_units, pu_sample)) def test_physical_unit_conversion(self, lu_unit, physical_unit): """Check various LogUnit subclasses are equivalent and convertible to their non-log counterparts.""" lu1 = lu_unit(physical_unit) assert lu1.is_equivalent(physical_unit) assert lu1.to(physical_unit, 0.) == 1. assert physical_unit.is_equivalent(lu1) assert physical_unit.to(lu1, 1.) == 0. pu = u.Unit(8.*physical_unit) assert lu1.is_equivalent(physical_unit) assert lu1.to(pu, 0.) == 0.125 assert pu.is_equivalent(lu1) assert_allclose(pu.to(lu1, 0.125), 0., atol=1.e-15) # Check we round-trip. value = np.linspace(0., 10., 6) assert_allclose(pu.to(lu1, lu1.to(pu, value)), value, atol=1.e-15) # And that we're not just returning True all the time. pu2 = u.g assert not lu1.is_equivalent(pu2) with pytest.raises(u.UnitsError): lu1.to(pu2) assert not pu2.is_equivalent(lu1) with pytest.raises(u.UnitsError): pu2.to(lu1) @pytest.mark.parametrize('lu_unit', lu_units) def test_container_unit_conversion(self, lu_unit): """Check that conversion to logarithmic units (u.mag, u.dB, u.dex) is only possible when the physical unit is dimensionless.""" values = np.linspace(0., 10., 6) lu1 = lu_unit(u.dimensionless_unscaled) assert lu1.is_equivalent(lu1.function_unit) assert_allclose(lu1.to(lu1.function_unit, values), values) lu2 = lu_unit(u.Jy) assert not lu2.is_equivalent(lu2.function_unit) with pytest.raises(u.UnitsError): lu2.to(lu2.function_unit, values) @pytest.mark.parametrize( 'flu_unit, tlu_unit, physical_unit', itertools.product(lu_units, lu_units, pu_sample)) def test_subclass_conversion(self, flu_unit, tlu_unit, physical_unit): """Check various LogUnit subclasses are equivalent and convertible to each other if they correspond to equivalent physical units.""" values = np.linspace(0., 10., 6) flu = flu_unit(physical_unit) tlu = tlu_unit(physical_unit) assert flu.is_equivalent(tlu) assert_allclose(flu.to(tlu), flu.function_unit.to(tlu.function_unit)) assert_allclose(flu.to(tlu, values), values * flu.function_unit.to(tlu.function_unit)) tlu2 = tlu_unit(u.Unit(100.*physical_unit)) assert flu.is_equivalent(tlu2) # Check that we round-trip. 
    def test_unit_decomposition(self):
        lu = u.mag(u.Jy)
        assert lu.decompose() == u.mag(u.Jy.decompose())
        assert lu.decompose().physical_unit.bases == [u.kg, u.s]
        assert lu.si == u.mag(u.Jy.si)
        assert lu.si.physical_unit.bases == [u.kg, u.s]
        assert lu.cgs == u.mag(u.Jy.cgs)
        assert lu.cgs.physical_unit.bases == [u.g, u.s]

    def test_unit_multiple_possible_equivalencies(self):
        lu = u.mag(u.Jy)
        assert lu.is_equivalent(pu_sample)


class TestLogUnitArithmetic(object):

    def test_multiplication_division(self):
        """Check that multiplication/division with other units is only
        possible when the physical unit is dimensionless, and that this
        turns the unit into a normal one."""
        lu1 = u.mag(u.Jy)

        with pytest.raises(u.UnitsError):
            lu1 * u.m

        with pytest.raises(u.UnitsError):
            u.m * lu1

        with pytest.raises(u.UnitsError):
            lu1 / lu1

        for unit in (u.dimensionless_unscaled, u.m, u.mag, u.dex):
            with pytest.raises(u.UnitsError):
                lu1 / unit

        lu2 = u.mag(u.dimensionless_unscaled)

        with pytest.raises(u.UnitsError):
            lu2 * lu1

        with pytest.raises(u.UnitsError):
            lu2 / lu1

        # But dimensionless_unscaled can be cancelled.
        assert lu2 / lu2 == u.dimensionless_unscaled

        # With dimensionless, normal units are OK, but we return a plain unit.
        tf = lu2 * u.m
        tr = u.m * lu2
        for t in (tf, tr):
            assert not isinstance(t, type(lu2))
            assert t == lu2.function_unit * u.m
            with u.set_enabled_equivalencies(u.logarithmic()):
                with pytest.raises(u.UnitsError):
                    t.to(lu2.physical_unit)

        # Now we essentially have a LogUnit with a prefactor of 100,
        # so should be equivalent again.
        t = tf / u.cm
        with u.set_enabled_equivalencies(u.logarithmic()):
            assert t.is_equivalent(lu2.function_unit)
            assert_allclose(t.to(u.dimensionless_unscaled,
                                 np.arange(3.)/100.),
                            lu2.to(lu2.physical_unit, np.arange(3.)))

        # If we effectively remove lu2, a normal unit should be returned.
        t2 = tf / lu2
        assert not isinstance(t2, type(lu2))
        assert t2 == u.m
        t3 = tf / lu2.function_unit
        assert not isinstance(t3, type(lu2))
        assert t3 == u.m

        # For completeness, also ensure nonsensical operations fail.
        with pytest.raises(TypeError):
            lu1 * object()
        with pytest.raises(TypeError):
            slice(None) * lu1
        with pytest.raises(TypeError):
            lu1 / []
        with pytest.raises(TypeError):
            1 / lu1

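    # Aside (sketch, not part of the original suite; same API as above): the
    # dimensionless rule in brief. A log unit with a physical unit attached
    # cannot absorb another unit, while a bare one degrades to a plain
    # composite unit:
    #
    #     u.mag(u.Jy) * u.m   # raises u.UnitsError
    #     u.mag() * u.m       # -> Unit("mag m"), an ordinary composite unit
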
    @pytest.mark.parametrize('power', (2, 0.5, 1, 0))
    def test_raise_to_power(self, power):
        """Check that raising LogUnits to some power is only possible when the
        physical unit is dimensionless, and that conversion is turned off when
        the resulting logarithmic unit (such as mag**2) is incompatible."""
        lu1 = u.mag(u.Jy)

        if power == 0:
            assert lu1 ** power == u.dimensionless_unscaled
        elif power == 1:
            assert lu1 ** power == lu1
        else:
            with pytest.raises(u.UnitsError):
                lu1 ** power

        # With dimensionless, though, it works, but returns a normal unit.
        lu2 = u.mag(u.dimensionless_unscaled)
        t = lu2**power
        if power == 0:
            assert t == u.dimensionless_unscaled
        elif power == 1:
            assert t == lu2
        else:
            assert not isinstance(t, type(lu2))
            assert t == lu2.function_unit**power
            # also check we roundtrip
            t2 = t**(1./power)
            assert t2 == lu2.function_unit
            with u.set_enabled_equivalencies(u.logarithmic()):
                assert_allclose(t2.to(u.dimensionless_unscaled,
                                      np.arange(3.)),
                                lu2.to(lu2.physical_unit, np.arange(3.)))

    @pytest.mark.parametrize('other', pu_sample)
    def test_addition_subtraction_to_normal_units_fails(self, other):
        lu1 = u.mag(u.Jy)
        with pytest.raises(u.UnitsError):
            lu1 + other

        with pytest.raises(u.UnitsError):
            lu1 - other

        with pytest.raises(u.UnitsError):
            other - lu1

    def test_addition_subtraction_to_non_units_fails(self):
        lu1 = u.mag(u.Jy)
        with pytest.raises(TypeError):
            lu1 + 1.

        with pytest.raises(TypeError):
            lu1 - [1., 2., 3.]

    @pytest.mark.parametrize(
        'other', (u.mag, u.mag(), u.mag(u.Jy), u.mag(u.m), u.Unit(2*u.mag),
                  u.MagUnit('', 2.*u.mag)))
    def test_addition_subtraction(self, other):
        """Check physical units are changed appropriately"""
        lu1 = u.mag(u.Jy)
        other_pu = getattr(other, 'physical_unit', u.dimensionless_unscaled)

        lu_sf = lu1 + other
        assert lu_sf.is_equivalent(lu1.physical_unit * other_pu)

        lu_sr = other + lu1
        assert lu_sr.is_equivalent(lu1.physical_unit * other_pu)

        lu_df = lu1 - other
        assert lu_df.is_equivalent(lu1.physical_unit / other_pu)

        lu_dr = other - lu1
        assert lu_dr.is_equivalent(other_pu / lu1.physical_unit)

    def test_complicated_addition_subtraction(self):
        """for fun, a more complicated example of addition and subtraction"""
        dm0 = u.Unit('DM', 1./(4.*np.pi*(10.*u.pc)**2))
        lu_dm = u.mag(dm0)

        lu_absST = u.STmag - lu_dm
        assert lu_absST.is_equivalent(u.erg/u.s/u.AA)

    def test_neg_pos(self):
        lu1 = u.mag(u.Jy)
        neg_lu = -lu1
        assert neg_lu != lu1
        assert neg_lu.physical_unit == u.Jy**-1
        assert -neg_lu == lu1
        pos_lu = +lu1
        assert pos_lu is not lu1
        assert pos_lu == lu1


def test_pickle():
    lu1 = u.dex(u.cm/u.s**2)
    s = pickle.dumps(lu1)
    lu2 = pickle.loads(s)
    assert lu1 == lu2


def test_hashable():
    lu1 = u.dB(u.mW)
    lu2 = u.dB(u.m)
    lu3 = u.dB(u.mW)
    assert hash(lu1) != hash(lu2)
    assert hash(lu1) == hash(lu3)
    luset = {lu1, lu2, lu3}
    assert len(luset) == 2


class TestLogQuantityCreation(object):

    @pytest.mark.parametrize('lq, lu', zip(lq_subclasses + [u.LogQuantity],
                                           lu_subclasses + [u.LogUnit]))
    def test_logarithmic_quantities(self, lq, lu):
        """Check logarithmic quantities are all set up correctly"""
        assert lq._unit_class == lu
        assert type(lu()._quantity_class(1.)) is lq

    @pytest.mark.parametrize('lq_cls, physical_unit',
                             itertools.product(lq_subclasses, pu_sample))
    def test_subclass_creation(self, lq_cls, physical_unit):
        """Create LogQuantity subclass objects for some physical units,
        and basic check on transformations"""
        value = np.arange(1., 10.)
        log_q = lq_cls(value * physical_unit)
        assert log_q.unit.physical_unit == physical_unit
        assert log_q.unit.function_unit == log_q.unit._default_function_unit
        assert_allclose(log_q.physical.value, value)
        with pytest.raises(ValueError):
            lq_cls(value, physical_unit)

    @pytest.mark.parametrize(
        'unit', (u.mag, u.mag(), u.mag(u.Jy), u.mag(u.m), u.Unit(2*u.mag),
                 u.MagUnit('', 2.*u.mag), u.MagUnit(u.Jy, -1*u.mag),
                 u.MagUnit(u.m, -2.*u.mag)))
    def test_different_units(self, unit):
        q = u.Magnitude(1.23, unit)
        assert q.unit.function_unit == getattr(unit, 'function_unit', unit)
        assert q.unit.physical_unit is getattr(unit, 'physical_unit',
                                               u.dimensionless_unscaled)

    @pytest.mark.parametrize('value, unit', (
        (1.*u.mag(u.Jy), None),
        (1.*u.dex(u.Jy), None),
        (1.*u.mag(u.W/u.m**2/u.Hz), u.mag(u.Jy)),
        (1.*u.dex(u.W/u.m**2/u.Hz), u.mag(u.Jy))))
    def test_function_values(self, value, unit):
        lq = u.Magnitude(value, unit)
        assert lq == value
        assert lq.unit.function_unit == u.mag
        assert lq.unit.physical_unit == getattr(unit, 'physical_unit',
                                                value.unit.physical_unit)

    @pytest.mark.parametrize(
        'unit', (u.mag(), u.mag(u.Jy), u.mag(u.m), u.MagUnit('', 2.*u.mag),
                 u.MagUnit(u.Jy, -1*u.mag), u.MagUnit(u.m, -2.*u.mag)))
    def test_indirect_creation(self, unit):
        q1 = 2.5 * unit
        assert isinstance(q1, u.Magnitude)
        assert q1.value == 2.5
        assert q1.unit == unit

        pv = 100. * unit.physical_unit
        q2 = unit * pv
        assert q2.unit == unit
        assert q2.unit.physical_unit == pv.unit
        assert q2.to_value(unit.physical_unit) == 100.
        assert (q2._function_view / u.mag).to_value(1) == -5.

        q3 = unit / 0.4
        assert q3 == q1

    def test_from_view(self):
        # Cannot view a physical quantity as a function quantity, since the
        # values would change.
        q = [100., 1000.] * u.cm/u.s**2
        with pytest.raises(TypeError):
            q.view(u.Dex)
        # But fine if we have the right magnitude.
        q = [2., 3.] * u.dex
        lq = q.view(u.Dex)
        assert isinstance(lq, u.Dex)
        assert lq.unit.physical_unit == u.dimensionless_unscaled
        assert np.all(q == lq)

    def test_using_quantity_class(self):
        """Check that we can use Quantity if we have subok=True"""
        # following issue #5851
        lu = u.dex(u.AA)
        with pytest.raises(u.UnitTypeError):
            u.Quantity(1., lu)
        q = u.Quantity(1., lu, subok=True)
        assert type(q) is lu._quantity_class


def test_conversion_to_and_from_physical_quantities():
    """Ensures we can convert from regular quantities."""
    mst = [10., 12., 14.] * u.STmag
    flux_lambda = mst.physical
    mst_roundtrip = flux_lambda.to(u.STmag)
    # check we return a logquantity; see #5178.
    assert isinstance(mst_roundtrip, u.Magnitude)
    assert mst_roundtrip.unit == mst.unit
    assert_allclose(mst_roundtrip.value, mst.value)

    wave = [4956.8, 4959.55, 4962.3] * u.AA
    flux_nu = mst.to(u.Jy, equivalencies=u.spectral_density(wave))
    mst_roundtrip2 = flux_nu.to(u.STmag, u.spectral_density(wave))
    assert isinstance(mst_roundtrip2, u.Magnitude)
    assert mst_roundtrip2.unit == mst.unit
    assert_allclose(mst_roundtrip2.value, mst.value)


def test_quantity_decomposition():
    lq = 10.*u.mag(u.Jy)
    assert lq.decompose() == lq
    assert lq.decompose().unit.physical_unit.bases == [u.kg, u.s]
    assert lq.si == lq
    assert lq.si.unit.physical_unit.bases == [u.kg, u.s]
    assert lq.cgs == lq
    assert lq.cgs.unit.physical_unit.bases == [u.g, u.s]

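# Aside (sketch, not part of the original suite; same API as above): the view
# tests below rely on a LogQuantity exposing one block of memory in several
# guises, so in-place changes propagate between them:
#
#     lq = u.Magnitude(np.arange(10.) * u.Jy)
#     v = lq.value             # bare ndarray sharing lq's memory
#     fv = lq._function_view   # the same data as a plain Quantity in mag
#     v[2] = -1.               # now also visible through lq and fv
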
class TestLogQuantityViews(object):

    def setup(self):
        self.lq = u.Magnitude(np.arange(10.) * u.Jy)
        self.lq2 = u.Magnitude(np.arange(5.))

    def test_value_view(self):
        lq_value = self.lq.value
        assert type(lq_value) is np.ndarray
        lq_value[2] = -1.
        assert np.all(self.lq.value == lq_value)

    def test_function_view(self):
        lq_fv = self.lq._function_view
        assert type(lq_fv) is u.Quantity
        assert lq_fv.unit is self.lq.unit.function_unit
        lq_fv[3] = -2. * lq_fv.unit
        assert np.all(self.lq.value == lq_fv.value)

    def test_quantity_view(self):
        # Cannot view as Quantity, since the unit cannot be represented.
        with pytest.raises(TypeError):
            self.lq.view(u.Quantity)
        # But a dimensionless one is fine.
        q2 = self.lq2.view(u.Quantity)
        assert q2.unit is u.mag
        assert np.all(q2.value == self.lq2.value)
        lq3 = q2.view(u.Magnitude)
        assert type(lq3.unit) is u.MagUnit
        assert lq3.unit.physical_unit == u.dimensionless_unscaled
        assert np.all(lq3 == self.lq2)


class TestLogQuantitySlicing(object):

    def test_item_get_and_set(self):
        lq1 = u.Magnitude(np.arange(1., 11.)*u.Jy)
        assert lq1[9] == u.Magnitude(10.*u.Jy)
        lq1[2] = 100.*u.Jy
        assert lq1[2] == u.Magnitude(100.*u.Jy)
        with pytest.raises(u.UnitsError):
            lq1[2] = 100.*u.m
        with pytest.raises(u.UnitsError):
            lq1[2] = 100.*u.mag
        with pytest.raises(u.UnitsError):
            lq1[2] = u.Magnitude(100.*u.m)
        assert lq1[2] == u.Magnitude(100.*u.Jy)

    def test_slice_get_and_set(self):
        lq1 = u.Magnitude(np.arange(1., 10.)*u.Jy)
        lq1[2:4] = 100.*u.Jy
        assert np.all(lq1[2:4] == u.Magnitude(100.*u.Jy))
        with pytest.raises(u.UnitsError):
            lq1[2:4] = 100.*u.m
        with pytest.raises(u.UnitsError):
            lq1[2:4] = 100.*u.mag
        with pytest.raises(u.UnitsError):
            lq1[2:4] = u.Magnitude(100.*u.m)
        assert np.all(lq1[2] == u.Magnitude(100.*u.Jy))


class TestLogQuantityArithmetic(object):

    def test_multiplication_division(self):
        """Check that multiplication/division with other quantities is only
        possible when the physical unit is dimensionless, and that this turns
        the result into a normal quantity."""
        lq = u.Magnitude(np.arange(1., 11.)*u.Jy)

        with pytest.raises(u.UnitsError):
            lq * (1.*u.m)

        with pytest.raises(u.UnitsError):
            (1.*u.m) * lq

        with pytest.raises(u.UnitsError):
            lq / lq

        for unit in (u.m, u.mag, u.dex):
            with pytest.raises(u.UnitsError):
                lq / unit

        lq2 = u.Magnitude(np.arange(1, 11.))

        with pytest.raises(u.UnitsError):
            lq2 * lq

        with pytest.raises(u.UnitsError):
            lq2 / lq

        with pytest.raises(u.UnitsError):
            lq / lq2

        # but dimensionless_unscaled can be cancelled
        r = lq2 / u.Magnitude(2.)
        assert r.unit == u.dimensionless_unscaled
        assert np.all(r.value == lq2.value/2.)

        # with dimensionless, normal units OK, but return normal quantities
        tf = lq2 * u.m
        tr = u.m * lq2
        for t in (tf, tr):
            assert not isinstance(t, type(lq2))
            assert t.unit == lq2.unit.function_unit * u.m
            with u.set_enabled_equivalencies(u.logarithmic()):
                with pytest.raises(u.UnitsError):
                    t.to(lq2.unit.physical_unit)

        t = tf / (50.*u.cm)
        # now we essentially have the same quantity but with a prefactor of 2
        assert t.unit.is_equivalent(lq2.unit.function_unit)
        assert_allclose(t.to(lq2.unit.function_unit), lq2._function_view*2)

    @pytest.mark.parametrize('power', (2, 0.5, 1, 0))
    def test_raise_to_power(self, power):
        """Check that raising LogQuantities to some power is only possible
        when the physical unit is dimensionless, and that conversion is
        turned off when the resulting logarithmic unit (say, mag**2) is
        incompatible."""
        lq = u.Magnitude(np.arange(1., 4.)*u.Jy)

        if power == 0:
            assert
np.all(lq ** power == 1.)
numpy.all
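For reference, the completion above fills the power == 0 branch of
test_raise_to_power: a LogQuantity raised to the zeroth power becomes an
array of dimensionless ones, so the assertion reduces to an element-wise
numpy.all check. A minimal sketch of that reduction, assuming only numpy:

    import numpy as np

    values = np.arange(1., 4.)          # stand-in for lq's array values
    assert np.all(values ** 0 == 1.)    # numpy.all folds the boolean array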