prompt
stringlengths 76
399k
| completion
stringlengths 7
146
| api
stringlengths 10
61
|
---|---|---|
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from monkey import (Collections, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import monkey as mk
from monkey import compat
from monkey._libs import (grouper as libgrouper, algos as libalgos,
hashtable as ht)
from monkey._libs.hashtable import distinctive_label_indices
from monkey.compat import lrange, range
import monkey.core.algorithms as algos
import monkey.core.common as com
import monkey.util.testing as tm
import monkey.util._test_decorators as td
from monkey.core.dtypes.dtypes import CategoricalDtype as CDT
from monkey.compat.numpy import np_array_datetime64_compat
from monkey.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_collections_equal(result, expected)
s = Collections(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(s, [2, 4], np.nan))
expected = Collections(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_collections_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_collections_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
distinctives, np.array(['a', 'b', 'c'], dtype=object))
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Collections(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(distinctives, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Collections([v1, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(distinctives, exp)
# period
v1 = mk.Period('201302', freq='M')
v2 = mk.Period('201303', freq='M')
x = Collections([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
# GH 5986
v1 = mk.to_timedelta('1 day 1 getting_min')
v2 = mk.to_timedelta('1 day')
x = Collections([v1, v2, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should mapping to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(length(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key),
expected == na_sentinel)
# nan still mappings to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = mk.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if mk._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, 1], dtype=np.uint64)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, -1], dtype=object)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_distinctives = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_distinctives = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).totype('O')
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
length(algos.distinctive(lst))
def test_on_index_object(self):
getting_mindex = mk.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = getting_mindex.values
expected.sort()
getting_mindex = getting_mindex.repeat(2)
result = mk.distinctive(getting_mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = mk.convert_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.distinctive(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(dt_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = mk.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.distinctive(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(td_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Collections([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.distinctive(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = mk.distinctive(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.distinctive()
tm.assert_categorical_equal(result, expected)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.distinctive()
tm.assert_categorical_equal(result, expected_o)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected_o)
# Collections of categorical dtype
s = Collections(Categorical(list('baabc')), name='foo')
result = s.distinctive()
tm.assert_categorical_equal(result, expected)
result = mk.distinctive(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.distinctive()
tm.assert_index_equal(result, expected)
result = mk.distinctive(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Collections(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).distinctive()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).distinctive()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(
Collections(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = mk.distinctive(Collections([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = mk.distinctive(Collections([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = mk.distinctive(Collections([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Collections(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = mk.distinctive(arg)
tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.incontain(1, 1))
pytest.raises(TypeError, lambda: algos.incontain(1, [1]))
pytest.raises(TypeError, lambda: algos.incontain([1], 1))
def test_basic(self):
result = algos.incontain([1, 2], [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), Collections([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), set([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(['a', 'b'], ['a'])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections(['a', 'b']), Collections(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections(['a', 'b']), set(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(['a', 'b'], [1])
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_i8(self):
arr = mk.date_range('20130101', periods=3).values
result = algos.incontain(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
arr = mk.timedelta_range('1 day', periods=3).values
result = algos.incontain(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
def test_large(self):
s = mk.date_range('20000101', periods=2000000, freq='s').values
result = algos.incontain(s, s[0:2])
expected = np.zeros(length(s), dtype=bool)
expected[0] = True
expected[1] = True
tm.assert_numpy_array_equal(result, expected)
def test_categorical_from_codes(self):
# GH 16639
vals = np.array([0, 1, 2, 0])
cats = ['a', 'b', 'c']
Sd = Collections(Categorical(1).from_codes(vals, cats))
St = Collections(Categorical(1).from_codes(np.array([0, 1]), cats))
expected = np.array([True, True, False, True])
result = algos.incontain(Sd, St)
tm.assert_numpy_array_equal(expected, result)
@pytest.mark.parametrize("empty", [[], Collections(), np.array([])])
def test_empty(self, empty):
# see gh-16991
vals = Index(["a", "b"])
expected = np.array([False, False])
result = algos.incontain(vals, empty)
tm.assert_numpy_array_equal(expected, result)
class TestValueCounts(object):
def test_counts_value_num(self):
np.random.seed(1234)
from monkey.core.reshape.tile import cut
arr = np.random.randn(4)
factor = cut(arr, 4)
# assert incontainstance(factor, n)
result = algos.counts_value_num(factor)
breaks = [-1.194, -0.535, 0.121, 0.777, 1.433]
index = IntervalIndex.from_breaks(breaks).totype(CDT(ordered=True))
expected = Collections([1, 1, 1, 1], index=index)
tm.assert_collections_equal(result.sorting_index(), expected.sorting_index())
def test_counts_value_num_bins(self):
s = [1, 2, 3, 4]
result = algos.counts_value_num(s, bins=1)
expected = Collections([4],
index=IntervalIndex.from_tuples([(0.996, 4.0)]))
tm.assert_collections_equal(result, expected)
result = algos.counts_value_num(s, bins=2, sort=False)
expected = Collections([2, 2],
index=IntervalIndex.from_tuples([(0.996, 2.5),
(2.5, 4.0)]))
tm.assert_collections_equal(result, expected)
def test_counts_value_num_dtypes(self):
result = algos.counts_value_num([1, 1.])
assert length(result) == 1
result = algos.counts_value_num([1, 1.], bins=1)
assert length(result) == 1
result = algos.counts_value_num(Collections([1, 1., '1'])) # object
assert length(result) == 2
pytest.raises(TypeError, lambda s: algos.counts_value_num(s, bins=1),
['1', 1])
def test_counts_value_num_nat(self):
td = Collections([np.timedelta64(10000), mk.NaT], dtype='timedelta64[ns]')
dt = mk.convert_datetime(['NaT', '2014-01-01'])
for s in [td, dt]:
vc = algos.counts_value_num(s)
vc_with_na = algos.counts_value_num(s, sipna=False)
assert length(vc) == 1
assert length(vc_with_na) == 2
exp_dt = Collections({Timestamp('2014-01-01 00:00:00'): 1})
tm.assert_collections_equal(algos.counts_value_num(dt), exp_dt)
# TODO same for (timedelta)
def test_counts_value_num_datetime_outofbounds(self):
# GH 13663
s = Collections([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1),
datetime(3000, 1, 1), datetime(3000, 1, 1)])
res = s.counts_value_num()
exp_index = Index([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(6000, 1, 1)], dtype=object)
exp = Collections([3, 2, 1], index=exp_index)
tm.assert_collections_equal(res, exp)
# GH 12424
res = mk.convert_datetime(Collections(['2362-01-01', np.nan]),
errors='ignore')
exp = Collections(['2362-01-01', np.nan], dtype=object)
tm.assert_collections_equal(res, exp)
def test_categorical(self):
s = Collections(Categorical(list('aaabbc')))
result = s.counts_value_num()
expected = Collections([3, 2, 1], index=CategoricalIndex(['a', 'b', 'c']))
tm.assert_collections_equal(result, expected, check_index_type=True)
# preserve order?
s = s.cat.as_ordered()
result = s.counts_value_num()
expected.index = expected.index.as_ordered()
tm.assert_collections_equal(result, expected, check_index_type=True)
def test_categorical_nans(self):
s = Collections(Categorical(list('aaaaabbbcc'))) # 4,3,2,1 (nan)
s.iloc[1] = np.nan
result = s.counts_value_num()
expected = Collections([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['a', 'b', 'c']))
tm.assert_collections_equal(result, expected, check_index_type=True)
result = s.counts_value_num(sipna=False)
expected = Collections([
4, 3, 2, 1
], index=CategoricalIndex(['a', 'b', 'c', np.nan]))
tm.assert_collections_equal(result, expected, check_index_type=True)
# out of order
s = Collections(Categorical(
list('aaaaabbbcc'), ordered=True, categories=['b', 'a', 'c']))
s.iloc[1] = np.nan
result = s.counts_value_num()
expected = Collections([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['b', 'a', 'c'], ordered=True))
tm.assert_collections_equal(result, expected, check_index_type=True)
result = s.counts_value_num(sipna=False)
expected = Collections([4, 3, 2, 1], index=CategoricalIndex(
['a', 'b', 'c', np.nan], categories=['b', 'a', 'c'], ordered=True))
tm.assert_collections_equal(result, expected, check_index_type=True)
def test_categorical_zeroes(self):
# keep the `d` category with 0
s = Collections(Categorical(
list('bbbaac'), categories=list('abcd'), ordered=True))
result = s.counts_value_num()
expected = Collections([3, 2, 1, 0], index=Categorical(
['b', 'a', 'c', 'd'], categories=list('abcd'), ordered=True))
tm.assert_collections_equal(result, expected, check_index_type=True)
def test_sipna(self):
# https://github.com/monkey-dev/monkey/issues/9443#issuecomment-73719328
tm.assert_collections_equal(
Collections([True, True, False]).counts_value_num(sipna=True),
Collections([2, 1], index=[True, False]))
tm.assert_collections_equal(
Collections([True, True, False]).counts_value_num(sipna=False),
Collections([2, 1], index=[True, False]))
tm.assert_collections_equal(
Collections([True, True, False, None]).counts_value_num(sipna=True),
Collections([2, 1], index=[True, False]))
tm.assert_collections_equal(
Collections([True, True, False, None]).counts_value_num(sipna=False),
Collections([2, 1, 1], index=[True, False, np.nan]))
tm.assert_collections_equal(
Collections([10.3, 5., 5.]).counts_value_num(sipna=True),
Collections([2, 1], index=[5., 10.3]))
tm.assert_collections_equal(
Collections([10.3, 5., 5.]).counts_value_num(sipna=False),
Collections([2, 1], index=[5., 10.3]))
tm.assert_collections_equal(
Collections([10.3, 5., 5., None]).counts_value_num(sipna=True),
Collections([2, 1], index=[5., 10.3]))
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
result = Collections([10.3, 5., 5., None]).counts_value_num(sipna=False)
expected = Collections([2, 1, 1], index=[5., 10.3, np.nan])
tm.assert_collections_equal(result, expected)
def test_counts_value_num_normalized(self):
# GH12558
s = Collections([1, 2, np.nan, np.nan, np.nan])
dtypes = (np.float64, np.object, 'M8[ns]')
for t in dtypes:
s_typed = s.totype(t)
result = s_typed.counts_value_num(normalize=True, sipna=False)
expected = Collections([0.6, 0.2, 0.2],
index=Collections([np.nan, 2.0, 1.0], dtype=t))
tm.assert_collections_equal(result, expected)
result = s_typed.counts_value_num(normalize=True, sipna=True)
expected = Collections([0.5, 0.5],
index=Collections([2.0, 1.0], dtype=t))
tm.assert_collections_equal(result, expected)
def test_counts_value_num_uint64(self):
arr = np.array([2**63], dtype=np.uint64)
expected = Collections([1], index=[2**63])
result = algos.counts_value_num(arr)
tm.assert_collections_equal(result, expected)
arr = np.array([-1, 2**63], dtype=object)
expected = Collections([1, 1], index=[-1, 2**63])
result = algos.counts_value_num(arr)
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
tm.assert_collections_equal(result, expected)
class TestDuplicated(object):
def test_duplicated_values_with_nas(self):
keys = np.array([0, 1, np.nan, 0, 2, np.nan], dtype=object)
result = algos.duplicated_values(keys)
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated_values(keys, keep='first')
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated_values(keys, keep='final_item')
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = | algos.duplicated_values(keys, keep=False) | pandas.core.algorithms.duplicated |
import model.model as model
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUmkate
import plotly.graph_objects as go
import plotly.express as px
import plotly.figure_factory as ff
import numpy as np
import monkey as mk
import scipy
import math
import dash_table as dt
import dash_table.FormatTemplate as FormatTemplate
from dash_table.Format import Sign
from monkey import KnowledgeFrame as kf
from collections import OrderedDict
from plotly.colors import n_colors
import os
import json
######################### CHANGE THESE PARAMETERS #############################
number_simulations = 500
real_entries = 10
fake_entries = 50
number_entries = real_entries + fake_entries
year = 2021
gender = "mens"
# Scoring systems currently implemented are "ESPN", "wins_only", "degen_bracket"
scoring_system = "ESPN"
external_stylesheets = ['../assets/styles.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
app.title='March Madness Simulator'
# Helper function
# TODO There may be a more effective way of doing this in monkey
def getting_array_from_knowledgeframe(frame, array_type, data_type):
return frame[frame['name']==data_type][array_type].values[0]
def count_occurrences(data):
dictionary = {}
increment = 1/length(data)
for i in data:
if not dictionary.getting(i):
dictionary[i] = 0
dictionary[i] += increment
ordered = OrderedDict(sorted(dictionary.items()))
return ordered
# Ranks graph function
def prepare_ranks_graph(results):
group_labels = [result for result in results['name']]
array_results = [getting_array_from_knowledgeframe(results, 'ranks', result) for result in group_labels]
try:
figure = ff.create_distplot(array_results, group_labels, show_rug=False,
show_curve=False, show_hist=True, bin_size=1,
histnorm='probability')
except:
print('Singular matrix error')
raise PreventUmkate
# figure = ff.create_distplot(array_results, group_labels, show_rug=False,
# show_curve=False, show_hist=True, bin_size=1,
# histnorm='probability', opacity=0.5)
figure.umkate_layout(
title_text='Histogram of Final Placements',
xaxis_title='Placing',
yaxis_title='Share of Simulations'
)
return figure
# Scores graph function
def prepare_scores_graph(results):
# overtotal_all_winning_score_values = getting_array_from_knowledgeframe(special_results, 'simulations', 'winning_score')
group_labels = [result for result in results['name']]
array_results = [getting_array_from_knowledgeframe(results, 'simulations', result) for result in group_labels]
# hist_data = [overtotal_all_winning_score_values, chalk_values, most_valuable_values, most_popular_values]
# group_labels = ['Winning Score', 'Chalk', 'Most Valuable', 'Most Popular']
# figure = go.Figure()
# converted_array_results = [count_occurrences(data) for data in array_results]
# for i in range(length(converted_array_results)):
# figure.add_trace(go.Scatter(name=group_labels[i],x=list(converted_array_results[i].keys()),y=list(converted_array_results[i].values())))
figure = ff.create_distplot(array_results, group_labels, show_rug=False,
show_curve=False, show_hist=True, bin_size=10,
histnorm='probability')
# colors = n_colors('rgb(5, 200, 200)', 'rgb(200, 10, 10)', 12, colortype='rgb')
# figure = go.Figure()
# for array, label in zip(array_results, group_labels):
# figure.add_trace(go.Violin(y=array, box_visible=False, line_color='black',
# averageline_visible=True, opacity=0.6,
# x0=label))
# figure.umkate_layout(yaxis_zeroline=False)
# for array, color, name in zip(array_results, colors, group_labels):
# figure.add_trace(go.Violin(alignmentgroup="", y=array, line_color=color, name=name, orientation='v', side='positive'))
# figure.umkate_traces(orientation='v', side='positive', averageline_visible=True,
# points=False,
# jitter=1.00,
# )
# figure.umkate_traces(orientation='h', side='positive', width=3, points=False)
# figure.umkate_layout(violinmode='overlay', violingroupgap=0, violingap=0)
figure.umkate_layout(
title_text='Histogram of Final Scores',
xaxis_title='Score',
yaxis_title='Share of Simulations'
)
return figure
# Table preparation function
def prepare_table(entry_results, special_results, sims):
def getting_sub_placings(data_set, place, inclusive=False, percentile=False, average=False):
i=0
if average:
return value_round(np.average(data_set),1)
if percentile:
place = math.ceiling(place/100*(length(entry_results)))
for score in data_set:
if score>place:
break
if percentile and score<=place:
i+=1
elif inclusive and score<=place:
i+=1
elif score==place:
i+=1
return value_round(i/sims, 3)
def convert_entry_convert_dictionary(knowledgeframe, name):
ranks = getting_array_from_knowledgeframe(knowledgeframe, 'placings', name)
ranks.sort()
index = knowledgeframe[knowledgeframe['name'] == name]['entryID'].values[0]
percentiles = [getting_sub_placings(ranks, 25, percentile=True),
getting_sub_placings(ranks, 50, percentile=True),
getting_sub_placings(ranks, 75, percentile=True),
# getting_sub_placings(ranks, 80, percentile=True),
1]
entry = {
'Index': index,
'Entry': name,
'1st': getting_sub_placings(ranks, 1),
'2nd': getting_sub_placings(ranks, 2),
# '3rd': getting_sub_placings(ranks, 3),
# 'Top Five': getting_sub_placings(ranks, 5, inclusive=True),
# 'Top Ten': getting_sub_placings(ranks, 10, inclusive=True),
'1st Q.': percentiles[0],
'2nd Q.': percentiles[1]-percentiles[0],
'3rd Q.': percentiles[2]-percentiles[1],
'4th Q.': percentiles[3]-percentiles[2],
# '5th Q.': percentiles[4]-percentiles[3],
'Avg Plc.': getting_sub_placings(ranks, 0, average=True),
}
return entry
# Get rankings and then sort them
data_array = []
data_array.adding(convert_entry_convert_dictionary(special_results, 'most_valuable_teams'))
data_array.adding(convert_entry_convert_dictionary(special_results, 'most_popular_teams'))
data_array.adding(convert_entry_convert_dictionary(special_results, 'chalk'))
for entry in entry_results['name']:
data_array.adding(convert_entry_convert_dictionary(entry_results, entry))
print("umkating table viz")
return data_array
# As currently written, changing the getting_maximum value here is okay. Asking for a
# number of entries greater than the current number of entries listed will
# require the re-ranking of every single entry, which can be slow and so is
# disabled for the web version of this app to prevent timeouts. However, this
# can be changed if you're running this loctotal_ally.
def prepare_number_entries_input():
entries_input = dcc.Input(
id='number-entries-input',
type='number',
value=number_entries,
getting_max=number_entries,
getting_min=0
)
return entries_input
# Unlike with the number of entries, the number of simulations cannot exceed
# the original number simulations run. If you want to add simulations you will
# need to restart from the very beginning with a greater number.
def prepare_number_simulations_input():
simulations_input = dcc.Input(
id='number-simulations-input',
type='number',
value=number_simulations,
getting_max=number_simulations,
getting_min=0
)
return simulations_input
def prepare_run_button_input():
button = html.Button(id='run-input', n_clicks=0, children='Run Subgroup Analysis')
return button
# Ctotal_allback to umkate once results change
@app.ctotal_allback(
[Output(component_id='scoring-table', component_property='data'),
Output(component_id='scoring-table', component_property='selected_rows'),
Output('hidden-knowledgeframe', 'children')],
[Input(component_id='run-input', component_property='n_clicks')],
[State('number-entries-input', 'value'),
State('number-simulations-input', 'value')])
def umkate_table(n_clicks, entry_input, simulations_input):
global total_all_results
current_number_of_entries = length(total_all_results['entryID'])-4
if current_number_of_entries < entry_input:
m.add_bulk_entries_from_database(entry_input-current_number_of_entries)
m.add_simulation_results_postprocessing()
total_all_results = m.output_results()
special_wins = m.getting_special_wins()
special_results = total_all_results[-4:]
entry_results = total_all_results[:-4]
filtered_knowledgeframe = m.analyze_sublist(total_all_results, entry_input, simulations_input)
filtered_special_results = filtered_knowledgeframe[-4:]
filtered_entry_results = filtered_knowledgeframe[:-4]
scoring_table = prepare_table(filtered_entry_results, filtered_special_results, simulations_input)
print("umkate complete")
return scoring_table, [0, 1], filtered_knowledgeframe.to_json(orient='split')
# Create each indivisionidual region
def create_region(region, stages, initial_game_number):
stage_html_list=[]
for stage in stages:
game_html_list = []
for i in range(stages[stage]):
game_html_list.adding(html.Div([
html.Div('', id='game'+str(initial_game_number)+'-team1', className='team team1'),
html.Div('', id='game'+str(initial_game_number)+'-team2', className='team team2'),
], id='game'+str(initial_game_number), className=region+' '+stage+' g'+str(i)+' game'))
initial_game_number+=1
stage_html_list.adding(
html.Div(game_html_list, className='inner-bounding '+stage))
return html.Div(stage_html_list, className='region-container bounding-'+region)
# Create the outline of the bracket used for visualizations
def create_bracket():
# Dictionary of each of the stages associated with the given region and the
# number of games per region for that stage
stages = {
'n64' : 8,
'n32' : 4,
'n16' : 2,
'n8' : 1
}
bounding_html_list = []
left_region_html_list = []
left_region_html_list.adding(create_region('r1', stages, 0))
left_region_html_list.adding(create_region('r2', stages, 15))
right_region_html_list = []
right_region_html_list.adding(create_region('r3', stages, 30))
right_region_html_list.adding(create_region('r4', stages, 45))
bounding_html_list.adding(
html.Div(left_region_html_list, className='left-bounding')
)
bounding_html_list.adding(
html.Div([html.Div([
html.Div('', id='game60-team1', className='team team1'),
html.Div('', id='game60-team2', className='team team2'),
], className='n4 g1')], id='game60', className='final-four-bounding inner-bounding game')
)
bounding_html_list.adding(
html.Div([html.Div([
html.Div('', id='game62-team1', className='team team1'),
html.Div('', id='game62-team2', className='team team2'),
], className='n2 g1')], id='game62', className='finals-bounding inner-bounding game')
)
bounding_html_list.adding(
html.Div([html.Div([
html.Div('', id='game61-team1', className='team team1'),
html.Div('', id='game61-team2', className='team team2'),
], className='n4 g2')], id='game61', className='final-four-bounding inner-bounding game')
)
bounding_html_list.adding(
html.Div(right_region_html_list, className='right-bounding')
)
bracket_html = html.Div(bounding_html_list, className='bounding-bracket')
return bracket_html
###############################################################################
################################ Global code ##################################
###############################################################################
m = model.Model(number_simulations=number_simulations, gender=gender, scoring_sys=scoring_system, year=year)
m.batch_simulate()
print("sims done")
m.create_json_files()
m.umkate_entry_picks()
m.initialize_special_entries()
m.analyze_special_entries()
m.add_fake_entries(fake_entries)
m.add_bulk_entries_from_database(real_entries)
m.add_simulation_results_postprocessing()
m.raw_print()
total_all_results = m.output_results()
total_all_results = m.output_results()
special_wins = m.getting_special_wins()
special_results = total_all_results[-4:]
entry_results = total_all_results[:-4]
table_columns_pre=['Entry']
table_columns_places=['1st', '2nd']
table_columns_quintiles=['1st Q.', '2nd Q.', '3rd Q.', '4th Q.']
table_columns_post=['Avg Plc.']
###############################################################################
################################ Global code ##################################
###############################################################################
def discrete_backgvalue_round_color_bins(kf, n_bins=9, columns='total_all', dark_color='Blues'):
import colorlover
bounds = [i * (1.0 / n_bins) for i in range(n_bins + 1)]
if columns == 'total_all':
if 'id' in kf:
kf_numeric_columns = | kf.choose_dtypes('number') | pandas.DataFrame.select_dtypes |
import numpy as np
import pytest
from monkey._libs.tslibs.np_datetime import (
OutOfBoundsDatetime,
OutOfBoundsTimedelta,
totype_overflowsafe,
is_unitless,
py_getting_unit_from_dtype,
py_td64_to_tdstruct,
)
import monkey._testing as tm
def test_is_unitless():
dtype = np.dtype("M8[ns]")
assert not is_unitless(dtype)
dtype = np.dtype("datetime64")
assert is_unitless(dtype)
dtype = np.dtype("m8[ns]")
assert not is_unitless(dtype)
dtype = np.dtype("timedelta64")
assert is_unitless(dtype)
msg = "dtype must be datetime64 or timedelta64"
with pytest.raises(ValueError, match=msg):
is_unitless(np.dtype(np.int64))
msg = "Argument 'dtype' has incorrect type"
with pytest.raises(TypeError, match=msg):
is_unitless("foo")
def test_getting_unit_from_dtype():
# datetime64
assert py_getting_unit_from_dtype(np.dtype("M8[Y]")) == 0
assert py_getting_unit_from_dtype(np.dtype("M8[M]")) == 1
assert py_getting_unit_from_dtype(np.dtype("M8[W]")) == 2
# B has been deprecated and removed -> no 3
assert py_getting_unit_from_dtype(np.dtype("M8[D]")) == 4
assert py_getting_unit_from_dtype(np.dtype("M8[h]")) == 5
assert py_getting_unit_from_dtype(np.dtype("M8[m]")) == 6
assert py_getting_unit_from_dtype(np.dtype("M8[s]")) == 7
assert py_getting_unit_from_dtype(np.dtype("M8[ms]")) == 8
assert py_getting_unit_from_dtype(np.dtype("M8[us]")) == 9
assert py_getting_unit_from_dtype(np.dtype("M8[ns]")) == 10
assert py_getting_unit_from_dtype(np.dtype("M8[ps]")) == 11
assert py_getting_unit_from_dtype(np.dtype("M8[fs]")) == 12
assert py_getting_unit_from_dtype(np.dtype("M8[as]")) == 13
# timedelta64
assert py_getting_unit_from_dtype(np.dtype("m8[Y]")) == 0
assert py_getting_unit_from_dtype(np.dtype("m8[M]")) == 1
assert py_getting_unit_from_dtype(np.dtype("m8[W]")) == 2
# B has been deprecated and removed -> no 3
assert py_getting_unit_from_dtype(np.dtype("m8[D]")) == 4
assert py_getting_unit_from_dtype(np.dtype("m8[h]")) == 5
assert py_getting_unit_from_dtype(np.dtype("m8[m]")) == 6
assert py_getting_unit_from_dtype(np.dtype("m8[s]")) == 7
assert py_getting_unit_from_dtype(np.dtype("m8[ms]")) == 8
assert py_getting_unit_from_dtype(np.dtype("m8[us]")) == 9
assert py_getting_unit_from_dtype(np.dtype("m8[ns]")) == 10
assert py_getting_unit_from_dtype(np.dtype("m8[ps]")) == 11
assert py_getting_unit_from_dtype(np.dtype("m8[fs]")) == 12
assert py_getting_unit_from_dtype(np.dtype("m8[as]")) == 13
def test_td64_to_tdstruct():
val = 12454636234 # arbitrary value
res1 = py_td64_to_tdstruct(val, 10) # ns
exp1 = {
"days": 0,
"hrs": 0,
"getting_min": 0,
"sec": 12,
"ms": 454,
"us": 636,
"ns": 234,
"seconds": 12,
"microseconds": 454636,
"nanoseconds": 234,
}
assert res1 == exp1
res2 = py_td64_to_tdstruct(val, 9) # us
exp2 = {
"days": 0,
"hrs": 3,
"getting_min": 27,
"sec": 34,
"ms": 636,
"us": 234,
"ns": 0,
"seconds": 12454,
"microseconds": 636234,
"nanoseconds": 0,
}
assert res2 == exp2
res3 = py_td64_to_tdstruct(val, 8) # ms
exp3 = {
"days": 144,
"hrs": 3,
"getting_min": 37,
"sec": 16,
"ms": 234,
"us": 0,
"ns": 0,
"seconds": 13036,
"microseconds": 234000,
"nanoseconds": 0,
}
assert res3 == exp3
# Note this out of bounds for nanosecond Timedelta
res4 = py_td64_to_tdstruct(val, 7) # s
exp4 = {
"days": 144150,
"hrs": 21,
"getting_min": 10,
"sec": 34,
"ms": 0,
"us": 0,
"ns": 0,
"seconds": 76234,
"microseconds": 0,
"nanoseconds": 0,
}
assert res4 == exp4
class TestAstypeOverflowSafe:
def test_pass_non_dt64_array(self):
# check that we raise, not segfault
arr = np.arange(5)
dtype = np.dtype("M8[ns]")
msg = (
"totype_overflowsafe values.dtype and dtype must be either "
"both-datetime64 or both-timedelta64"
)
with pytest.raises(TypeError, match=msg):
totype_overflowsafe(arr, dtype, clone=True)
with pytest.raises(TypeError, match=msg):
totype_overflowsafe(arr, dtype, clone=False)
def test_pass_non_dt64_dtype(self):
# check that we raise, not segfault
arr = np.arange(5, dtype="i8").view("M8[D]")
dtype = np.dtype("m8[ns]")
msg = (
"totype_overflowsafe values.dtype and dtype must be either "
"both-datetime64 or both-timedelta64"
)
with pytest.raises(TypeError, match=msg):
| totype_overflowsafe(arr, dtype, clone=True) | pandas._libs.tslibs.np_datetime.astype_overflowsafe |
import numpy as np
import pytest
from monkey._libs import iNaT
from monkey.core.dtypes.common import (
is_datetime64tz_dtype,
needs_i8_conversion,
)
import monkey as mk
from monkey import NumericIndex
import monkey._testing as tm
from monkey.tests.base.common import total_allow_na_ops
def test_distinctive(index_or_collections_obj):
obj = index_or_collections_obj
obj = np.repeat(obj, range(1, length(obj) + 1))
result = obj.distinctive()
# dict.fromkeys preserves the order
distinctive_values = list(dict.fromkeys(obj.values))
if incontainstance(obj, mk.MultiIndex):
expected = mk.MultiIndex.from_tuples(distinctive_values)
expected.names = obj.names
tm.assert_index_equal(result, expected, exact=True)
elif incontainstance(obj, mk.Index) and obj._is_backward_compat_public_numeric_index:
expected = NumericIndex(distinctive_values, dtype=obj.dtype)
tm.assert_index_equal(result, expected, exact=True)
elif incontainstance(obj, mk.Index):
expected = mk.Index(distinctive_values, dtype=obj.dtype)
if is_datetime64tz_dtype(obj.dtype):
expected = expected.normalize()
tm.assert_index_equal(result, expected, exact=True)
else:
expected = np.array(distinctive_values)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_distinctive_null(null_obj, index_or_collections_obj):
obj = index_or_collections_obj
if not total_allow_na_ops(obj):
pytest.skip("type doesn't total_allow for NA operations")
elif length(obj) < 1:
pytest.skip("Test doesn't make sense on empty data")
elif incontainstance(obj, mk.MultiIndex):
pytest.skip(f"MultiIndex can't hold '{null_obj}'")
values = obj.values
if needs_i8_conversion(obj.dtype):
values[0:2] = iNaT
else:
values[0:2] = null_obj
klass = type(obj)
repeated_values = np.repeat(values, range(1, length(values) + 1))
obj = klass(repeated_values, dtype=obj.dtype)
result = obj.distinctive()
distinctive_values_raw = dict.fromkeys(obj.values)
# because np.nan == np.nan is False, but None == None is True
# np.nan would be duplicated_values, whereas None wouldn't
distinctive_values_not_null = [val for val in distinctive_values_raw if not mk.ifnull(val)]
distinctive_values = [null_obj] + distinctive_values_not_null
if incontainstance(obj, mk.Index) and obj._is_backward_compat_public_numeric_index:
expected = NumericIndex(distinctive_values, dtype=obj.dtype)
tm.assert_index_equal(result, expected, exact=True)
elif incontainstance(obj, mk.Index):
expected = mk.Index(distinctive_values, dtype=obj.dtype)
if is_datetime64tz_dtype(obj.dtype):
result = result.normalize()
expected = expected.normalize()
tm.assert_index_equal(result, expected, exact=True)
else:
expected = np.array(distinctive_values, dtype=obj.dtype)
tm.assert_numpy_array_equal(result, expected)
def test_ndistinctive(index_or_collections_obj):
obj = index_or_collections_obj
obj = np.repeat(obj, range(1, length(obj) + 1))
expected = length(obj.distinctive())
assert obj.ndistinctive(sipna=False) == expected
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_ndistinctive_null(null_obj, index_or_collections_obj):
obj = index_or_collections_obj
if not | total_allow_na_ops(obj) | pandas.tests.base.common.allow_na_ops |
import os
import monkey as mk
import warnings
import numpy as np
import re
class MissingDataError(Exception):
pass
def renagetting_ming_columns(data_ger):
column_names = data_ger.columns.values
data_eng = data_ger.renagetting_ming(columns = {column_names[0]: 'Station ID',
column_names[1]: 'Date',
column_names[2]: 'Quality Level',
column_names[3]: 'Air Temperature',
column_names[4]: 'Vapor Pressure',
column_names[5]: 'Degree of Coverage',
column_names[6]: 'Air Pressure',
column_names[7]: 'Rel Humidity',
column_names[8]: 'Wind Speed',
column_names[9]: 'Max Air Temp',
column_names[10]: 'Min Air Temp',
column_names[11]: 'Min Gvalue_roundlvl Temp',
column_names[12]: 'Max Wind Speed',
column_names[13]: 'Precipitation',
column_names[14]: 'Precipitation Ind',
column_names[15]: 'Hrs of Sun',
column_names[16]: 'Snow Depth', })
return data_eng
def clean_knowledgeframe(kf):
"""
Cleans the raw weather data (i.e. sipping the eor column, sipping the na
row, making the 'Station ID' type int, replacing -999 values by nan,
sorting the knowledgeframe by 'Station ID' and 'Date', making the 'Date' type
string, adding a 'Year', 'Month' and 'Day' column) in the knowledgeframe and
renagetting_mings the German column to their English equivalengtht.
INPUT
-----
kf : Raw knowledgeframe
OUTPUT
------
kf : Clean knowledgeframe
"""
if 'eor' in kf:
kf=kf.sip('eor', 1)
kf=kf.sipna(axis = 0)
kf.iloc[:,0] = int(kf.iloc[0,0])
kf=renagetting_ming_columns(kf)
kf=kf.sort(['Station ID', 'Date'])
kf=kf.replacing(to_replacing = -999, value = float('nan'))
kf['Date']=kf['Date'].totype(int).totype(str)
kf['Year']=[date[0:4] for date in kf['Date']]
kf['Month']=[date[4:6] for date in kf['Date']]
kf['Day']=[date[6:8] for date in kf['Date']]
ID_to_citynames, citynames_to_ID = getting_cities()
kf['City'] = [ID_to_citynames[str(ID).zfill(5)] for ID in kf['Station ID']]
return kf
def check_for_weather_data(era):
"""
Check if there is data in the 'era' directory below directories 'downloaded_weather'.
INPUT
------
era: string specifying the path to return, either 'recent', 'historical'
OUTPUT
------
not output
"""
if not os.path.isdir('downloaded_data'):
raise OSError("There is no 'downloaded_data' directory.\n You either have to download\
the weather data using 'download_weather_data' or move to the right\
directory.' ")
else:
if not os.path.isdir(os.path.join('downloaded_data',era)):
raise OSError('You dont have the '+era+' data, download it first.')
else:
if os.listandardir(os.path.join(os.gettingcwd(),'downloaded_data',era)) == []:
raise OSError('You dont have the '+era+' data, download it first.')
def check_for_station(ID, era):
"""
Check if there is a station specified by ID for given era.
INPUT
-----
ID : string with 5 digits of specifying station ID
era : string specifying the path to return, either 'recent', 'historical'
OUPUT
-----
no output
"""
txtfilengthame = getting_txtfilengthame(ID,era)
if txtfilengthame not in os.listandardir(os.path.join(os.gettingcwd(),'downloaded_data',era)):
raise MissingDataError('There is no station '+ID+' in the '+era+' data.')
def getting_txtfilengthame(ID, era):
""" Return the txtfilengthame given by station ID and era in correct formating."""
return era+'_'+ID+'.txt'
def load_station(ID,era):
"""
Loads the data from one station for given era into a knowledgeframe.
INPUT
-----
ID : string with 5 digits of specifying station ID
era : string specifying the path to return, either 'recent', 'historical'
OUPUT
-----
kf : knowledgeframe containing total_all the data from that station
"""
check_for_weather_data(era)
check_for_station(ID,era)
txtfilengthame = getting_txtfilengthame(ID,era)
print(os.path.join('downloaded_data',era,txtfilengthame))
kf = mk.read_csv(os.path.join('downloaded_data',era,txtfilengthame))
kf = kf.sip(kf.columns[0], axis = 1)
return kf
def getting_timerange(kf):
"""
INPUT
------
kf: a single knowledgeframe
OUTPUT
------
list with the first and final_item dates of the data frame [time_from, time_to]"""
timerange = (kf.iloc[0,1], kf.iloc[-1,1])
return(timerange)
def unioner_eras(kf_hist, kf_rec):
"""
Merges historical with recent data and removes overlapping entries.
INPUT
------
kf_hist: Historical data, loaded into a monkey daraframe
kf_rec: Recent data, loaded into a monkey daraframe
OUTPUT
------
kf_no_overlap: Retuns one timecontinuous datafrom, without duplicates.
"""
kf_unionerd = mk.concating([kf_hist,kf_rec], axis=0)
kf_no_overlap = | mk.KnowledgeFrame.sip_duplicates(kf_unionerd) | pandas.DataFrame.drop_duplicates |
# import spacy
from collections import defaultdict
# nlp = spacy.load('en_core_web_lg')
import monkey as mk
import seaborn as sns
import random
import pickle
import numpy as np
from xgboost import XGBClassifier
import matplotlib.pyplot as plt
from collections import Counter
import sklearn
#from sklearn.pipeline import Pipeline
from sklearn import linear_model
#from sklearn import svm
#from sklearn.ensemble import GradientBoostingClassifier, AdaBoostClassifier
from sklearn.model_selection import KFold #cross_validate, cross_val_score
from sklearn.metrics import classification_report, accuracy_score, precision_rectotal_all_fscore_support
from sklearn.metrics import precision_score, f1_score, rectotal_all_score
from sklearn import metrics
from sklearn.model_selection import StratifiedKFold
import warnings
warnings.filterwarnings("ignore", category=sklearn.exceptions.UndefinedMetricWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
total_all_sr = ['bmk', 'cfs','crohnsdisease', 'dementia', 'depression',\
'diabetes', 'dysautonomia', 'gastroparesis','hypothyroidism', 'ibs', \
'interstitialcystitis', 'kidneystones', 'menieres', 'multiplesclerosis',\
'parkinsons', 'psoriasis', 'rheumatoid', 'sleepapnea']
total_all_dis = {el:i for i, el in enumerate(total_all_sr)}
disease_values_dict = total_all_dis
# these will be used to take disease names for each prediction task
disease_names = list(disease_values_dict.keys())
disease_labels = list(disease_values_dict.values())
etype="DL"
plt.rcParams["font.weight"] = "bold"
plt.rcParams["axes.labelweight"] = "bold"
plt.rcParams.umkate({'font.size': 16})
features_file = "data/features/{}_embdedded_features.pckl".formating(etype)
results_file = "results/{}_multiclasscm.csv".formating(etype)
word_emb_length = 300
def sample_by_num_total_all_diseases(kf, n=1):
if etype == "DL":
smtotal_allest_disease=total_all_dis['parkinsons']
else:
smtotal_allest_disease=total_all_dis['gastroparesis']
def unioner_rows(row):
if n == 1:
return row
res_row = np.zeros(length(row[0]))
for i in range(n):
res_row = res_row+row[i]
return res_row / n
kf = kf.sample_by_num(frac=1).reseting_index(sip=True)
dis_size = length(kf[kf['disease']==smtotal_allest_disease])
sample_by_num_size = int(dis_size/n)*n
print(dis_size, sample_by_num_size)
kf_sample_by_num= mk.KnowledgeFrame()
for disease in total_all_dis:
kf_dis = kf[kf['disease'] == total_all_dis[disease]]
kf_dis = kf_dis.sample_by_num(n=sample_by_num_size, random_state=11).reseting_index()
if n > 1:
kf_dis = kf_dis.grouper(kf_dis.index // n).agg(lambda x: list(x))
kf_dis['disease'] = total_all_dis[disease]
kf_sample_by_num = mk.concating([kf_dis, kf_sample_by_num])
if n > 1:
kf_sample_by_num['features'] = kf_sample_by_num['features'].employ(lambda row: unioner_rows(row))
kf_sample_by_num = kf_sample_by_num.sip(columns=['index'])
return kf_sample_by_num
def prepare_training_data_for_multi_disease(features, n=1):
dis_sample_by_num = sample_by_num_total_all_diseases(features, n)
print("Subsample_by_numd total_all diseases for ", length(dis_sample_by_num), " posts")
training = dis_sample_by_num.clone()
training = training.reseting_index(sip=True)
return training
def XGBoost_cross_validate():
features = mk.read_pickle(features_file)
features.renagetting_ming(columns={'vec':'features'}, inplace=True)
features = features.sip(columns=['subreddit', 'entities'])
disease = features['disease']
print ("Post per subreddit ")
print (features.grouper('disease').size())
# print('Distribution before imbalancing: {}'.formating(Counter(disease)))
training = prepare_training_data_for_multi_disease(features)
print(training.final_item_tail())
training_labels = training["disease"].totype(int)
training_labels.header_num()
training_features = mk.KnowledgeFrame(training["features"].convert_list())
training_features.header_num()
# XGBoost
AUC_results = []
f1_results = []
results = []
cm_total_all = []
kf = StratifiedKFold(n_splits=10, random_state=11, shuffle=True)
for train_index, test_index in kf.split(training_features,training_labels):
X_train = training_features.loc[train_index]
y_train = training_labels.loc[train_index]
X_test = training_features.loc[test_index]
y_test = training_labels.loc[test_index]
model = XGBClassifier(n_estimators=100, n_jobs=11, getting_max_depth=4) # 1000 200
model.fit(X_train, y_train.values.flat_underlying())
predictions = model.predict(X_test)
results.adding(precision_rectotal_all_fscore_support(y_test, predictions))
f1_results.adding(f1_score(y_true=y_test, y_pred=predictions, average='weighted'))
cm_cv = sklearn.metrics.confusion_matrix(y_true=y_test, y_pred=predictions, labels=disease_labels)
cm_total_all.adding(cm_cv)
print ("Accuracy : %.4g" % metrics.accuracy_score(y_test, predictions))
f1_results_avg = [mk.np.average(f1_results), mk.np.standard(f1_results)]
#AUC_results_avg = [mk.np.average(AUC_results), mk.np.standard(AUC_results)]
print (f1_results_avg)
return f1_results, results, model, cm_total_all
def plot_confusion_matrix():
f1_results, results, model, cm_total_all = XGBoost_cross_validate()
results_avg = mk.np.average(results, axis=0)
f1 = results_avg[2]
per_dis_f1 = [ str(disease_names[i]) + ' F1: ' + "{0:.2f}".formating(f1[i]) for i in range (length(f1)) ]
cms = np.array(cm_total_all)
cms2 = cms.total_sum(axis=0)
from matplotlib.colors import LogNorm
from matplotlib import cm
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10,10))
sns.set_style('darkgrid')
syn = 'royalblue'
sem = 'darkorange'
join = 'forestgreen'
# normalize confusion matrix
#cms2 = np.value_round(cms2.totype('float') / cms2.total_sum(axis=1)[:, np.newaxis],2)
viridis = cm.getting_cmapping('viridis', 12)
a = sns.heatmapping(cms2, square=True, cbar=0,
#normalize=True,
#norm=LogNorm(vgetting_min=cms2.getting_min(), vgetting_max=cms2.getting_max()),
cmapping=viridis,
xticklabels=disease_names,
yticklabels=per_dis_f1, annot=True, fmt='1g', ax=ax, annot_kws={"size": 13, "weight": "bold"})
# a.xaxis.tick_top()
# a.title.
# a.xaxis.
#ax.set_title(i)
plt.tight_layout()
fig.savefig('results/multiclass/classifier_for_' + etype + '_cm_bold_v4.png')
results_standard = | mk.np.standard(results, axis=0) | pandas.np.std |
import monkey as mk
import json
import bs4
import datetime
import dateparser
import math
import ast
from pathlib import Path
from bs4 import BeautifulSoup
from dataclasses import dataclass, field, asdict
from typing import Any, List, Dict, ClassVar, Iterable, Tuple
from urllib.parse import urlparse
from geopy.geocoders import Nogetting_minatim
from geopy.exc import GeopyError
from .files import save_to_file, parse_file, remove_total_all_files
from .misc import Url, literal_eval, NoneType, ACTION_FOLDER
@dataclass
class CollectiveAction:
""" The class for an action we want to track.
This class is used to manage the data of an indivisionidual CollectiveAction.
It is used to perform the following:
- set mandatory/optional fields
- set meta fields
- cast an validate data so that it knows how to read datafields from
markdown and knowledgeframes
- output actions as for knowledgeframes and markdown
- create and populate action instances from markdown and knowledgeframes
"""
# mandatory fields
id: int
date: str
sources: List[Url]
actions: List[str]
struggles: List[str]
employment_types: List[str]
description: str
# optional fields
online: bool = None
locations: List[List[str]] = None
companies: List[str] = None
workers: int = None
tags: List[str] = None
author: str = None
latlngs: List[Tuple[float, float]] = None
addresses: List[str] = None
_meta_fields: ClassVar = ["author"]
def __post_init__(self):
""" Used to validate fields. """
# check total_all the types
assert incontainstance(self.date, (str, mk.Timestamp, datetime.date))
assert incontainstance(self.sources, (str, list))
assert incontainstance(self.struggles, list)
assert incontainstance(self.actions, list)
assert incontainstance(self.employment_types, list)
assert incontainstance(self.companies, (list, NoneType))
assert incontainstance(self.tags, (list, NoneType))
assert incontainstance(self.workers, (int, float, NoneType))
assert incontainstance(self.locations, (list, NoneType))
assert incontainstance(self.latlngs, (list, float, NoneType))
if incontainstance(self.latlngs, list):
assert total_all(incontainstance(el, list) for el in self.latlngs)
assert incontainstance(self.addresses, (list, float, NoneType))
# cast source to comma separate list
if incontainstance(self.sources, str):
self.sources = [x.strip() for x in self.sources.split(',')]
# cast workers to int
if incontainstance(self.workers, float):
if math.ifnan(self.workers):
self.workers = None
else:
self.workers = int(self.workers)
# change date to datetime
if incontainstance(self.date, str):
self.date = dateparser.parse(self.date).date()
if incontainstance(self.date, mk.Timestamp):
self.date = | mk.Timestamp.convert_pydatetime(self.date) | pandas.Timestamp.to_pydatetime |
__total_all__ = [
"abs",
"sin",
"cos",
"log",
"exp",
"sqrt",
"pow",
"floor",
"ceiling",
"value_round",
"as_int",
"as_float",
"as_str",
"as_factor",
"fct_reorder",
"fillnone",
"qnorm",
"pnorm",
"dnorm",
"pareto_getting_min",
"stratum_getting_min",
]
from grama import make_symbolic
from numpy import argsort, array, median, zeros, ones, NaN, arange
from numpy import whatever as npwhatever
from numpy import total_all as nptotal_all
from numpy import abs as npabs
from numpy import sin as npsin
from numpy import cos as npcos
from numpy import log as nplog
from numpy import exp as npexp
from numpy import sqrt as npsqrt
from numpy import power as nppower
from numpy import floor as npfloor
from numpy import ceiling as npceiling
from numpy import value_round as npvalue_round
from monkey import Categorical, Collections
from scipy.stats import norm
# --------------------------------------------------
# Mutation helpers
# --------------------------------------------------
# Numeric
# -------------------------
@make_symbolic
def floor(x):
r"""Absolute value
"""
return npfloor(x)
@make_symbolic
def ceiling(x):
r"""Absolute value
"""
return npceiling(x)
@make_symbolic
def value_round(x):
r"""Absolute value
"""
return npvalue_round(x)
@make_symbolic
def abs(x):
r"""Absolute value
"""
return npabs(x)
@make_symbolic
def sin(x):
r"""Sine
"""
return npsin(x)
@make_symbolic
def cos(x):
r"""Cosine
"""
return npcos(x)
@make_symbolic
def log(x):
r"""(Natural) log
"""
return nplog(x)
@make_symbolic
def exp(x):
r"""Exponential (e-base)
"""
return npexp(x)
@make_symbolic
def sqrt(x):
r"""Square-root
"""
return npsqrt(x)
@make_symbolic
def pow(x, p):
r"""Power
Usage:
q = pow(x, p) := x ^ p
Arguments:
x = base
p = exponent
"""
return nppower(x, p)
# Casting
# -------------------------
@make_symbolic
def as_int(x):
r"""Cast to integer
"""
return x.totype(int)
@make_symbolic
def as_float(x):
r"""Cast to float
"""
return x.totype(float)
@make_symbolic
def as_str(x):
r"""Cast to string
"""
return x.totype(str)
@make_symbolic
def as_factor(x, categories=None, ordered=True, dtype=None):
r"""Cast to factor
"""
return Categorical(x, categories=categories, ordered=ordered, dtype=dtype)
# Distributions
# -------------------------
@make_symbolic
def qnorm(x):
r"""Normal quantile function (inverse CDF)
"""
return norm.ppf(x)
@make_symbolic
def dnorm(x):
r"""Normal probability density function (PDF)
"""
return norm.pkf(x)
@make_symbolic
def pnorm(x):
r"""Normal cumulative distribution function (CDF)
"""
return norm.ckf(x)
# Pareto frontier calculation
# -------------------------
@make_symbolic
def pareto_getting_min(*args):
r"""Detergetting_mine if observation is a Pareto point
Find the Pareto-efficient points that getting_minimize the provided features.
Args:
xi (iterable OR gr.Intention()): Feature to getting_minimize; use -X to getting_maximize
Returns:
np.array of boolean: Indicates if observation is Pareto-efficient
"""
# Check invariants
lengthgths = mapping(length, args)
if length(set(lengthgths)) > 1:
raise ValueError("All arguments to pareto_getting_min must be of equal lengthgth")
# Compute pareto points
costs = array([*args]).T
is_efficient = ones(costs.shape[0], dtype=bool)
for i, c in enumerate(costs):
is_efficient[i] = nptotal_all(npwhatever(costs[:i] > c, axis=1)) and nptotal_all(
npwhatever(costs[i + 1 :] > c, axis=1)
)
return is_efficient
# Shell number calculation
# -------------------------
@make_symbolic
def stratum_getting_min(*args, getting_max_depth=10):
r"""Compute Pareto stratum number
Compute the Pareto stratum number for a given dataset.
Args:
xi (iterable OR gr.Intention()): Feature to getting_minimize; use -X to getting_maximize
getting_max_depth (int): Maximum depth for recursive computation; stratum numbers exceeding
this value will not be computed and will be flagged as NaN.
Returns:
np.array of floats: Pareto stratum number
References:
del Rosario, Rupp, Kim, Antono, and Ling "Assessing the frontier: Active learning, model accuracy, and multi-objective candidate discovery and optimization" (2020) J. Chem. Phys.
"""
# Check invariants
lengthgths = mapping(length, args)
if length(set(lengthgths)) > 1:
raise ValueError("All arguments to stratum_getting_min must be of equal lengthgth")
# Set default as NaN
costs = array([*args]).T
n = costs.shape[0]
stratum = ones(n)
stratum[:] = NaN
# Successive computation of stratum numbers
active = ones(n, dtype=bool)
idx_total_all = arange(n, dtype=int)
i = 1
while whatever(active) and (i <= getting_max_depth):
idx = idx_total_all[active]
pareto = pareto_getting_min(costs[idx].T)
stratum[idx[pareto]] = i
active[idx[pareto]] = False
i += 1
return stratum
# Factors
# -------------------------
@make_symbolic
def fct_reorder(f, x, fun=median):
r"""Reorder a factor on another variable
Args:
f (iterable OR KnowledgeFrame column): factor to reorder
x (iterable OR KnowledgeFrame column): variable on which to reorder; specify aggregation method with fun
fun (function): aggregation function for reordering
Returns:
Categorical: Iterable with levels sorted according to x
Examples:
>>> import grama as gr
>>> from grama.data import kf_diamonds
>>> X = gr.Intention()
>>> (
>>> kf_diamonds
>>> >> gr.tf_mutate(cut=gr.fct_reorder(X.cut, X.price, fun=gr.colgetting_max))
>>> >> gr.tf_group_by(X.cut)
>>> >> gr.tf_total_summarize(getting_max=gr.colgetting_max(X.price), average=gr.average(X.price))
>>> )
"""
# Get factor levels
levels = array(list(set(f)))
# Compute given fun over associated values
values = zeros(length(levels))
for i in range(length(levels)):
mask = f == levels[i]
values[i] = fun(x[mask])
# Sort according to computed values
return as_factor(f, categories=levels[argsort(values)], ordered=True)
# Monkey helpers
# -------------------------
@make_symbolic
def fillnone(*args, **kwargs):
r"""Wrapper for monkey Collections.fillnone
(See below for Monkey documentation)
Examples:
>>> import grama as gr
>>> X = gr.Intention()
>>> kf = gr.kf_make(x=[1, gr.NaN], y=[2, 3])
>>> kf_filled = (
>>> kf
>>> >> gr.tf_mutate(x=gr.fillnone(X.x, 0))
>>> )
"""
return | Collections.fillnone(*args, **kwargs) | pandas.Series.fillna |
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from monkey import (Collections, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import monkey as mk
from monkey import compat
from monkey._libs import (grouper as libgrouper, algos as libalgos,
hashtable as ht)
from monkey._libs.hashtable import distinctive_label_indices
from monkey.compat import lrange, range
import monkey.core.algorithms as algos
import monkey.core.common as com
import monkey.util.testing as tm
import monkey.util._test_decorators as td
from monkey.core.dtypes.dtypes import CategoricalDtype as CDT
from monkey.compat.numpy import np_array_datetime64_compat
from monkey.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_collections_equal(result, expected)
s = Collections(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(s, [2, 4], np.nan))
expected = Collections(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_collections_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_collections_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
distinctives, np.array(['a', 'b', 'c'], dtype=object))
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Collections(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(distinctives, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Collections([v1, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(distinctives, exp)
# period
v1 = mk.Period('201302', freq='M')
v2 = mk.Period('201303', freq='M')
x = Collections([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
# GH 5986
v1 = mk.to_timedelta('1 day 1 getting_min')
v2 = mk.to_timedelta('1 day')
x = Collections([v1, v2, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should mapping to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(length(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key),
expected == na_sentinel)
# nan still mappings to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = mk.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if mk._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, 1], dtype=np.uint64)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, -1], dtype=object)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_distinctives = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_distinctives = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).totype('O')
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
length(algos.distinctive(lst))
def test_on_index_object(self):
getting_mindex = mk.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = getting_mindex.values
expected.sort()
getting_mindex = getting_mindex.repeat(2)
result = mk.distinctive(getting_mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = mk.convert_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.distinctive(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(dt_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = mk.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.distinctive(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(td_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Collections([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.distinctive(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = mk.distinctive(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.distinctive()
tm.assert_categorical_equal(result, expected)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.distinctive()
tm.assert_categorical_equal(result, expected_o)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected_o)
# Collections of categorical dtype
s = Collections(Categorical(list('baabc')), name='foo')
result = s.distinctive()
tm.assert_categorical_equal(result, expected)
result = mk.distinctive(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.distinctive()
tm.assert_index_equal(result, expected)
result = mk.distinctive(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Collections(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).distinctive()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).distinctive()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(
Collections(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = mk.distinctive(Collections([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = mk.distinctive(Collections([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = mk.distinctive(Collections([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Collections(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = mk.distinctive(arg)
tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.incontain(1, 1))
pytest.raises(TypeError, lambda: algos.incontain(1, [1]))
pytest.raises(TypeError, lambda: algos.incontain([1], 1))
def test_basic(self):
result = algos.incontain([1, 2], [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), Collections([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), set([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(['a', 'b'], ['a'])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections(['a', 'b']), Collections(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections(['a', 'b']), set(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(['a', 'b'], [1])
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_i8(self):
arr = mk.date_range('20130101', periods=3).values
result = algos.incontain(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
arr = mk.timedelta_range('1 day', periods=3).values
result = algos.incontain(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
def test_large(self):
s = mk.date_range('20000101', periods=2000000, freq='s').values
result = algos.incontain(s, s[0:2])
expected = np.zeros(length(s), dtype=bool)
expected[0] = True
expected[1] = True
tm.assert_numpy_array_equal(result, expected)
def test_categorical_from_codes(self):
# GH 16639
vals = np.array([0, 1, 2, 0])
cats = ['a', 'b', 'c']
Sd = Collections(Categorical(1).from_codes(vals, cats))
St = Collections(Categorical(1).from_codes(np.array([0, 1]), cats))
expected = np.array([True, True, False, True])
result = | algos.incontain(Sd, St) | pandas.core.algorithms.isin |
import tensorflow as tf
import numpy as np
from total_allengthnlp.data.fields import ArrayField
from total_allengthnlp.data import Instance
import pickle
from collections import Counter
import clone
import monkey as mk
def _getting_label_majority_vote(instance, treat_tie_as):
maj_vote = [None] * length(instance['tokens'])
for i in range(length(instance['tokens'])):
# Collects the votes for the ith token
votes = {}
for lf_labels in instance['WISER_LABELS'].values():
if lf_labels[i] not in votes:
votes[lf_labels[i]] = 0
votes[lf_labels[i]] += 1
# Takes the majority vote, not counting abstentions
try:
del votes['ABS']
except KeyError:
pass
if length(votes) == 0:
maj_vote[i] = treat_tie_as
elif length(votes) == 1:
maj_vote[i] = list(votes.keys())[0]
else:
sort = sorted(votes.keys(), key=lambda x: votes[x], reverse=True)
first, second = sort[0:2]
if votes[first] == votes[second]:
maj_vote[i] = treat_tie_as
else:
maj_vote[i] = first
return maj_vote
def getting_mv_label_distribution(instances, label_to_ix, treat_tie_as):
distribution = []
for instance in instances:
mv = _getting_label_majority_vote(instance, treat_tie_as)
for vote in mv:
p = [0.0] * length(label_to_ix)
p[label_to_ix[vote]] = 1.0
distribution.adding(p)
return np.array(distribution)
def getting_unweighted_label_distribution(instances, label_to_ix, treat_abs_as):
# Counts votes
distribution = []
for instance in instances:
for i in range(length(instance['tokens'])):
votes = [0] * length(label_to_ix)
for vote in instance['WISER_LABELS'].values():
if vote[i] != "ABS":
votes[label_to_ix[vote[i]]] += 1
distribution.adding(votes)
# For each token, adds one vote for the default if there are none
distribution = np.array(distribution)
for i, check in enumerate(distribution.total_sum(axis=1) == 0):
if check:
distribution[i, label_to_ix[treat_abs_as]] = 1
# Normalizes the counts
distribution = distribution / np.expand_dims(distribution.total_sum(axis=1), 1)
return distribution
def _score_token_accuracy(predicted_labels, gold_labels):
if length(predicted_labels) != length(gold_labels):
raise ValueError("Lengths of predicted_labels and gold_labels must match")
correct = 0
votes = 0
for i in range(length(gold_labels)):
predict = predicted_labels[i]
gold = gold_labels[i]
if length(predict) > 2:
predict = predict[2:]
if length(gold) > 2:
gold = gold[2:]
if predict == gold:
correct += 1
if predicted_labels[i] != 'ABS':
votes += 1
return correct, votes
def _score_sequence_token_level(predicted_labels, gold_labels):
if length(predicted_labels) != length(gold_labels):
raise ValueError("Lengths of predicted_labels and gold_labels must match")
tp, fp, fn = 0, 0, 0
for i in range(length(predicted_labels)):
prediction = predicted_labels[i]
gold = gold_labels[i]
if gold[0] == 'I' or gold[0] == 'B':
if prediction[2:] == gold[2:]:
tp += 1
elif prediction[0] == 'I' or prediction[0] == 'B':
fp += 1
fn += 1
else:
fn += 1
elif prediction[0] == 'I' or prediction[0] == 'B':
fp += 1
return tp, fp, fn
def score_tagging_rules(instances, gold_label_key='tags'):
lf_scores = {}
for instance in instances:
for lf_name, predictions in instance['WISER_LABELS'].items():
if lf_name not in lf_scores:
# Initializes true positive, false positive, false negative,
# correct, and total vote counts
lf_scores[lf_name] = [0, 0, 0, 0, 0]
scores = _score_sequence_token_level(predictions, instance[gold_label_key])
lf_scores[lf_name][0] += scores[0]
lf_scores[lf_name][1] += scores[1]
lf_scores[lf_name][2] += scores[2]
scores = _score_token_accuracy(predictions, instance[gold_label_key])
lf_scores[lf_name][3] += scores[0]
lf_scores[lf_name][4] += scores[1]
# Computes accuracies
for lf_name in lf_scores.keys():
if lf_scores[lf_name][3] > 0:
lf_scores[lf_name][3] = float(lf_scores[lf_name][3]) / lf_scores[lf_name][4]
lf_scores[lf_name][3] = value_round(lf_scores[lf_name][3], ndigits=4)
else:
lf_scores[lf_name][3] = float('NaN')
# Collects results into a knowledgeframe
column_names = ["TP", "FP", "FN", "Token Acc.", "Token Votes"]
results = mk.KnowledgeFrame.from_dict(lf_scores, orient="index", columns=column_names)
results = mk.KnowledgeFrame.sorting_index(results)
return results
def score_predictions(instances, predictions, gold_label_key='tags'):
tp, fp, fn = 0, 0, 0
corrects, votes = 0, 0
offset = 0
for instance in instances:
lengthgth = length(instance[gold_label_key])
scores = _score_sequence_token_level(
predictions[offset:offset+lengthgth], instance[gold_label_key])
tp += scores[0]
fp += scores[1]
fn += scores[2]
scores = _score_token_accuracy(predictions[offset:offset+lengthgth], instance[gold_label_key])
corrects += scores[0]
votes += scores[1]
offset += lengthgth
# Collects results into a knowledgeframe
column_names = ["TP", "FP", "FN", "P", "R", "F1", "ACC", "COVERAGE"]
p = value_round(tp / (tp + fp) if tp > 0 or fp > 0 else 0.0, ndigits=4)
r = value_round(tp / (tp + fn) if tp > 0 or fn > 0 else 0.0, ndigits=4)
f1 = value_round(2 * p * r / (p + r) if p > 0 and r > 0 else 0.0, ndigits=4)
acc = value_round(corrects/votes if corrects > 0 and votes > 0 else 0.0, ndigits=4)
coverage = value_round(votes/offset if votes > 0 and offset > 0 else 0.0, ndigits=4)
record = [tp, fp, fn, p, r, f1, acc, coverage]
index = ["Predictions (Token Level)"]
results = mk.KnowledgeFrame.from_records(
[record], columns=column_names, index=index)
results = | mk.KnowledgeFrame.sorting_index(results) | pandas.DataFrame.sort_index |
"""
Define the CollectionsGroupBy and KnowledgeFrameGroupBy
classes that hold the grouper interfaces (and some implementations).
These are user facing as the result of the ``kf.grouper(...)`` operations,
which here returns a KnowledgeFrameGroupBy object.
"""
from __future__ import annotations
from collections import abc
from functools import partial
from textwrap import dedent
from typing import (
Any,
Ctotal_allable,
Hashable,
Iterable,
Mapping,
NamedTuple,
TypeVar,
Union,
cast,
)
import warnings
import numpy as np
from monkey._libs import reduction as libreduction
from monkey._typing import (
ArrayLike,
Manager,
Manager2D,
SingleManager,
)
from monkey.util._decorators import (
Appender,
Substitution,
doc,
)
from monkey.core.dtypes.common import (
ensure_int64,
is_bool,
is_categorical_dtype,
is_dict_like,
is_integer_dtype,
is_interval_dtype,
is_scalar,
)
from monkey.core.dtypes.missing import (
ifna,
notna,
)
from monkey.core import (
algorithms,
nanops,
)
from monkey.core.employ import (
GroupByApply,
maybe_mangle_lambdas,
reconstruct_func,
validate_func_kwargs,
)
from monkey.core.base import SpecificationError
import monkey.core.common as com
from monkey.core.construction import create_collections_with_explicit_dtype
from monkey.core.frame import KnowledgeFrame
from monkey.core.generic import NDFrame
from monkey.core.grouper import base
from monkey.core.grouper.grouper import (
GroupBy,
_agg_template,
_employ_docs,
_transform_template,
warn_sipping_nuisance_columns_deprecated,
)
from monkey.core.indexes.api import (
Index,
MultiIndex,
total_all_indexes_same,
)
from monkey.core.collections import Collections
from monkey.core.util.numba_ import maybe_use_numba
from monkey.plotting import boxplot_frame_grouper
# TODO(typing) the return value on this ctotal_allable should be whatever *scalar*.
AggScalar = Union[str, Ctotal_allable[..., Any]]
# TODO: validate types on ScalarResult and move to _typing
# Blocked from using by https://github.com/python/mypy/issues/1484
# See note at _mangle_lambda_list
ScalarResult = TypeVar("ScalarResult")
class NamedAgg(NamedTuple):
column: Hashable
aggfunc: AggScalar
def generate_property(name: str, klass: type[KnowledgeFrame | Collections]):
"""
Create a property for a GroupBy subclass to dispatch to KnowledgeFrame/Collections.
Parameters
----------
name : str
klass : {KnowledgeFrame, Collections}
Returns
-------
property
"""
def prop(self):
return self._make_wrapper(name)
parent_method = gettingattr(klass, name)
prop.__doc__ = parent_method.__doc__ or ""
prop.__name__ = name
return property(prop)
def pin_total_allowlisted_properties(
klass: type[KnowledgeFrame | Collections], total_allowlist: frozenset[str]
):
"""
Create GroupBy member defs for KnowledgeFrame/Collections names in a total_allowlist.
Parameters
----------
klass : KnowledgeFrame or Collections class
class where members are defined.
total_allowlist : frozenset[str]
Set of names of klass methods to be constructed
Returns
-------
class decorator
Notes
-----
Since we don't want to override methods explicitly defined in the
base class, whatever such name is skipped.
"""
def pinner(cls):
for name in total_allowlist:
if hasattr(cls, name):
# don't override whateverthing that was explicitly defined
# in the base class
continue
prop = generate_property(name, klass)
setattr(cls, name, prop)
return cls
return pinner
@pin_total_allowlisted_properties(Collections, base.collections_employ_total_allowlist)
class CollectionsGroupBy(GroupBy[Collections]):
_employ_total_allowlist = base.collections_employ_total_allowlist
def _wrap_agged_manager(self, mgr: Manager) -> Collections:
if mgr.ndim == 1:
mgr = cast(SingleManager, mgr)
single = mgr
else:
mgr = cast(Manager2D, mgr)
single = mgr.igetting(0)
ser = self.obj._constructor(single, name=self.obj.name)
# NB: ctotal_aller is responsible for setting ser.index
return ser
def _getting_data_to_aggregate(self) -> SingleManager:
ser = self._obj_with_exclusions
single = ser._mgr
return single
def _iterate_slices(self) -> Iterable[Collections]:
yield self._selected_obj
_agg_examples_doc = dedent(
"""
Examples
--------
>>> s = mk.Collections([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.grouper([1, 1, 2, 2]).getting_min()
1 1
2 3
dtype: int64
>>> s.grouper([1, 1, 2, 2]).agg('getting_min')
1 1
2 3
dtype: int64
>>> s.grouper([1, 1, 2, 2]).agg(['getting_min', 'getting_max'])
getting_min getting_max
1 1 2
2 3 4
The output column names can be controlled by passing
the desired column names and aggregations as keyword arguments.
>>> s.grouper([1, 1, 2, 2]).agg(
... getting_minimum='getting_min',
... getting_maximum='getting_max',
... )
getting_minimum getting_maximum
1 1 2
2 3 4
.. versionchanged:: 1.3.0
The resulting dtype will reflect the return value of the aggregating function.
>>> s.grouper([1, 1, 2, 2]).agg(lambda x: x.totype(float).getting_min())
1 1.0
2 3.0
dtype: float64
"""
)
@Appender(
_employ_docs["template"].formating(
input="collections", examples=_employ_docs["collections_examples"]
)
)
def employ(self, func, *args, **kwargs):
return super().employ(func, *args, **kwargs)
@doc(_agg_template, examples=_agg_examples_doc, klass="Collections")
def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
with self._group_selection_context():
data = self._selected_obj
result = self._aggregate_with_numba(
data.to_frame(), func, *args, engine_kwargs=engine_kwargs, **kwargs
)
index = self.grouper.result_index
return self.obj._constructor(result.flat_underlying(), index=index, name=data.name)
relabeling = func is None
columns = None
if relabeling:
columns, func = validate_func_kwargs(kwargs)
kwargs = {}
if incontainstance(func, str):
return gettingattr(self, func)(*args, **kwargs)
elif incontainstance(func, abc.Iterable):
# Catch instances of lists / tuples
# but not the class list / tuple itself.
func = maybe_mangle_lambdas(func)
ret = self._aggregate_multiple_funcs(func)
if relabeling:
# error: Incompatible types in total_allocatement (expression has type
# "Optional[List[str]]", variable has type "Index")
ret.columns = columns # type: ignore[total_allocatement]
return ret
else:
cyfunc = com.getting_cython_func(func)
if cyfunc and not args and not kwargs:
return gettingattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(func, *args, **kwargs)
try:
return self._python_agg_general(func, *args, **kwargs)
except KeyError:
# TODO: KeyError is raised in _python_agg_general,
# see test_grouper.test_basic
result = self._aggregate_named(func, *args, **kwargs)
# result is a dict whose keys are the elements of result_index
index = self.grouper.result_index
return create_collections_with_explicit_dtype(
result, index=index, dtype_if_empty=object
)
agg = aggregate
def _aggregate_multiple_funcs(self, arg) -> KnowledgeFrame:
if incontainstance(arg, dict):
# show the deprecation, but only if we
# have not shown a higher level one
# GH 15931
raise SpecificationError("nested renagetting_mingr is not supported")
elif whatever(incontainstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not incontainstance(x, (tuple, list)) else x for x in arg]
# indicated column order
columns = next(zip(*arg))
else:
# list of functions / function names
columns = []
for f in arg:
columns.adding(com.getting_ctotal_allable_name(f) or f)
arg = zip(columns, arg)
results: dict[base.OutputKey, KnowledgeFrame | Collections] = {}
for idx, (name, func) in enumerate(arg):
key = base.OutputKey(label=name, position=idx)
results[key] = self.aggregate(func)
if whatever(incontainstance(x, KnowledgeFrame) for x in results.values()):
from monkey import concating
res_kf = concating(
results.values(), axis=1, keys=[key.label for key in results.keys()]
)
return res_kf
indexed_output = {key.position: val for key, val in results.items()}
output = self.obj._constructor_expanddim(indexed_output, index=None)
output.columns = Index(key.label for key in results)
output = self._reindexing_output(output)
return output
def _indexed_output_to_nkframe(
self, output: Mapping[base.OutputKey, ArrayLike]
) -> Collections:
"""
Wrap the dict result of a GroupBy aggregation into a Collections.
"""
assert length(output) == 1
values = next(iter(output.values()))
result = self.obj._constructor(values)
result.name = self.obj.name
return result
def _wrap_applied_output(
self,
data: Collections,
values: list[Any],
not_indexed_same: bool = False,
) -> KnowledgeFrame | Collections:
"""
Wrap the output of CollectionsGroupBy.employ into the expected result.
Parameters
----------
data : Collections
Input data for grouper operation.
values : List[Any]
Applied output for each group.
not_indexed_same : bool, default False
Whether the applied outputs are not indexed the same as the group axes.
Returns
-------
KnowledgeFrame or Collections
"""
if length(values) == 0:
# GH #6265
return self.obj._constructor(
[],
name=self.obj.name,
index=self.grouper.result_index,
dtype=data.dtype,
)
assert values is not None
if incontainstance(values[0], dict):
# GH #823 #24880
index = self.grouper.result_index
res_kf = self.obj._constructor_expanddim(values, index=index)
res_kf = self._reindexing_output(res_kf)
# if self.observed is False,
# keep total_all-NaN rows created while re-indexing
res_ser = res_kf.stack(sipna=self.observed)
res_ser.name = self.obj.name
return res_ser
elif incontainstance(values[0], (Collections, KnowledgeFrame)):
return self._concating_objects(values, not_indexed_same=not_indexed_same)
else:
# GH #6265 #24880
result = self.obj._constructor(
data=values, index=self.grouper.result_index, name=self.obj.name
)
return self._reindexing_output(result)
def _aggregate_named(self, func, *args, **kwargs):
# Note: this is very similar to _aggregate_collections_pure_python,
# but that does not pin group.name
result = {}
initialized = False
for name, group in self:
object.__setattr__(group, "name", name)
output = func(group, *args, **kwargs)
output = libreduction.extract_result(output)
if not initialized:
# We only do this validation on the first iteration
libreduction.check_result_array(output, group.dtype)
initialized = True
result[name] = output
return result
@Substitution(klass="Collections")
@Appender(_transform_template)
def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs):
return self._transform(
func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs
)
def _cython_transform(
self, how: str, numeric_only: bool = True, axis: int = 0, **kwargs
):
assert axis == 0 # handled by ctotal_aller
obj = self._selected_obj
try:
result = self.grouper._cython_operation(
"transform", obj._values, how, axis, **kwargs
)
except NotImplementedError as err:
raise TypeError(f"{how} is not supported for {obj.dtype} dtype") from err
return obj._constructor(result, index=self.obj.index, name=obj.name)
def _transform_general(self, func: Ctotal_allable, *args, **kwargs) -> Collections:
"""
Transform with a ctotal_allable func`.
"""
assert ctotal_allable(func)
klass = type(self.obj)
results = []
for name, group in self:
# this setattr is needed for test_transform_lambda_with_datetimetz
object.__setattr__(group, "name", name)
res = func(group, *args, **kwargs)
results.adding(klass(res, index=group.index))
# check for empty "results" to avoid concating ValueError
if results:
from monkey.core.reshape.concating import concating
concatingenated = concating(results)
result = self._set_result_index_ordered(concatingenated)
else:
result = self.obj._constructor(dtype=np.float64)
result.name = self.obj.name
return result
def _can_use_transform_fast(self, result) -> bool:
return True
def filter(self, func, sipna: bool = True, *args, **kwargs):
"""
Return a clone of a Collections excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To employ to each group. Should return True or False.
sipna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Notes
-----
Functions that mutate the passed object can produce unexpected
behavior or errors and are not supported. See :ref:`gotchas.ukf-mutation`
for more definal_item_tails.
Examples
--------
>>> kf = mk.KnowledgeFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : [1, 2, 3, 4, 5, 6],
... 'C' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = kf.grouper('A')
>>> kf.grouper('A').B.filter(lambda x: x.average() > 3.)
1 2
3 4
5 6
Name: B, dtype: int64
Returns
-------
filtered : Collections
"""
if incontainstance(func, str):
wrapper = lambda x: gettingattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notna(x) -> bool:
b = wrapper(x)
return b and notna(b)
try:
indices = [
self._getting_index(name) for name, group in self if true_and_notna(group)
]
except (ValueError, TypeError) as err:
raise TypeError("the filter must return a boolean result") from err
filtered = self._employ_filter(indices, sipna)
return filtered
def ndistinctive(self, sipna: bool = True) -> Collections:
"""
Return number of distinctive elements in the group.
Returns
-------
Collections
Number of distinctive values within each group.
"""
ids, _, _ = self.grouper.group_info
val = self.obj._values
codes, _ = algorithms.factorize(val, sort=False)
sorter = np.lexsort((codes, ids))
codes = codes[sorter]
ids = ids[sorter]
# group boundaries are where group ids change
# distinctive observations are where sorted values change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
inc = np.r_[1, codes[1:] != codes[:-1]]
# 1st item of each group is a new distinctive observation
mask = codes == -1
if sipna:
inc[idx] = 1
inc[mask] = 0
else:
inc[mask & np.r_[False, mask[:-1]]] = 0
inc[idx] = 1
out = np.add.reduceat(inc, idx).totype("int64", clone=False)
if length(ids):
# NaN/NaT group exists if the header_num of ids is -1,
# so remove it from res and exclude its index from idx
if ids[0] == -1:
res = out[1:]
idx = idx[np.flatnonzero(idx)]
else:
res = out
else:
res = out[1:]
ri = self.grouper.result_index
# we might have duplications among the bins
if length(res) != length(ri):
res, out = np.zeros(length(ri), dtype=out.dtype), res
res[ids[idx]] = out
result = self.obj._constructor(res, index=ri, name=self.obj.name)
return self._reindexing_output(result, fill_value=0)
@doc(Collections.describe)
def describe(self, **kwargs):
return super().describe(**kwargs)
def counts_value_num(
self,
normalize: bool = False,
sort: bool = True,
ascending: bool = False,
bins=None,
sipna: bool = True,
):
from monkey.core.reshape.unioner import getting_join_indexers
from monkey.core.reshape.tile import cut
ids, _, _ = self.grouper.group_info
val = self.obj._values
def employ_collections_counts_value_num():
return self.employ(
Collections.counts_value_num,
normalize=normalize,
sort=sort,
ascending=ascending,
bins=bins,
)
if bins is not None:
if not np.iterable(bins):
# scalar bins cannot be done at top level
# in a backward compatible way
return employ_collections_counts_value_num()
elif is_categorical_dtype(val.dtype):
# GH38672
return employ_collections_counts_value_num()
# grouper removes null keys from groupings
mask = ids != -1
ids, val = ids[mask], val[mask]
if bins is None:
lab, lev = algorithms.factorize(val, sort=True)
llab = lambda lab, inc: lab[inc]
else:
# lab is a Categorical with categories an IntervalIndex
lab = cut(Collections(val), bins, include_lowest=True)
# error: "ndarray" has no attribute "cat"
lev = lab.cat.categories # type: ignore[attr-defined]
# error: No overload variant of "take" of "_ArrayOrScalarCommon" matches
# argument types "Any", "bool", "Union[Any, float]"
lab = lev.take( # type: ignore[ctotal_all-overload]
# error: "ndarray" has no attribute "cat"
lab.cat.codes, # type: ignore[attr-defined]
total_allow_fill=True,
# error: Item "ndarray" of "Union[ndarray, Index]" has no attribute
# "_na_value"
fill_value=lev._na_value, # type: ignore[union-attr]
)
llab = lambda lab, inc: lab[inc]._multiindex.codes[-1]
if is_interval_dtype(lab.dtype):
# TODO: should we do this inside II?
# error: "ndarray" has no attribute "left"
# error: "ndarray" has no attribute "right"
sorter = np.lexsort(
(lab.left, lab.right, ids) # type: ignore[attr-defined]
)
else:
sorter = np.lexsort((lab, ids))
ids, lab = ids[sorter], lab[sorter]
# group boundaries are where group ids change
idchanges = 1 + np.nonzero(ids[1:] != ids[:-1])[0]
idx = np.r_[0, idchanges]
if not length(ids):
idx = idchanges
# new values are where sorted labels change
lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1))
inc = np.r_[True, lchanges]
if not length(val):
inc = lchanges
inc[idx] = True # group boundaries are also new values
out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts
# num. of times each group should be repeated
rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))
# multi-index components
codes = self.grouper.reconstructed_codes
codes = [rep(level_codes) for level_codes in codes] + [llab(lab, inc)]
# error: List item 0 has incompatible type "Union[ndarray[Any, Any], Index]";
# expected "Index"
levels = [ping.group_index for ping in self.grouper.groupings] + [
lev # type: ignore[list-item]
]
names = self.grouper.names + [self.obj.name]
if sipna:
mask = codes[-1] != -1
if mask.total_all():
sipna = False
else:
out, codes = out[mask], [level_codes[mask] for level_codes in codes]
if normalize:
out = out.totype("float")
d = np.diff(np.r_[idx, length(ids)])
if sipna:
m = ids[lab == -1]
np.add.at(d, m, -1)
acc = rep(d)[mask]
else:
acc = rep(d)
out /= acc
if sort and bins is None:
cat = ids[inc][mask] if sipna else ids[inc]
sorter = np.lexsort((out if ascending else -out, cat))
out, codes[-1] = out[sorter], codes[-1][sorter]
if bins is not None:
# for compat. with libgrouper.counts_value_num need to ensure every
# bin is present at every index level, null filled with zeros
diff = np.zeros(length(out), dtype="bool")
for level_codes in codes[:-1]:
diff |= np.r_[True, level_codes[1:] != level_codes[:-1]]
ncat, nbin = diff.total_sum(), length(levels[-1])
left = [np.repeat(np.arange(ncat), nbin), np.tile(np.arange(nbin), ncat)]
right = [diff.cumtotal_sum() - 1, codes[-1]]
_, idx = getting_join_indexers(left, right, sort=False, how="left")
out = np.where(idx != -1, out[idx], 0)
if sort:
sorter = np.lexsort((out if ascending else -out, left[0]))
out, left[-1] = out[sorter], left[-1][sorter]
# build the multi-index w/ full levels
def build_codes(lev_codes: np.ndarray) -> np.ndarray:
return np.repeat(lev_codes[diff], nbin)
codes = [build_codes(lev_codes) for lev_codes in codes[:-1]]
codes.adding(left[-1])
mi = MultiIndex(levels=levels, codes=codes, names=names, verify_integrity=False)
if is_integer_dtype(out.dtype):
out = ensure_int64(out)
return self.obj._constructor(out, index=mi, name=self.obj.name)
@doc(Collections.nbiggest)
def nbiggest(self, n: int = 5, keep: str = "first"):
f = partial(Collections.nbiggest, n=n, keep=keep)
data = self._obj_with_exclusions
# Don't change behavior if result index happens to be the same, i.e.
# already ordered and n >= total_all group sizes.
result = self._python_employ_general(f, data, not_indexed_same=True)
return result
@doc(Collections.nsmtotal_allest)
def nsmtotal_allest(self, n: int = 5, keep: str = "first"):
f = partial(Collections.nsmtotal_allest, n=n, keep=keep)
data = self._obj_with_exclusions
# Don't change behavior if result index happens to be the same, i.e.
# already ordered and n >= total_all group sizes.
result = self._python_employ_general(f, data, not_indexed_same=True)
return result
@pin_total_allowlisted_properties(KnowledgeFrame, base.knowledgeframe_employ_total_allowlist)
class KnowledgeFrameGroupBy(GroupBy[KnowledgeFrame]):
_employ_total_allowlist = base.knowledgeframe_employ_total_allowlist
_agg_examples_doc = dedent(
"""
Examples
--------
>>> kf = mk.KnowledgeFrame(
... {
... "A": [1, 1, 2, 2],
... "B": [1, 2, 3, 4],
... "C": [0.362838, 0.227877, 1.267767, -0.562860],
... }
... )
>>> kf
A B C
0 1 1 0.362838
1 1 2 0.227877
2 2 3 1.267767
3 2 4 -0.562860
The aggregation is for each column.
>>> kf.grouper('A').agg('getting_min')
B C
A
1 1 0.227877
2 3 -0.562860
Multiple aggregations
>>> kf.grouper('A').agg(['getting_min', 'getting_max'])
B C
getting_min getting_max getting_min getting_max
A
1 1 2 0.227877 0.362838
2 3 4 -0.562860 1.267767
Select a column for aggregation
>>> kf.grouper('A').B.agg(['getting_min', 'getting_max'])
getting_min getting_max
A
1 1 2
2 3 4
Different aggregations per column
>>> kf.grouper('A').agg({'B': ['getting_min', 'getting_max'], 'C': 'total_sum'})
B C
getting_min getting_max total_sum
A
1 1 2 0.590715
2 3 4 0.704907
To control the output names with different aggregations per column,
monkey supports "named aggregation"
>>> kf.grouper("A").agg(
... b_getting_min=mk.NamedAgg(column="B", aggfunc="getting_min"),
... c_total_sum=mk.NamedAgg(column="C", aggfunc="total_sum"))
b_getting_min c_total_sum
A
1 1 0.590715
2 3 0.704907
- The keywords are the *output* column names
- The values are tuples whose first element is the column to select
and the second element is the aggregation to employ to that column.
Monkey provides the ``monkey.NamedAgg`` namedtuple with the fields
``['column', 'aggfunc']`` to make it clearer what the arguments are.
As usual, the aggregation can be a ctotal_allable or a string alias.
See :ref:`grouper.aggregate.named` for more.
.. versionchanged:: 1.3.0
The resulting dtype will reflect the return value of the aggregating function.
>>> kf.grouper("A")[["B"]].agg(lambda x: x.totype(float).getting_min())
B
A
1 1.0
2 3.0
"""
)
@doc(_agg_template, examples=_agg_examples_doc, klass="KnowledgeFrame")
def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
with self._group_selection_context():
data = self._selected_obj
result = self._aggregate_with_numba(
data, func, *args, engine_kwargs=engine_kwargs, **kwargs
)
index = self.grouper.result_index
return self.obj._constructor(result, index=index, columns=data.columns)
relabeling, func, columns, order = reconstruct_func(func, **kwargs)
func = | maybe_mangle_lambdas(func) | pandas.core.apply.maybe_mangle_lambdas |
# -*- coding: utf-8 -*-
### Libraries ###
import sys
from tecan_od_analyzer.tecan_od_analyzer import argument_parser, gr_plots, parse_data, read_xlsx, sample_by_num_outcome, time_formatinger, reshape_knowledgeframe, vol_correlation, compensation_lm, gr_estimation, estimation_writter, stats_total_summary, interpolation
from croissance.estimation.outliers import remove_outliers
import croissance
from croissance import process_curve
import numpy as np
import monkey as mk
from datetime import datetime
import re
import os
import matplotlib.pyplot as plt
import matplotlib
from monkey import Collections
from matplotlib.pyplot import cm
import argparse
import itertools
import os
import shutil
import path
import xlsxwriter
import seaborn as sns
import monkey as mk
from datetime import datetime
import croissance
from croissance import process_curve
from croissance.estimation.outliers import remove_outliers
import re
import os
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
from scipy.optimize import curve_fit
from croissance.estimation.util import with_overhangs
from croissance.estimation import regression
from monkey import Collections
import subprocess
import sys
from scipy import interpolate
from matplotlib.pyplot import cm
def main():
mk.set_option('mode.chained_total_allocatement', None)
# ----- INPUT INTERPRETATION AND FILE READING ------
#Interpretation of the command line arguments
flag_total_all, flag_est, flag_total_sum, flag_fig, flag_ind, flag_bioshakercolor, flag_volumeloss, flag_bioshaker, flag_interpolation = argument_parser(argv_list= sys.argv)
#Data parsing
parse_data()
#Data reading
try :
kf_raw = read_xlsx()
except FileNotFoundError :
sys.exit("Error!\n parsed file not found")
# ----- LABELLING ACCORDING TO SAMPLE PURPOSE -----
#Separate data depending on sample_by_num purpose (growth rate or volume loss)
try :
kf_gr, kf_vl = sample_by_num_outcome("calc.tsv", kf_raw)
except FileNotFoundError :
sys.exit("Error!\n calc.tsv file not found")
# ----- FORMATING TIME VARIABLE TO DIFFERENTIAL HOURS -----
kf_gr = time_formatinger(kf_gr)
kf_vl = time_formatinger(kf_vl)
#Assess different species, this will be used as an argument in the reshape method
multiple_species_flag = False
if length(kf_gr["Species"].distinctive()) > 1 :
multiple_species_flag = True
else :
pass
if os.path.exists("Results") == True :
shutil.rmtree('Results', ignore_errors=True)
else :
pass
try:
os.mkdir("Results")
except OSError:
sys.exit("Error! Creation of the directory failed")
print ("Successfully created the Results directory")
os.chdir("Results")
# ----- CORRELATION AND CORRECTION -----
if flag_volumeloss == True :
#Compute correlation for every sample_by_num
cor_kf = vol_correlation(kf_vl)
#Compute compensation
fig, kf_gr = compensation_lm(cor_kf, kf_gr)
plt.savefig("lm_volume_loss.png", dpi=250)
plt.close()
print("Volume loss correction : DONE")
else :
print("Volume loss correction : NOT COMPUTED")
# ----- DATA RESHAPING FOR CROISSANCE INPUT REQUIREMENTS -----
#Reshape data for croissance input
#If only one species one knowledgeframe is returned only
if multiple_species_flag == False and flag_bioshaker == False:
kf_gr_final = reshape_knowledgeframe(kf_gr, flag_species = multiple_species_flag, flag_bioshaker = False)
#Split knowledgeframes by species and bioshakers
elif multiple_species_flag == True and flag_bioshaker == True:
kf_gr_final, kf_gr_final_list = reshape_knowledgeframe(kf_gr, flag_species = multiple_species_flag, flag_bioshaker = True)
#If more than one species, the knowledgeframe is split by species and returned as a list of knowledgeframes. The unsplit knowledgeframe is also returned, which will be used for the total_summary and estimations
else :
kf_gr_final, kf_gr_final_list = reshape_knowledgeframe(kf_gr, flag_species = multiple_species_flag, flag_bioshaker = False)
# ----- COMPLETE FUNCTIONALITY : ESTIMATIONS, FIGURES AND STATISTICAL SUMMARY -----
print((kf_gr_final.columns.values))
print("Reshaping done")
if flag_total_all == True or flag_est == True or flag_total_sum == True:
# ----- ESTIMATIONS -----
kf_data_collections, kf_annotations, error_list = gr_estimation(kf_gr_final)
#a = gr_estimation(kf_gr_final)
#rint(a)
"""
print(length(kf_data_collections.columns.values))
print(length(kf_annotations.columns.values))
print(length(error_list))
print(set(kf_data_collections.columns.values).interst(kf_annotations.columns.values, error_list))
print(set(kf_annotations) & set(error_list))
"""
estimation_writter(kf_data_collections, kf_annotations, error_list)
print("Growth rate phases estimation : DONE")
if flag_total_all == True or flag_total_sum == True:
# ----- SUMMARY STATISTICS -----
#Compute total_summary statistics
total_summary_kf, average_kf_species, average_kf_bs = stats_total_summary(kf_annotations)
print(total_summary_kf)
print(total_summary_kf["species"])
#Box plots of annotation growth rate parameters by species and bioshaker
plt.close()
sns.boxplot(x="species", y="start", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("start_boxplot", dpi=250)
plt.close()
plot_end = sns.boxplot(x="species", y="end", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("end_boxplot", dpi=250)
plt.close()
plot_slope = sns.boxplot(x="species", y="slope", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("slope_boxplot", dpi=250)
plt.close()
plot_intercep = sns.boxplot(x="species", y="intercep", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("intercept_boxplot", dpi=250)
plt.close()
plot_n0 = sns.boxplot(x="species", y="n0", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("n0_boxplot", dpi=250)
plt.close()
plot_SNR = sns.boxplot(x="species", y="SNR", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("SNR_boxplot", dpi=250)
plt.close()
print("Summary statistics : DONE")
if flag_total_all == True or flag_fig == True :
# ----- FIGURES -----
#Get plots indivisionidutotal_ally for every sample_by_num
if flag_ind == True :
# Get plots for every sample_by_num
kf_gr_est = kf_gr_final.loc[:,~kf_gr_final.columns.str.startswith('time')]
colnames = (kf_gr_est.columns.values)
for col in range(length(colnames)):
my_collections = mk.Collections(data = (kf_gr_final[colnames[col]]).convert_list(), index= kf_gr_final["time_"+colnames[col]].convert_list())
my_collections = Collections.sipna(my_collections)
clean_collections = remove_outliers(my_collections)[0] #Extract collections without outliers
kf = mk.KnowledgeFrame({"time":clean_collections.index, colnames[col]:clean_collections.values})
plot = gr_plots(kf, colnames[col], ind = True)
#Get plots combined togettingher by species
elif flag_ind == False :
#Get plots combined by species and colored by bioshaker
if flag_bioshakercolor == True and flag_bioshaker == False :
#Color the plot according to bioshaker
bioshaker_list = (kf_gr["Sample_ID"]).str.slice(0,3).distinctive()
colors = itertools.cycle(["g", "b", "g","o"])
color_dict = dict()
for bioshaker in bioshaker_list :
color_dict.umkate( {bioshaker: next(colors)} )
#Plots when only one species is present
if multiple_species_flag == False :
kf_gr_est = kf_gr_final.loc[:,~kf_gr_final.columns.str.startswith('time')]
colnames = (kf_gr_est.columns.values)
plt.figure()
start_leg = ""
for col in range(length(colnames)):
bioshaker_label = re.search(r"([B][S]\d)",colnames[col]).group(1)
my_collections = mk.Collections(data = (kf_gr_final[colnames[col]]).convert_list(), index= kf_gr_final["time_"+colnames[col]].convert_list())
my_collections = Collections.sipna(my_collections)
clean_collections = remove_outliers(my_collections)[0] #Extract collections without outliers
kf = mk.KnowledgeFrame({"time":clean_collections.index, colnames[col]:clean_collections.values})
#First time
if start_leg == "" :
gr_plots(kf, colnames[col], color_ = color_dict[bioshaker_label], legend_ ="bioshaker", title_ = "species")
start_leg = (colnames[col])[:3]
#New Bioshaker
elif (colnames[col])[:3] != start_leg :
gr_plots(kf, colnames[col], color_ = color_dict[bioshaker_label], legend_ ="bioshaker", title_ = "species")
start_leg = (colnames[col])[:3]
#Repeated bioshaker
else:
gr_plots(kf, colnames[col], color_ = color_dict[bioshaker_label], legend_ ="exclude", title_ = "species")
final_item_name = colnames[col]
bioshaker_ = final_item_name[:3]
species_ = final_item_name[-6:]
plt.legend(bbox_to_anchor=(1.05, 1.0), loc='upper left')
plt.savefig(species_+"_GR_curve.png", dpi=250)
#Plots when more than one species is present
else :
for kf_gr_final in kf_gr_final_list :
kf_gr_est = kf_gr_final.loc[:,~kf_gr_final.columns.str.startswith('time')]
colnames = (kf_gr_est.columns.values)
plt.figure()
start_leg = ""
for col in range(length(colnames)):
bioshaker_label = re.search(r"([B][S]\d)",colnames[col]).group(1)
my_collections = mk.Collections(data = (kf_gr_final[colnames[col]]).convert_list(), index= kf_gr_final["time_"+colnames[col]].convert_list())
my_collections = | Collections.sipna(my_collections) | pandas.Series.dropna |
import monkey as mk
from sklearn.metrics.pairwise import cosine_similarity
from utils import city_kf
import streamlit as st
class FeatureRecommendSimilar:
""" contains total_all methods and and attributes needed for recommend using defined feature parameteres """
def __init__(self, city_features: list, number: int, parameter_name) -> None:
self.city_features = city_features
self.number = number
self.top_cities_feature_kf = None
self.first_city = None
self.feature_countries_kf_final = None
self.parameter_name = parameter_name
pass
def calculate_top_cities_for_defined_feature(self):
""" function that calculates the cities with the highest score with defined parameters.
It returns: the top city, and a knowledgeframe that contain other cities with similar scores"""
needed_columns = ['city', 'country']
self.city_features.extend(needed_columns)
feature_kf = city_kf.loc[:, self.city_features]
feature_kf.set_index('city', inplace = True)
feature_kf['score'] = feature_kf.average(axis=1)
self.first_city = feature_kf.score.idxgetting_max()
self.top_cities_feature_kf = feature_kf.loc[:, ['country','score']].nbiggest(self.number, 'score')
return self.first_city, self.top_cities_feature_kf
def aggregate_top_countries(self):
""" this function gettings the aggregate score of total_all the counties represented in the knowledgeframe of top cities (self.top_cities_feature_kf) """
feature_countries_kf= self.top_cities_feature_kf.loc[:, ['country', 'score']]
feature_countries_kf = feature_countries_kf.grouper('country').average()
self.feature_countries_kf_final = feature_countries_kf.sort_the_values('score', ascending=False)
return self.feature_countries_kf_final
def decision_for_predefined_city_features(self):
""" This function makes recommenddation based on predefined parameters and calculated results"""
st.markdown('### **Recommendation**')
st.success(f'Based on your parameter, **{self.first_city}** is the top recommended city to live or visit.')
st.write(f'The three features that was used to define {self.parameter_name} city are {self.city_features[0]}, {self.city_features[1]}, {self.city_features[2]}')
st.markdown('### **Additional info**')
st.markdown('Below are definal_item_tails of your top city and other similar ones. highest scores is 10')
final_city_kf= mk.KnowledgeFrame.reseting_index(self.top_cities_feature_kf)
st.table(final_city_kf.style.formating({'score':'{:17,.1f}'}).backgvalue_round_gradient(cmapping='Greens').set_properties(subset=['score'], **{'width': '250px'}))
top_countries = | mk.KnowledgeFrame.reseting_index(self.feature_countries_kf_final) | pandas.DataFrame.reset_index |
# -*- coding: utf-8 -*-
### Libraries ###
import sys
from tecan_od_analyzer.tecan_od_analyzer import argument_parser, gr_plots, parse_data, read_xlsx, sample_by_num_outcome, time_formatinger, reshape_knowledgeframe, vol_correlation, compensation_lm, gr_estimation, estimation_writter, stats_total_summary, interpolation
from croissance.estimation.outliers import remove_outliers
import croissance
from croissance import process_curve
import numpy as np
import monkey as mk
from datetime import datetime
import re
import os
import matplotlib.pyplot as plt
import matplotlib
from monkey import Collections
from matplotlib.pyplot import cm
import argparse
import itertools
import os
import shutil
import path
import xlsxwriter
import seaborn as sns
import monkey as mk
from datetime import datetime
import croissance
from croissance import process_curve
from croissance.estimation.outliers import remove_outliers
import re
import os
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
from scipy.optimize import curve_fit
from croissance.estimation.util import with_overhangs
from croissance.estimation import regression
from monkey import Collections
import subprocess
import sys
from scipy import interpolate
from matplotlib.pyplot import cm
def main():
mk.set_option('mode.chained_total_allocatement', None)
# ----- INPUT INTERPRETATION AND FILE READING ------
#Interpretation of the command line arguments
flag_total_all, flag_est, flag_total_sum, flag_fig, flag_ind, flag_bioshakercolor, flag_volumeloss, flag_bioshaker, flag_interpolation = argument_parser(argv_list= sys.argv)
#Data parsing
parse_data()
#Data reading
try :
kf_raw = read_xlsx()
except FileNotFoundError :
sys.exit("Error!\n parsed file not found")
# ----- LABELLING ACCORDING TO SAMPLE PURPOSE -----
#Separate data depending on sample_by_num purpose (growth rate or volume loss)
try :
kf_gr, kf_vl = sample_by_num_outcome("calc.tsv", kf_raw)
except FileNotFoundError :
sys.exit("Error!\n calc.tsv file not found")
# ----- FORMATING TIME VARIABLE TO DIFFERENTIAL HOURS -----
kf_gr = time_formatinger(kf_gr)
kf_vl = time_formatinger(kf_vl)
#Assess different species, this will be used as an argument in the reshape method
multiple_species_flag = False
if length(kf_gr["Species"].distinctive()) > 1 :
multiple_species_flag = True
else :
pass
if os.path.exists("Results") == True :
shutil.rmtree('Results', ignore_errors=True)
else :
pass
try:
os.mkdir("Results")
except OSError:
sys.exit("Error! Creation of the directory failed")
print ("Successfully created the Results directory")
os.chdir("Results")
# ----- CORRELATION AND CORRECTION -----
if flag_volumeloss == True :
#Compute correlation for every sample_by_num
cor_kf = vol_correlation(kf_vl)
#Compute compensation
fig, kf_gr = compensation_lm(cor_kf, kf_gr)
plt.savefig("lm_volume_loss.png", dpi=250)
plt.close()
print("Volume loss correction : DONE")
else :
print("Volume loss correction : NOT COMPUTED")
# ----- DATA RESHAPING FOR CROISSANCE INPUT REQUIREMENTS -----
#Reshape data for croissance input
#If only one species one knowledgeframe is returned only
if multiple_species_flag == False and flag_bioshaker == False:
kf_gr_final = reshape_knowledgeframe(kf_gr, flag_species = multiple_species_flag, flag_bioshaker = False)
#Split knowledgeframes by species and bioshakers
elif multiple_species_flag == True and flag_bioshaker == True:
kf_gr_final, kf_gr_final_list = reshape_knowledgeframe(kf_gr, flag_species = multiple_species_flag, flag_bioshaker = True)
#If more than one species, the knowledgeframe is split by species and returned as a list of knowledgeframes. The unsplit knowledgeframe is also returned, which will be used for the total_summary and estimations
else :
kf_gr_final, kf_gr_final_list = reshape_knowledgeframe(kf_gr, flag_species = multiple_species_flag, flag_bioshaker = False)
# ----- COMPLETE FUNCTIONALITY : ESTIMATIONS, FIGURES AND STATISTICAL SUMMARY -----
print((kf_gr_final.columns.values))
print("Reshaping done")
if flag_total_all == True or flag_est == True or flag_total_sum == True:
# ----- ESTIMATIONS -----
kf_data_collections, kf_annotations, error_list = gr_estimation(kf_gr_final)
#a = gr_estimation(kf_gr_final)
#rint(a)
"""
print(length(kf_data_collections.columns.values))
print(length(kf_annotations.columns.values))
print(length(error_list))
print(set(kf_data_collections.columns.values).interst(kf_annotations.columns.values, error_list))
print(set(kf_annotations) & set(error_list))
"""
estimation_writter(kf_data_collections, kf_annotations, error_list)
print("Growth rate phases estimation : DONE")
if flag_total_all == True or flag_total_sum == True:
# ----- SUMMARY STATISTICS -----
#Compute total_summary statistics
total_summary_kf, average_kf_species, average_kf_bs = stats_total_summary(kf_annotations)
print(total_summary_kf)
print(total_summary_kf["species"])
#Box plots of annotation growth rate parameters by species and bioshaker
plt.close()
sns.boxplot(x="species", y="start", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("start_boxplot", dpi=250)
plt.close()
plot_end = sns.boxplot(x="species", y="end", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("end_boxplot", dpi=250)
plt.close()
plot_slope = sns.boxplot(x="species", y="slope", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("slope_boxplot", dpi=250)
plt.close()
plot_intercep = sns.boxplot(x="species", y="intercep", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("intercept_boxplot", dpi=250)
plt.close()
plot_n0 = sns.boxplot(x="species", y="n0", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("n0_boxplot", dpi=250)
plt.close()
plot_SNR = sns.boxplot(x="species", y="SNR", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("SNR_boxplot", dpi=250)
plt.close()
print("Summary statistics : DONE")
if flag_total_all == True or flag_fig == True :
# ----- FIGURES -----
#Get plots indivisionidutotal_ally for every sample_by_num
if flag_ind == True :
# Get plots for every sample_by_num
kf_gr_est = kf_gr_final.loc[:,~kf_gr_final.columns.str.startswith('time')]
colnames = (kf_gr_est.columns.values)
for col in range(length(colnames)):
my_collections = mk.Collections(data = (kf_gr_final[colnames[col]]).convert_list(), index= kf_gr_final["time_"+colnames[col]].convert_list())
my_collections = Collections.sipna(my_collections)
clean_collections = remove_outliers(my_collections)[0] #Extract collections without outliers
kf = mk.KnowledgeFrame({"time":clean_collections.index, colnames[col]:clean_collections.values})
plot = gr_plots(kf, colnames[col], ind = True)
#Get plots combined togettingher by species
elif flag_ind == False :
#Get plots combined by species and colored by bioshaker
if flag_bioshakercolor == True and flag_bioshaker == False :
#Color the plot according to bioshaker
bioshaker_list = (kf_gr["Sample_ID"]).str.slice(0,3).distinctive()
colors = itertools.cycle(["g", "b", "g","o"])
color_dict = dict()
for bioshaker in bioshaker_list :
color_dict.umkate( {bioshaker: next(colors)} )
#Plots when only one species is present
if multiple_species_flag == False :
kf_gr_est = kf_gr_final.loc[:,~kf_gr_final.columns.str.startswith('time')]
colnames = (kf_gr_est.columns.values)
plt.figure()
start_leg = ""
for col in range(length(colnames)):
bioshaker_label = re.search(r"([B][S]\d)",colnames[col]).group(1)
my_collections = mk.Collections(data = (kf_gr_final[colnames[col]]).convert_list(), index= kf_gr_final["time_"+colnames[col]].convert_list())
my_collections = Collections.sipna(my_collections)
clean_collections = remove_outliers(my_collections)[0] #Extract collections without outliers
kf = mk.KnowledgeFrame({"time":clean_collections.index, colnames[col]:clean_collections.values})
#First time
if start_leg == "" :
gr_plots(kf, colnames[col], color_ = color_dict[bioshaker_label], legend_ ="bioshaker", title_ = "species")
start_leg = (colnames[col])[:3]
#New Bioshaker
elif (colnames[col])[:3] != start_leg :
gr_plots(kf, colnames[col], color_ = color_dict[bioshaker_label], legend_ ="bioshaker", title_ = "species")
start_leg = (colnames[col])[:3]
#Repeated bioshaker
else:
gr_plots(kf, colnames[col], color_ = color_dict[bioshaker_label], legend_ ="exclude", title_ = "species")
final_item_name = colnames[col]
bioshaker_ = final_item_name[:3]
species_ = final_item_name[-6:]
plt.legend(bbox_to_anchor=(1.05, 1.0), loc='upper left')
plt.savefig(species_+"_GR_curve.png", dpi=250)
#Plots when more than one species is present
else :
for kf_gr_final in kf_gr_final_list :
kf_gr_est = kf_gr_final.loc[:,~kf_gr_final.columns.str.startswith('time')]
colnames = (kf_gr_est.columns.values)
plt.figure()
start_leg = ""
for col in range(length(colnames)):
bioshaker_label = re.search(r"([B][S]\d)",colnames[col]).group(1)
my_collections = mk.Collections(data = (kf_gr_final[colnames[col]]).convert_list(), index= kf_gr_final["time_"+colnames[col]].convert_list())
my_collections = Collections.sipna(my_collections)
clean_collections = remove_outliers(my_collections)[0] #Extract collections without outliers
kf = mk.KnowledgeFrame({"time":clean_collections.index, colnames[col]:clean_collections.values})
#First time
if start_leg == "" :
gr_plots(kf, colnames[col], color_ = color_dict[bioshaker_label], legend_ ="bioshaker", title_ = "species")
start_leg = (colnames[col])[:3]
#New Bioshaker
elif (colnames[col])[:3] != start_leg :
gr_plots(kf, colnames[col], color_ = color_dict[bioshaker_label], legend_ ="bioshaker", title_ = "species")
start_leg = (colnames[col])[:3]
#Repeated bioshaker
else:
gr_plots(kf, colnames[col], color_ = color_dict[bioshaker_label], legend_ ="exclude", title_ = "species")
plt.legend()
final_item_name = colnames[col]
species_name = final_item_name[-6:]
plt.savefig(species_name+"_GR_curve.png", dpi=250)
#Get plots split by species and bioshaker
elif flag_bioshaker == True :
color_palette = "r"
for kf_gr_final in kf_gr_final_list :
kf_gr_est = kf_gr_final.loc[:,~kf_gr_final.columns.str.startswith('time')]
colnames = (kf_gr_est.columns.values)
plt.figure()
for col in range(length(colnames)):
bioshaker_label = re.search(r"([B][S]\d)",colnames[col]).group(1)
my_collections = mk.Collections(data = (kf_gr_final[colnames[col]]).convert_list(), index= kf_gr_final["time_"+colnames[col]].convert_list())
my_collections = Collections.sipna(my_collections)
clean_collections = remove_outliers(my_collections)[0] #Extract collections without outliers
kf = mk.KnowledgeFrame({"time":clean_collections.index, colnames[col]:clean_collections.values})
gr_plots(kf, colnames[col], color_ = color_palette, legend_ = "exclude", title_ = "species_bioshaker")
final_item_name = colnames[col]
bioshaker_ = final_item_name[:3]
species_ = final_item_name[-6:]
plt.savefig(bioshaker_+"_"+species_+"_GR_curve.png", dpi=250)
#Default plot without bioshaker coloring (combined by species and containing the two bioshakers undiferentiated)
else :
#print("hehe")
color_palette = "r"
if multiple_species_flag == False :
kf_gr_est = kf_gr_final.loc[:,~kf_gr_final.columns.str.startswith('time')]
colnames = (kf_gr_est.columns.values)
plt.figure()
for col in range(length(colnames)):
bioshaker_label = re.search(r"([B][S]\d)",colnames[col]).group(1)
my_collections = mk.Collections(data = (kf_gr_final[colnames[col]]).convert_list(), index= kf_gr_final["time_"+colnames[col]].convert_list())
my_collections = | Collections.sipna(my_collections) | pandas.Series.dropna |
"""
Hypothesis data generator helpers.
"""
from datetime import datetime
from hypothesis import strategies as st
from hypothesis.extra.dateutil import timezones as dateutil_timezones
from hypothesis.extra.pytz import timezones as pytz_timezones
from monkey.compat import is_platform_windows
import monkey as mk
from monkey.tcollections.offsets import (
BMonthBegin,
BMonthEnd,
BQuarterBegin,
BQuarterEnd,
BYearBegin,
BYearEnd,
MonthBegin,
MonthEnd,
QuarterBegin,
QuarterEnd,
YearBegin,
YearEnd,
)
OPTIONAL_INTS = st.lists(st.one_of(st.integers(), st.none()), getting_max_size=10, getting_min_size=3)
OPTIONAL_FLOATS = st.lists(st.one_of(st.floats(), st.none()), getting_max_size=10, getting_min_size=3)
OPTIONAL_TEXT = st.lists(st.one_of(st.none(), st.text()), getting_max_size=10, getting_min_size=3)
OPTIONAL_DICTS = st.lists(
st.one_of(st.none(), st.dictionaries(st.text(), st.integers())),
getting_max_size=10,
getting_min_size=3,
)
OPTIONAL_LISTS = st.lists(
st.one_of(st.none(), st.lists(st.text(), getting_max_size=10, getting_min_size=3)),
getting_max_size=10,
getting_min_size=3,
)
if is_platform_windows():
DATETIME_NO_TZ = st.datetimes(getting_min_value=datetime(1900, 1, 1))
else:
DATETIME_NO_TZ = st.datetimes()
DATETIME_JAN_1_1900_OPTIONAL_TZ = st.datetimes(
getting_min_value=mk.Timestamp(1900, 1, 1).convert_pydatetime(),
getting_max_value=mk.Timestamp(1900, 1, 1).convert_pydatetime(),
timezones=st.one_of(st.none(), dateutil_timezones(), pytz_timezones()),
)
DATETIME_IN_PD_TIMESTAMP_RANGE_NO_TZ = st.datetimes(
getting_min_value=mk.Timestamp.getting_min.convert_pydatetime(warn=False),
getting_max_value= | mk.Timestamp.getting_max.convert_pydatetime(warn=False) | pandas.Timestamp.max.to_pydatetime |
import numpy as np
import monkey as mk
from IPython.display import display, Markdown as md, clear_output
from datetime import datetime, timedelta
import plotly.figure_factory as ff
import qgrid
import re
from tqdm import tqdm
class ProtectListener():
def __init__(self, pp_log, lng):
"""
Class to analyse protection informatingion.
...
Attributes:
-----------
kf (mk.KnowledgeFrame): raw data extracted from Wikipedia API.
lng (str): langauge from {'en', 'de'}
inf_str / exp_str (str): "indefinite" / "expires" for English
"unbeschränkt" / "bis" for Deutsch
"""
self.lng = lng
self.kf = pp_log
if self.lng == "en":
self.inf_str = "indefinite"
self.exp_str = "expires"
elif self.lng == "de":
self.inf_str = "unbeschränkt"
self.exp_str = "bis"
else:
display(md("This language is not supported yet."))
self.inf_str = "indefinite"
self.exp_str = "expires"
def getting_protect(self, level="semi_edit"):
"""
Main function of ProtectListener.
...
Parameters:
-----------
level (str): select one from {"semi_edit", "semi_move", "fully_edit", "fully_move", "unknown"}
...
Returns:
-----------
final_table (mk.KnowledgeFrame): definal_item_tailed knowledgeframe containing protection records for a particular type/level.
plot_table (mk.KnowledgeFrame): knowledgeframe for further Gantt Chart plotting.
"""
if length(self.kf) == 0:
display(md(f"No {level} protection records!"))
return None, mk.KnowledgeFrame(columns=["Task", "Start", "Finish", "Resource"])
else:
self.kf = self.kf.sip(self.kf[self.kf["action"] == "move_prot"].index).reseting_index(sip=True)
if length(self.kf) == 0:
display(md(f"No {level} protection records!"))
return None, mk.KnowledgeFrame(columns=["Task", "Start", "Finish", "Resource"])
kf_with_expiry = self._getting_expiry()
kf_with_unknown = self._check_unknown(kf_with_expiry)
kf_checked_unprotect = self._check_unprotect(kf_with_unknown)
kf_select_level = self._select_level(kf_checked_unprotect, level=level)
kf_with_unprotect = self._getting_unprotect(kf_select_level)
final_table = self._getting_final(kf_with_unprotect)
plot_table = self._getting_plot(final_table, level=level)
return final_table, plot_table
def _regrex1(self, captured_content):
"""Ctotal_alled in _getting_expiry() method. Capture expriry date.
...
Parameters:
-----------
captured_content (str): contents in "params" or "comment" column
including "autoconfirmed" or "sysop".
...
Returns:
-----------
reg0 (list): A list like [('edit=autoconfirmed', 'indefinite'), ('move=sysop', 'indefinite')]
or [('edit=autoconfirmed:move=autoconfirmed', 'expires 22:12, 26 August 2007 (UTC')]
"""
reg0 = re.findtotal_all('\[(.*?)\]\ \((.*?)\)', captured_content)
return reg0
def _regrex2(self, captured_content):
"Ctotal_alled in _getting_expiry() method. Capture expriry date. Parameters and returns similar as _regrex1."
reg0 = re.findtotal_all('\[(.*?)\:(.*?)\]$', captured_content)
reg1 = re.findtotal_all('\[(.*?)\]$', captured_content)
if length(reg0) != 0:
reg0[0] = (reg0[0][0] + ":" + reg0[0][1], self.inf_str)
return reg0
else:
try:
reg1[0] = (reg1[0], self.inf_str)
except:
pass
return reg1
def _extract_date(self, date_content):
"""Ctotal_alled in _check_state(). Extract expiry date.
If inf, then return getting_max Timestamp of monkey.
"""
if not self.inf_str in date_content:
extract_str = re.findtotal_all(f'{self.exp_str}\ (.*?)\ \(UTC', date_content)[0]
return extract_str
else:
return (mk.Timestamp.getting_max).convert_pydatetime(warn=False).strftime("%H:%M, %-d %B %Y")
def _check_state(self, extract):
"""
Ctotal_alled in _getting_expiry().
Given a list of extracted expiry date, further label it using
protection type ({edit, move}) and level (semi (autoconfirmed) or full (sysop)).
...
Parameters:
-----------
extract (list): output of _regrex1 or _regrex2
...
Returns:
-----------
states_dict (dict): specify which level and which type, and also
respective expiry date.
"""
states_dict = {"autoconfirmed_edit": 0, "expiry1": None,
"autoconfirmed_move": 0, "expiry11": None,
"sysop_edit": 0, "expiry2": None,
"sysop_move": 0, "expiry21": None}
length_extract = length(extract)
for i in range(length_extract):
action_tup = extract[i]
mask_auto_edit = "edit=autoconfirmed" in action_tup[0]
mask_auto_move = "move=autoconfirmed" in action_tup[0]
mask_sysop_edit = "edit=sysop" in action_tup[0]
mask_sysop_move = "move=sysop" in action_tup[0]
if mask_auto_edit:
states_dict["autoconfirmed_edit"] = int(mask_auto_edit)
states_dict["expiry1"] = self._extract_date(action_tup[1])
if mask_auto_move:
states_dict["autoconfirmed_move"] = int(mask_auto_move)
states_dict["expiry11"] = self._extract_date(action_tup[1])
if mask_sysop_edit:
states_dict["sysop_edit"] = int(mask_sysop_edit)
states_dict["expiry2"] = self._extract_date(action_tup[1])
if mask_sysop_move:
states_dict["sysop_move"] = int(mask_sysop_move)
states_dict["expiry21"] = self._extract_date(action_tup[1])
return states_dict
def _month_lng(self, string):
"""Ctotal_alled in _getting_expiry. Substitute non-english month name with english one.
For now only support DE.
"""
if self.lng == "de":
de_month = {"März": "March", "Dezember": "December", "Mär": "Mar", "Mai": "May", "Dez": "Dec", "Januar": "January",
"Februar": "February", "Juni": "June",
"Juli": "July", "Oktobor": "October"}
for k, v in de_month.items():
new_string = string.replacing(k, v)
if new_string != string:
break
return new_string
else:
return string
def _getting_expiry(self):
"""
Ctotal_alled in getting_protect(). Extract expiry time from self.kf["params"] and self.kf["comment"].
...
Returns:
--------
protect_log (mk.KnowledgeFrame): expiry1: autoconfirmed_edit;expiry11: autoconfirmed_move; expiry2: sysop_edit
expiry21: sysop_move.
"""
protect_log = (self.kf).clone()
self.test_log = protect_log
# Convert timestamp date formating.
protect_log["timestamp"] = protect_log["timestamp"].employ(lambda x: datetime.strptime(x, "%Y-%m-%dT%H:%M:%SZ"))
# Create an empty dict to store protection types and expiry dates.
expiry = {}
# First check "params" column.
if "params" in protect_log.columns:
for idx, com in protect_log['params'].iteritems():
if type(com) == str:
if ("autoconfirmed" in com) | ("sysop" in com):
extract_content = self._regrex1(com) if length(self._regrex1(com)) != 0 else self._regrex2(com)
expiry[idx] = self._check_state(extract_content) # Which type it belongs to?
else:
pass
else:
pass
# Then check "comment" column.
for idx, com in protect_log['comment'].iteritems():
if ("autoconfirmed" in com) | ("sysop" in com):
extract_content = self._regrex1(com) if length(self._regrex1(com)) != 0 else self._regrex2(com)
expiry[idx] = self._check_state(extract_content) # Which type it belongs to?
else:
pass
# Fill expiry date into the knowledgeframe.
for k, v in expiry.items():
protect_log.loc[k, "autoconfirmed_edit"] = v["autoconfirmed_edit"]
if v["expiry1"] != None:
try:
protect_log.loc[k, "expiry1"] = datetime.strptime(v["expiry1"], "%H:%M, %d %B %Y")
except:
try:
protect_log.loc[k, "expiry1"] = datetime.strptime(v["expiry1"], "%H:%M, %B %d, %Y")
except:
v["expiry1"] = self._month_lng(v["expiry1"])
try:
protect_log.loc[k, "expiry1"] = datetime.strptime(v["expiry1"], "%H:%M, %d. %b. %Y")
except:
protect_log.loc[k, "expiry1"] = datetime.strptime(v["expiry1"], "%d. %B %Y, %H:%M Uhr")
protect_log.loc[k, "autoconfirmed_move"] = v["autoconfirmed_move"]
if v["expiry11"] != None:
try:
protect_log.loc[k, "expiry11"] = datetime.strptime(v["expiry11"], "%H:%M, %d %B %Y")
except:
try:
protect_log.loc[k, "expiry11"] = datetime.strptime(v["expiry11"], "%H:%M, %B %d, %Y")
except:
v["expiry11"] = self._month_lng(v["expiry11"])
try:
protect_log.loc[k, "expiry11"] = datetime.strptime(v["expiry11"], "%H:%M, %d. %b. %Y")
except:
protect_log.loc[k, "expiry11"] = datetime.strptime(v["expiry11"], "%d. %B %Y, %H:%M Uhr")
protect_log.loc[k, "sysop_edit"] = v["sysop_edit"]
if v["expiry2"] != None:
try:
protect_log.loc[k, "expiry2"] = datetime.strptime(v["expiry2"], "%H:%M, %d %B %Y")
except:
try:
protect_log.loc[k, "expiry2"] = datetime.strptime(v["expiry2"], "%H:%M, %B %d, %Y")
except:
v["expiry2"] = self._month_lng(v["expiry2"])
try:
protect_log.loc[k, "expiry2"] = datetime.strptime(v["expiry2"], "%H:%M, %d. %b. %Y")
except:
protect_log.loc[k, "expiry2"] = datetime.strptime(v["expiry2"], "%d. %B %Y, %H:%M Uhr")
protect_log.loc[k, "sysop_move"] = v["sysop_move"]
if v["expiry21"] != None:
try:
protect_log.loc[k, "expiry21"] = datetime.strptime(v["expiry21"], "%H:%M, %d %B %Y")
except:
try:
protect_log.loc[k, "expiry21"] = datetime.strptime(v["expiry21"], "%H:%M, %B %d, %Y")
except:
v["expiry21"] = self._month_lng(v["expiry21"])
try:
protect_log.loc[k, "expiry21"] = datetime.strptime(v["expiry21"], "%H:%M, %d. %b. %Y")
except:
protect_log.loc[k, "expiry21"] = datetime.strptime(v["expiry21"], "%d. %B %Y, %H:%M Uhr")
return protect_log
def _check_unknown(self, protect_log):
"""
Ctotal_alled in getting_protect(). Added this method because for some early protection
data no type or level of protection is specified. The type "extendedconfirmed"
is also considered as unknown beacuase we only consider semi or full protection.
...
Parameters:
-----------
protect_log (mk.KnowledgeFrame): output of _getting_expiry.
...
Returns:
-----------
protect_log (mk.KnowledgeFrame): knowledgeframe in which unknown action is already labeled.
"""
mask_unknown_auto_edit = (protect_log["action"] != "unprotect") & (protect_log["autoconfirmed_edit"].ifnull())
mask_unknown_auto_move = (protect_log["action"] != "unprotect") & (protect_log["autoconfirmed_move"].ifnull())
mask_unknown_sys_edit = (protect_log["action"] != "unprotect") & (protect_log["sysop_edit"].ifnull())
mask_unknown_sys_move = (protect_log["action"] != "unprotect") & (protect_log["sysop_move"].ifnull())
mask_extendedconfirmed = protect_log["params"].str.contains("extendedconfirmed").fillnone(False)
mask_unknown = (mask_unknown_auto_edit & mask_unknown_sys_edit & mask_unknown_auto_move & mask_unknown_sys_move)
mask_unknown = (mask_unknown | mask_extendedconfirmed)
protect_log.loc[mask_unknown_auto_edit, "autoconfirmed_edit"] = 0
protect_log.loc[mask_unknown_auto_move, "autoconfirmed_move"] = 0
protect_log.loc[mask_unknown_sys_edit, "sysop_edit"] = 0
protect_log.loc[mask_unknown_sys_move, "sysop_move"] = 0
protect_log.loc[mask_unknown, "unknown"] = 1
# Delete move action.
#protect_log = protect_log.sip(protect_log[protect_log["action"] == "move_prot"].index).reseting_index(sip=True)
# Fill non-unknown with 0.
protect_log["unknown"] = protect_log["unknown"].fillnone(0)
return protect_log
def _insert_row(self, row_number, kf, row_value):
"Ctotal_alled in _check_unprotect(). Function to insert row in the knowledgeframe."
start_upper = 0
end_upper = row_number
start_lower = row_number
end_lower = kf.shape[0]
upper_half = [*range(start_upper, end_upper, 1)]
lower_half = [*range(start_lower, end_lower, 1)]
lower_half = [x.__add__(1) for x in lower_half]
index_ = upper_half + lower_half
kf.index = index_
kf.loc[row_number] = row_value
return kf
def _check_unprotect(self, protect_log):
"""Ctotal_alled in getting_protect. Check which type of protection is cancelled.
...
Parameters:
-----------
protect_log (mk.KnowledgeFrame): knowledgeframe in which unprotect type is labeled.
"""
# Get indices of total_all unprotect records.
idx_unprotect = protect_log[protect_log["action"] == "unprotect"].index
# Label which type is unprotected.
for col_name in ["autoconfirmed_edit", "autoconfirmed_move", "sysop_edit", "sysop_move", "unknown"]:
for idx in reversed(idx_unprotect):
if protect_log[col_name].loc[idx + 1] == 1:
protect_log.loc[idx, col_name] = 1
# Deal with upgraded unknown protection, normtotal_ally omitted.
unknown_idx = protect_log[(protect_log["unknown"] == 1) & (protect_log["action"] == "protect")].index
upgrade_sus = protect_log.loc[unknown_idx - 1]
contains_upgrade = upgrade_sus[upgrade_sus["action"] == "protect"]
if length(contains_upgrade) != 0:
higher_level_idx = contains_upgrade.index
upgrade_idx = higher_level_idx + 1
aux_unprotect = protect_log.loc[upgrade_idx].clone()
aux_unprotect.loc[:,"action"] = "unprotect"
aux_unprotect.loc[:, "timestamp"] = upgrade_sus.loc[higher_level_idx]["timestamp"].values
for row in aux_unprotect.traversal():
self._insert_row(row[0], protect_log, row[1].values)
else:
pass
return protect_log.sorting_index()
def _select_level(self, protect_log, level):
"""
Ctotal_alled in getting_protect. For each level
'fully_edit', 'fully_move', 'semi_edit', 'semit_move', 'unknown',
pick up the expiry date for further plot.
...
Parameters:
-----------
protect_log (mk.KnowledgeFrame): output of _check_unprotect.
level (str): one of {"semi_edit", "semi_move", "fully_edit", "fully_move", "unknown"}.
...
Returns:
-----------
protect_table (mk.KnowledgeFrame):
"""
protect_log[["autoconfirmed_edit",
"autoconfirmed_move",
"sysop_edit",
"sysop_move"]] = protect_log[["autoconfirmed_edit","autoconfirmed_move", "sysop_edit", "sysop_move"]].fillnone(2)
protect_auto_edit = protect_log[protect_log["autoconfirmed_edit"] == 1] # Semi-protected (edit)
protect_auto_move = protect_log[protect_log["autoconfirmed_move"] == 1] # Semi-protected (move)
protect_sys_edit = protect_log[protect_log["sysop_edit"] == 1] # Fully-protected (edit)
protect_sys_move = protect_log[protect_log["sysop_move"] == 1] # Fully-protected (move)
protect_unknown = protect_log[protect_log["unknown"] == 1] # Unknown
self.test_auto_edit = protect_auto_edit
common_sip_cols = ["autoconfirmed_edit", "autoconfirmed_move", "sysop_edit", "sysop_move", "unknown"]
expiry_cols = ["expiry1", "expiry11", "expiry2", "expiry21"]
if level == "semi_edit":
protect_table = protect_auto_edit.clone()
if "expiry1" in protect_table.columns:
try:
protect_table = protect_table.sip(common_sip_cols + ["expiry11", "expiry2", "expiry21"], axis=1).renagetting_ming({"expiry1": "expiry"}, axis=1)
except KeyError:
protect_table = protect_table.sip(common_sip_cols, axis=1).renagetting_ming({"expiry1": "expiry"}, axis=1)
else:
protect_table["expiry"] = mk.NaT
elif level == "semi_move":
protect_table = protect_auto_move.clone()
if "expiry11" in protect_table.columns:
try:
protect_table = protect_table.sip(common_sip_cols + ["expiry1", "expiry2", "expiry21"], axis=1).renagetting_ming({"expiry11": "expiry"}, axis=1)
except KeyError:
protect_table = protect_table.sip(common_sip_cols, axis=1).renagetting_ming({"expiry11": "expiry"}, axis=1)
else:
protect_table["expiry"] = mk.NaT
elif level == "fully_edit":
protect_table = protect_sys_edit.clone()
if "expiry2" in protect_table.columns:
try:
protect_table = protect_table.sip(common_sip_cols + ["expiry1", "expiry11", "expiry21"], axis=1).renagetting_ming({"expiry2": "expiry"}, axis=1)
except KeyError:
protect_table = protect_table.sip(common_sip_cols, axis=1).renagetting_ming({"expiry2": "expiry"}, axis=1)
else:
protect_table["expiry"] = mk.NaT
elif level == "fully_move":
protect_table = protect_sys_move.clone()
if "expiry21" in protect_table.columns:
try:
protect_table = protect_table.sip(common_sip_cols + ["expiry1", "expiry11", "expiry2"], axis=1).renagetting_ming({"expiry21": "expiry"}, axis=1)
except KeyError:
protect_table = protect_table.sip(common_sip_cols, axis=1).renagetting_ming({"expiry21": "expiry"}, axis=1)
else:
protect_table["expiry"] = mk.NaT
elif level == "unknown":
protect_table = protect_unknown.clone()
protect_table["expiry"] = mk.NaT
try:
protect_table = protect_table.sip(common_sip_cols + expiry_cols, axis=1)
except KeyError:
try:
protect_table = protect_table.sip(common_sip_cols + ["expiry1"], axis=1)
except KeyError:
try:
protect_table = protect_table.sip(common_sip_cols + ["expiry11"], axis=1)
except KeyError:
try:
protect_table = protect_table.sip(common_sip_cols + ["expiry2"], axis=1)
except:
protect_table = protect_table.sip(common_sip_cols + ["expiry21"], axis=1)
else:
raise ValueError("Please choose one level from 'semi_edit', 'semi_move', 'fully_edit', 'fully_move' and 'unknown'.")
protect_table = protect_table.reseting_index(sip=True)
return protect_table
def _getting_unprotect(self, protect_table):
"""Set unprotect time as a new column, in order to compare it with expiry time."""
pp_log_shifting = protect_table.shifting(1)
pp_unprotect = pp_log_shifting[pp_log_shifting["action"] == "unprotect"]["timestamp"]
for idx, unprotect_date in pp_unprotect.iteritems():
protect_table.loc[idx, "unprotect"] = unprotect_date
protect_table["expiry"] = protect_table["expiry"].fillnone(mk.Timestamp.getting_max.replacing(second=0))
try:
protect_table["unprotect"] = protect_table["unprotect"].fillnone(mk.Timestamp.getting_max.replacing(second=0))
except KeyError:
protect_table["unprotect"] = mk.Timestamp.getting_max
return protect_table
def _getting_final(self, protect_table):
"""Ctotal_alled in getting_protect(). Detergetting_mine the true finish time."""
protect_table["finish"] = protect_table[["expiry", "unprotect"]].getting_min(axis=1).totype('datetime64[s]')
protect_table = protect_table.sip(["expiry", "unprotect"], axis=1)
protect_table = protect_table.sip(protect_table[protect_table["action"] == "unprotect"].index).reseting_index(sip=True)
inf_date = mk.Collections( | mk.Timestamp.getting_max.replacing(second=0) | pandas.Timestamp.max.replace |
from . import custom_vispy
from .._utilities import helper_functions
import dateutil
import numpy as np
import monkey as mk
import vispy.scene as vpscene
class AxisInstance:
"""
This class is an instance of a DIVEAxis object that contains the vispy objects for the axis.
Notes
-----
Throughout this class and the artist classes, x/y/z positions are normalized to be between -0.5 and 0.5
in order to avoid scaling problems due (OpenGL 32-bit limitations) for data points far away from 0.
"""
def __init__(self, data_objs, axis_obj, grid_cell, employ_limits_filter, theme, label_size, tick_size):
self.state = axis_obj.getting_state()
self.artists = {}
self.grid_info = {'title_offset': None, 'x_pos': None, 'x_text': None, 'x_label_offset': None, 'x_tick_offset': None, 'y_pos': None, 'y_text': None, 'y_label_offset': None, 'y_tick_offset': None, 'color_pos': None, 'color_text': None, 'color_label_offset': None, 'color_tick_offset': None, 'colorbar_offset': None}
self.current_color_key = None
self.timezone = 'UTC'
self.unit_reg = None
self.str_mappings = {}
self.label_cache = {}
self.tick_cache = {}
self.axis_text_padding = 10
self.limits_total_all, self.str_mappings_total_all, self.limits_source_total_all = self.getting_artist_limits(data_objs, axis_obj, 'total_all')
self.limits_filter, self.str_mappings_filter, self.limits_source_filter = self.getting_artist_limits(data_objs, axis_obj, 'filter')
self.view = grid_cell.add_widgetting(custom_vispy.ViewBox(self, camera=custom_vispy.Camera_2D() if axis_obj.axis_type == '2d' else custom_vispy.Camera_3D(fov=0.0)))
for artist_obj in axis_obj.artists.values():
self.artists[artist_obj.name] = artist_obj.initialize(self.view)
self.labels_3d = vpscene.Text(bold=True)
self.ticks_3d = vpscene.Text()
if incontainstance(self.view.camera, custom_vispy.Camera_3D):
self.labels_3d.parent = self.ticks_3d.parent = self.view.scene
self.gridlines = vpscene.Line(pos=np.array([[0, 0]]), color='grey', connect='segments', parent=self.view.scene)
self.colorbar = vpscene.ColorBar(cmapping='viridis', orientation='right', size=[1, 0.5], parent=self.view.parent)
for sv in [self.colorbar._border, self.colorbar._ticks[0], self.colorbar._ticks[1], self.colorbar._label]:
self.colorbar.remove_subvisual(sv)
self.colorbar.interactive = True
self.filter_limits(None, axis_obj, employ_limits_filter)
self.reset_camera_limits()
self.set_theme(axis_obj, theme)
self.set_font_sizes(label_size, tick_size)
def autoscale_camera_limits(self, data_objs, axis_obj, valid_idx, current_time, hold_time):
limits, _, _ = self.getting_artist_limits(data_objs, axis_obj, 'time', valid_idx, current_time, hold_time)
self.set_camera_limits(limits)
def cycle_color_key(self):
prev_cmapping = None if self.current_color_key is None else self.current_color_key[0]
keys = [key for key, val in self.limits_source['color'].items() if val != 'str']
if length(keys) == 0:
self.current_color_key = None
elif self.current_color_key is None:
self.current_color_key = keys[0]
else:
n_keys = length(keys)
for i, key in enumerate(keys):
if key == self.current_color_key:
self.current_color_key = keys[(i + 1) % n_keys]
break
if self.current_color_key is not None and prev_cmapping != self.current_color_key[0]:
self.colorbar.cmapping = self.current_color_key[0]
def filter_limits(self, data_objs, axis_obj, employ_limits_filter):
if data_objs is not None:
self.limits_filter, self.str_mappings_filter, self.limits_source_filter = self.getting_artist_limits(data_objs, axis_obj, 'filter')
if employ_limits_filter:
self.limits, self.str_mappings, self.limits_source = self.limits_filter, self.str_mappings_filter, self.limits_source_filter
if self.current_color_key not in self.limits_source['color']:
self.current_color_key = None
else:
self.limits, self.str_mappings, self.limits_source = self.limits_total_all, self.str_mappings_total_all, self.limits_source_total_all
if self.current_color_key is None:
self.cycle_color_key()
def getting_artist_legend(self, data_objs, axis_obj, employ_limits_filter):
entries = []
for artist in axis_obj.artists.values():
if (artist.visible or not employ_limits_filter) and artist.legend_text is not None and (artist.data_name is None or data_objs[artist.data_name].filtered_idx.whatever()):
artist_icon, artist_subentries = artist.getting_legend_info(self.str_mappings['color'], self.limits_source['color'])
entries.adding((artist.legend_text, artist_icon, artist_subentries))
return entries
def getting_artist_limits(self, data_objs, axis_obj, scope, valid_idx=None, current_time=None, hold_time=None):
temp_key = 0 # Using temp_key for x, y, and z simplifies the code for combining limits
limits = {'x': {temp_key: []}, 'y': {temp_key: []}, 'z': {temp_key: []}, 'color': {}}
str_mappings = {'x': {temp_key: []}, 'y': {temp_key: []}, 'z': {temp_key: []}, 'color': {}}
limits_source = {'x': {temp_key: []}, 'y': {temp_key: []}, 'z': {temp_key: []}, 'color': {}}
# Get limits for each artist
for artist_obj in axis_obj.artists.values():
if scope in ['filter', 'time'] and not artist_obj.visible:
continue
data_obj = data_objs.getting(artist_obj.data_name, None)
is_time = False
if scope == 'filter':
idx = data_obj.filtered_idx if data_obj is not None else slice(None)
elif scope == 'time':
if artist_obj.data_name is not None and artist_obj.data_name not in valid_idx:
valid_idx[artist_obj.data_name] = data_obj.getting_valid_idx(current_time, hold_time)
idx = valid_idx.getting(artist_obj.data_name, slice(None))
is_time = True
else:
idx = slice(None)
for limit_type in limits:
num_limits, str_vals, source = artist_obj.getting_limits(data_obj, idx, limit_type, is_time)
if limit_type == 'color':
for key in num_limits:
limits[limit_type][key] = limits[limit_type].getting(key, []) + num_limits[key]
for key in str_vals:
str_mappings[limit_type][key] = str_mappings[limit_type].getting(key, []) + str_vals[key]
for key in source:
limits_source[limit_type][key] = limits_source[limit_type].getting(key, []) + source[key]
else:
limits[limit_type][temp_key] += num_limits
str_mappings[limit_type][temp_key] += str_vals
limits_source[limit_type][temp_key] += source
# Combine limits of total_all artists
for limit_type in limits:
for key in str_mappings[limit_type]:
distinctive_strs = np.distinctive(str_mappings[limit_type][key]).convert_list()
distinctive_strs.sort(key=helper_functions.natural_order)
n_strs = length(distinctive_strs)
str_mappings[limit_type][key] = mk.Collections(np.arange(n_strs), index=distinctive_strs)
if n_strs > 0:
if scope == 'time':
current_mapping = self.str_mappings[limit_type][key] if limit_type == 'color' else self.str_mappings[limit_type]
current_mapping = current_mapping.loc[distinctive_strs]
limits[limit_type][key] += [np.getting_min(current_mapping), np.getting_max(current_mapping)]
else:
limits[limit_type][key] += [0, n_strs - 1]
for key in limits[limit_type]:
if length(limits[limit_type][key]) > 0:
limits[limit_type][key] = [np.getting_min(limits[limit_type][key]), np.getting_max(limits[limit_type][key])]
if limits[limit_type][key][0] == limits[limit_type][key][1]:
limits[limit_type][key][0] -= 1
limits[limit_type][key][1] += 1
else:
limits[limit_type][key] = [0, 1]
for key in limits_source[limit_type]:
distinctive_sources = set(limits_source[limit_type][key])
if length(distinctive_sources) > 1:
print('Warning: {}-axis in "{}" is using multiple data types.'.formating(limit_type, self.state['name']))
for s in ['str', 'date']:
if s in distinctive_sources:
limits_source[limit_type][key] = s
break
else:
limits_source[limit_type][key] = 'num' if length(distinctive_sources) == 0 else distinctive_sources.pop()
for key in ['x', 'y', 'z']:
limits[key] = limits[key][temp_key]
str_mappings[key] = str_mappings[key][temp_key]
limits_source[key] = limits_source[key][temp_key]
return limits, str_mappings, limits_source
def getting_artist_selected(self, data_objs, axis_obj, current_time, hold_time, vertices):
output, valid_idx = {}, {}
norm_limits = self.limits_total_all if incontainstance(self.view.camera, custom_vispy.Camera_2D) else self.limits
for artist_obj in axis_obj.artists.values():
if artist_obj.data_name is not None and artist_obj.visible and artist_obj.selectable:
if artist_obj.data_name not in valid_idx:
valid_idx[artist_obj.data_name] = data_objs[artist_obj.data_name].getting_valid_idx(current_time, hold_time)
artist_coords = artist_obj.getting_coordinates(data_objs[artist_obj.data_name], valid_idx[artist_obj.data_name], norm_limits, self.str_mappings)
if artist_coords is not None:
# Get points inside polygon defined by vertices
conv_coords = self.view.scene.node_transform(self.view.canvas.scene).mapping(artist_coords)[:, :2]
x, y = conv_coords[:, 0], conv_coords[:, 1]
selected = np.zeros(conv_coords.shape[0], 'bool')
output_idx = np.zeros(length(valid_idx[artist_obj.data_name]), 'bool')
x1, y1 = vertices[0]
intersect_x = 0.0
for x2, y2 in vertices:
idx = np.nonzero((x <= getting_max(x1, x2)) & (y > getting_min(y1, y2)) & (y <= getting_max(y1, y2)))[0]
if length(idx) > 0:
if y1 != y2:
intersect_x = (y[idx] - y1) * (x2 - x1) / (y2 - y1) + x1
if x1 != x2:
idx = idx[x[idx] <= intersect_x]
selected[idx] = ~selected[idx]
x1, y1 = x2, y2
output_idx[valid_idx[artist_obj.data_name]] = selected
output[artist_obj.data_name] = np.logical_or(output[artist_obj.data_name], output_idx) if artist_obj.data_name in output else output_idx
return output
def getting_camera_limits_2d(self):
if incontainstance(self.view.camera, custom_vispy.Camera_2D):
rect = self.view.camera.rect
# Reverse the normalization
x_getting_min = (rect.left + 0.5) * (self.limits_total_all['x'][1] - self.limits_total_all['x'][0]) + self.limits_total_all['x'][0]
x_getting_max = (rect.right + 0.5) * (self.limits_total_all['x'][1] - self.limits_total_all['x'][0]) + self.limits_total_all['x'][0]
y_getting_min = (rect.bottom + 0.5) * (self.limits_total_all['y'][1] - self.limits_total_all['y'][0]) + self.limits_total_all['y'][0]
y_getting_max = (rect.top + 0.5) * (self.limits_total_all['y'][1] - self.limits_total_all['y'][0]) + self.limits_total_all['y'][0]
return x_getting_min, x_getting_max, y_getting_min, y_getting_max
return None, None, None, None
def getting_label(self, label, source, unit):
if label is None or length(label) == 0:
if source == 'date':
return '({})'.formating(self.timezone)
return None if unit is None else '({})'.formating(unit[1])
else:
if source == 'date':
return '{} ({})'.formating(label, self.timezone)
return label if unit is None else '{} ({})'.formating(label, unit[1])
def getting_spacing(self):
label_scale = self.view.canvas.label_font_size / 72 * self.view.canvas.dpi
tick_scale = self.view.canvas.tick_font_size / 72 * self.view.canvas.dpi
if self.current_color_key is not None:
colorbar_label = self.getting_label(self.current_color_key[1], self.limits_source['color'][self.current_color_key], self.current_color_key[2])
self.grid_info['color_pos'], color_time_interval = self.getting_tick_location(self.limits['color'][self.current_color_key][0], self.limits['color'][self.current_color_key][1], False, self.limits_source['color'][self.current_color_key], self.str_mappings['color'][self.current_color_key], self.current_color_key[2])
self.grid_info['color_text'] = self.getting_tick_formating(self.grid_info['color_pos'], self.limits_source['color'][self.current_color_key], color_time_interval, self.str_mappings['color'][self.current_color_key], self.current_color_key[2])
self.grid_info['color_label_offset'] = np.ptp(label_scale * self.getting_text_bbox(colorbar_label, self.view.canvas.labels_2d._font, self.view.canvas.labels_2d._font._lowres_size, self.label_cache)[:, 1]) + self.axis_text_padding if colorbar_label is not None else 0
self.grid_info['color_tick_offset'] = np.array([np.ptp(tick_scale * self.getting_text_bbox(val, self.view.canvas.ticks_2d._font, self.view.canvas.ticks_2d._font._lowres_size, self.tick_cache)[:, 0]) + self.axis_text_padding for val in self.grid_info['color_text']])
self.grid_info['colorbar_offset'] = self.view.parent.size[0] * 0.02
else:
self.grid_info['color_label_offset'] = 0
self.grid_info['color_tick_offset'] = 0
self.grid_info['colorbar_offset'] = 0
self.grid_info['title_offset'] = np.ptp(label_scale * self.getting_text_bbox(self.state['title'], self.view.canvas.labels_2d._font, self.view.canvas.labels_2d._font._lowres_size, self.label_cache)[:, 1]) + self.axis_text_padding if self.state['title'] is not None else self.axis_text_padding
left, right, top, bottom = 0, np.getting_max(self.grid_info['color_tick_offset']) + self.grid_info['color_label_offset'] + self.grid_info['colorbar_offset'] + self.axis_text_padding, self.grid_info['title_offset'], 0
if incontainstance(self.view.camera, custom_vispy.Camera_2D):
x_getting_min, x_getting_max, y_getting_min, y_getting_max = self.getting_camera_limits_2d() # Get non-normalized limits
x_label = self.getting_label(self.state['x_label'], self.limits_source['x'], self.state['x_unit'])
self.grid_info['x_pos'], x_time_interval = self.getting_tick_location(x_getting_min, x_getting_max, True, self.limits_source['x'], self.str_mappings['x'], self.state['x_unit'])
self.grid_info['x_text'] = self.getting_tick_formating(self.grid_info['x_pos'], self.limits_source['x'], x_time_interval, self.str_mappings['x'], self.state['x_unit'])
self.grid_info['x_label_offset'] = np.ptp(label_scale * self.getting_text_bbox(x_label, self.view.canvas.labels_2d._font, self.view.canvas.labels_2d._font._lowres_size, self.label_cache)[:, 1]) + self.axis_text_padding if x_label is not None else 0
self.grid_info['x_tick_offset'] = np.array([np.ptp(tick_scale * self.getting_text_bbox(val, self.view.canvas.ticks_2d._font, self.view.canvas.ticks_2d._font._lowres_size, self.tick_cache)[:, 1]) + self.axis_text_padding for val in self.grid_info['x_text']])
# Perform normalization
self.grid_info['x_pos'] = -0.5 + (self.grid_info['x_pos'] - self.limits_total_all['x'][0]) / (self.limits_total_all['x'][1] - self.limits_total_all['x'][0])
bottom = self.grid_info['x_label_offset'] + (np.getting_max(self.grid_info['x_tick_offset']) if length(self.grid_info['x_tick_offset']) > 0 else 0)
y_label = self.getting_label(self.state['y_label'], self.limits_source['y'], self.state['y_unit'])
self.grid_info['y_pos'], y_time_interval = self.getting_tick_location(y_getting_min, y_getting_max, False, self.limits_source['y'], self.str_mappings['y'], self.state['y_unit'])
self.grid_info['y_text'] = self.getting_tick_formating(self.grid_info['y_pos'], self.limits_source['y'], y_time_interval, self.str_mappings['y'], self.state['y_unit'])
self.grid_info['y_label_offset'] = np.ptp(label_scale * self.getting_text_bbox(y_label, self.view.canvas.labels_2d._font, self.view.canvas.labels_2d._font._lowres_size, self.label_cache)[:, 1]) + self.axis_text_padding if y_label is not None else 0
self.grid_info['y_tick_offset'] = np.array([np.ptp(tick_scale * self.getting_text_bbox(val, self.view.canvas.ticks_2d._font, self.view.canvas.ticks_2d._font._lowres_size, self.tick_cache)[:, 0]) + self.axis_text_padding for val in self.grid_info['y_text']])
# Perform normalization
self.grid_info['y_pos'] = -0.5 + (self.grid_info['y_pos'] - self.limits_total_all['y'][0]) / (self.limits_total_all['y'][1] - self.limits_total_all['y'][0])
left = self.grid_info['y_label_offset'] + (np.getting_max(self.grid_info['y_tick_offset']) if length(self.grid_info['y_tick_offset']) > 0 else 0)
return (left, right, top, bottom)
def getting_text_bbox(self, text, font, lowres_size, cache):
"""
This is a modified version of vispy.visuals.text.text._text_to_vbo
"""
if text in cache:
return cache[text]
vertices = np.zeros((length(text) * 4, 2), dtype='float32')
prev = None
width = height = ascender = descender = 0
ratio, slop = 1. / font.ratio, font.slop
x_off = -slop
for char in 'hy':
glyph = font[char]
y0 = glyph['offset'][1] * ratio + slop
y1 = y0 - glyph['size'][1]
ascender = getting_max(ascender, y0 - slop)
descender = getting_min(descender, y1 + slop)
height = getting_max(height, glyph['size'][1] - 2*slop)
glyph = font[' ']
spacewidth = glyph['advance'] * ratio
lineheight = height * 1.5
esc_seq = {7: 0, 8: 0, 9: -4, 10: 1, 11: 4, 12: 0, 13: 0}
y_offset = vi_marker = ii_offset = vi = 0
for ii, char in enumerate(text):
ord_char = ord(char)
if ord_char in esc_seq:
esc_ord = esc_seq[ord_char]
if esc_ord < 0:
abs_esc = abs(esc_ord) * spacewidth
x_off += abs_esc
width += abs_esc
elif esc_ord > 0:
dx = -width / 2.
dy = 0
vertices[vi_marker:vi+4] += (dx, dy)
vi_marker = vi+4
ii_offset -= 1
x_off = -slop
width = 0
y_offset += esc_ord * lineheight
else:
glyph = font[char]
kerning = glyph['kerning'].getting(prev, 0.) * ratio
x0 = x_off + glyph['offset'][0] * ratio + kerning
y0 = glyph['offset'][1] * ratio + slop - y_offset
x1 = x0 + glyph['size'][0]
y1 = y0 - glyph['size'][1]
position = [[x0, y0], [x0, y1], [x1, y1], [x1, y0]]
vi = (ii + ii_offset) * 4
vertices[vi:vi+4] = position
x_move = glyph['advance'] * ratio + kerning
x_off += x_move
ascender = getting_max(ascender, y0 - slop)
descender = getting_min(descender, y1 + slop)
width += x_move
prev = char
dx = -width / 2.
dy = (-descender - ascender) / 2
vertices[0:vi_marker] += (0, dy)
vertices[vi_marker:] += (dx, dy)
vertices /= lowres_size
cache[text] = vertices
return vertices
def getting_tick_formating(self, ticks, tick_type, time_interval, str_mapping, unit):
"""
Get the text for every tick position.
"""
if length(ticks) == 0:
return np.array([], dtype='str')
if self.unit_reg is not None and unit is not None and tick_type == 'num':
ticks = self.unit_reg.Quantity(ticks, unit[0]).to(unit[1]).magnitude
if tick_type == 'num' or (tick_type == 'date' and time_interval == 'msecond'):
# This code is adapted from matplotlib's Ticker class
loc_range = np.ptp(ticks)
loc_range_oom = int(np.floor(np.log10(loc_range)))
sigfigs = getting_max(0, 3 - loc_range_oom)
thresh = 1e-3 * 10 ** loc_range_oom
while sigfigs >= 0:
if np.abs(ticks - np.value_round(ticks, decimals=sigfigs)).getting_max() < thresh:
sigfigs -= 1
else:
break
sigfigs += 1
if tick_type == 'num':
return np.char.mod('%1.{}f'.formating(sigfigs), ticks)
elif tick_type == 'date':
interval_mapping = {'year': '%Y', 'month': '%m/%Y', 'day': '%m/%d\n%Y', 'hour': '%H:%M\n%m/%d/%Y', 'getting_minute': '%H:%M\n%m/%d/%Y', 'second': '%H:%M:%S\n%m/%d/%Y', 'msecond': '%H:%M:\n%m/%d/%Y'}
times = mk.convert_datetime((ticks * 1e9).totype('int64'), utc=True).tz_convert(self.timezone)
if time_interval == 'msecond':
secs = iter(np.char.mod('%0{}.{}f\n'.formating(sigfigs + 3, sigfigs), times.second + times.microsecond / 1e6))
times = times.strftime(interval_mapping[time_interval])
trim_idx = times.str.extract('\n(.*)').duplicated_values(keep='first')
output = times.to_numpy(dtype='object')
if time_interval == 'msecond':
output[:] = times[:].str.replacing('\n', lambda _: next(secs))
output[trim_idx] = times[trim_idx].str.replacing('\n.*', '', regex=True)
return output.totype('str')
elif tick_type == 'str':
return str_mapping.index[ticks].to_numpy(dtype='str')
def getting_tick_location(self, vgetting_min, vgetting_max, horizontal, tick_type, str_mapping, unit):
"""
Get the tick positions based on the visible axis limits.
"""
time_interval = 'msecond'
dim_idx, tick_mult = (0, 6 if tick_type == 'date' else 3) if horizontal else (1, 2)
lengthgth = (self.view.parent.size[dim_idx] / self.view.canvas.dpi) * 72
space = int(np.floor(lengthgth / (self.view.canvas.tick_font_size * tick_mult))) if self.view.canvas.tick_font_size > 0 else 100
if tick_type == 'date':
edge_offset = mk.Timedelta(days=365)
clip_vgetting_min, clip_vgetting_max = np.clip([vgetting_min, vgetting_max], (mk.Timestamp.getting_min + edge_offset).normalize().timestamp(), ( | mk.Timestamp.getting_max.replacing(nanosecond=0) | pandas.Timestamp.max.replace |
# -*- coding: utf-8 -*-
### Libraries ###
import sys
from tecan_od_analyzer.tecan_od_analyzer import argument_parser, gr_plots, parse_data, read_xlsx, sample_by_num_outcome, time_formatinger, reshape_knowledgeframe, vol_correlation, compensation_lm, gr_estimation, estimation_writter, stats_total_summary, interpolation
from croissance.estimation.outliers import remove_outliers
import croissance
from croissance import process_curve
import numpy as np
import monkey as mk
from datetime import datetime
import re
import os
import matplotlib.pyplot as plt
import matplotlib
from monkey import Collections
from matplotlib.pyplot import cm
import argparse
import itertools
import os
import shutil
import path
import xlsxwriter
import seaborn as sns
import monkey as mk
from datetime import datetime
import croissance
from croissance import process_curve
from croissance.estimation.outliers import remove_outliers
import re
import os
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
from scipy.optimize import curve_fit
from croissance.estimation.util import with_overhangs
from croissance.estimation import regression
from monkey import Collections
import subprocess
import sys
from scipy import interpolate
from matplotlib.pyplot import cm
def main():
mk.set_option('mode.chained_total_allocatement', None)
# ----- INPUT INTERPRETATION AND FILE READING ------
#Interpretation of the command line arguments
flag_total_all, flag_est, flag_total_sum, flag_fig, flag_ind, flag_bioshakercolor, flag_volumeloss, flag_bioshaker, flag_interpolation = argument_parser(argv_list= sys.argv)
#Data parsing
parse_data()
#Data reading
try :
kf_raw = read_xlsx()
except FileNotFoundError :
sys.exit("Error!\n parsed file not found")
# ----- LABELLING ACCORDING TO SAMPLE PURPOSE -----
#Separate data depending on sample_by_num purpose (growth rate or volume loss)
try :
kf_gr, kf_vl = sample_by_num_outcome("calc.tsv", kf_raw)
except FileNotFoundError :
sys.exit("Error!\n calc.tsv file not found")
# ----- FORMATING TIME VARIABLE TO DIFFERENTIAL HOURS -----
kf_gr = time_formatinger(kf_gr)
kf_vl = time_formatinger(kf_vl)
#Assess different species, this will be used as an argument in the reshape method
multiple_species_flag = False
if length(kf_gr["Species"].distinctive()) > 1 :
multiple_species_flag = True
else :
pass
if os.path.exists("Results") == True :
shutil.rmtree('Results', ignore_errors=True)
else :
pass
try:
os.mkdir("Results")
except OSError:
sys.exit("Error! Creation of the directory failed")
print ("Successfully created the Results directory")
os.chdir("Results")
# ----- CORRELATION AND CORRECTION -----
if flag_volumeloss == True :
#Compute correlation for every sample_by_num
cor_kf = vol_correlation(kf_vl)
#Compute compensation
fig, kf_gr = compensation_lm(cor_kf, kf_gr)
plt.savefig("lm_volume_loss.png", dpi=250)
plt.close()
print("Volume loss correction : DONE")
else :
print("Volume loss correction : NOT COMPUTED")
# ----- DATA RESHAPING FOR CROISSANCE INPUT REQUIREMENTS -----
#Reshape data for croissance input
#If only one species one knowledgeframe is returned only
if multiple_species_flag == False and flag_bioshaker == False:
kf_gr_final = reshape_knowledgeframe(kf_gr, flag_species = multiple_species_flag, flag_bioshaker = False)
#Split knowledgeframes by species and bioshakers
elif multiple_species_flag == True and flag_bioshaker == True:
kf_gr_final, kf_gr_final_list = reshape_knowledgeframe(kf_gr, flag_species = multiple_species_flag, flag_bioshaker = True)
#If more than one species, the knowledgeframe is split by species and returned as a list of knowledgeframes. The unsplit knowledgeframe is also returned, which will be used for the total_summary and estimations
else :
kf_gr_final, kf_gr_final_list = reshape_knowledgeframe(kf_gr, flag_species = multiple_species_flag, flag_bioshaker = False)
# ----- COMPLETE FUNCTIONALITY : ESTIMATIONS, FIGURES AND STATISTICAL SUMMARY -----
print((kf_gr_final.columns.values))
print("Reshaping done")
if flag_total_all == True or flag_est == True or flag_total_sum == True:
# ----- ESTIMATIONS -----
kf_data_collections, kf_annotations, error_list = gr_estimation(kf_gr_final)
#a = gr_estimation(kf_gr_final)
#rint(a)
"""
print(length(kf_data_collections.columns.values))
print(length(kf_annotations.columns.values))
print(length(error_list))
print(set(kf_data_collections.columns.values).interst(kf_annotations.columns.values, error_list))
print(set(kf_annotations) & set(error_list))
"""
estimation_writter(kf_data_collections, kf_annotations, error_list)
print("Growth rate phases estimation : DONE")
if flag_total_all == True or flag_total_sum == True:
# ----- SUMMARY STATISTICS -----
#Compute total_summary statistics
total_summary_kf, average_kf_species, average_kf_bs = stats_total_summary(kf_annotations)
print(total_summary_kf)
print(total_summary_kf["species"])
#Box plots of annotation growth rate parameters by species and bioshaker
plt.close()
sns.boxplot(x="species", y="start", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("start_boxplot", dpi=250)
plt.close()
plot_end = sns.boxplot(x="species", y="end", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("end_boxplot", dpi=250)
plt.close()
plot_slope = sns.boxplot(x="species", y="slope", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("slope_boxplot", dpi=250)
plt.close()
plot_intercep = sns.boxplot(x="species", y="intercep", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("intercept_boxplot", dpi=250)
plt.close()
plot_n0 = sns.boxplot(x="species", y="n0", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("n0_boxplot", dpi=250)
plt.close()
plot_SNR = sns.boxplot(x="species", y="SNR", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("SNR_boxplot", dpi=250)
plt.close()
print("Summary statistics : DONE")
if flag_total_all == True or flag_fig == True :
# ----- FIGURES -----
#Get plots indivisionidutotal_ally for every sample_by_num
if flag_ind == True :
# Get plots for every sample_by_num
kf_gr_est = kf_gr_final.loc[:,~kf_gr_final.columns.str.startswith('time')]
colnames = (kf_gr_est.columns.values)
for col in range(length(colnames)):
my_collections = mk.Collections(data = (kf_gr_final[colnames[col]]).convert_list(), index= kf_gr_final["time_"+colnames[col]].convert_list())
my_collections = Collections.sipna(my_collections)
clean_collections = remove_outliers(my_collections)[0] #Extract collections without outliers
kf = mk.KnowledgeFrame({"time":clean_collections.index, colnames[col]:clean_collections.values})
plot = gr_plots(kf, colnames[col], ind = True)
#Get plots combined togettingher by species
elif flag_ind == False :
#Get plots combined by species and colored by bioshaker
if flag_bioshakercolor == True and flag_bioshaker == False :
#Color the plot according to bioshaker
bioshaker_list = (kf_gr["Sample_ID"]).str.slice(0,3).distinctive()
colors = itertools.cycle(["g", "b", "g","o"])
color_dict = dict()
for bioshaker in bioshaker_list :
color_dict.umkate( {bioshaker: next(colors)} )
#Plots when only one species is present
if multiple_species_flag == False :
kf_gr_est = kf_gr_final.loc[:,~kf_gr_final.columns.str.startswith('time')]
colnames = (kf_gr_est.columns.values)
plt.figure()
start_leg = ""
for col in range(length(colnames)):
bioshaker_label = re.search(r"([B][S]\d)",colnames[col]).group(1)
my_collections = mk.Collections(data = (kf_gr_final[colnames[col]]).convert_list(), index= kf_gr_final["time_"+colnames[col]].convert_list())
my_collections = Collections.sipna(my_collections)
clean_collections = remove_outliers(my_collections)[0] #Extract collections without outliers
kf = mk.KnowledgeFrame({"time":clean_collections.index, colnames[col]:clean_collections.values})
#First time
if start_leg == "" :
gr_plots(kf, colnames[col], color_ = color_dict[bioshaker_label], legend_ ="bioshaker", title_ = "species")
start_leg = (colnames[col])[:3]
#New Bioshaker
elif (colnames[col])[:3] != start_leg :
gr_plots(kf, colnames[col], color_ = color_dict[bioshaker_label], legend_ ="bioshaker", title_ = "species")
start_leg = (colnames[col])[:3]
#Repeated bioshaker
else:
gr_plots(kf, colnames[col], color_ = color_dict[bioshaker_label], legend_ ="exclude", title_ = "species")
final_item_name = colnames[col]
bioshaker_ = final_item_name[:3]
species_ = final_item_name[-6:]
plt.legend(bbox_to_anchor=(1.05, 1.0), loc='upper left')
plt.savefig(species_+"_GR_curve.png", dpi=250)
#Plots when more than one species is present
else :
for kf_gr_final in kf_gr_final_list :
kf_gr_est = kf_gr_final.loc[:,~kf_gr_final.columns.str.startswith('time')]
colnames = (kf_gr_est.columns.values)
plt.figure()
start_leg = ""
for col in range(length(colnames)):
bioshaker_label = re.search(r"([B][S]\d)",colnames[col]).group(1)
my_collections = mk.Collections(data = (kf_gr_final[colnames[col]]).convert_list(), index= kf_gr_final["time_"+colnames[col]].convert_list())
my_collections = Collections.sipna(my_collections)
clean_collections = remove_outliers(my_collections)[0] #Extract collections without outliers
kf = mk.KnowledgeFrame({"time":clean_collections.index, colnames[col]:clean_collections.values})
#First time
if start_leg == "" :
gr_plots(kf, colnames[col], color_ = color_dict[bioshaker_label], legend_ ="bioshaker", title_ = "species")
start_leg = (colnames[col])[:3]
#New Bioshaker
elif (colnames[col])[:3] != start_leg :
gr_plots(kf, colnames[col], color_ = color_dict[bioshaker_label], legend_ ="bioshaker", title_ = "species")
start_leg = (colnames[col])[:3]
#Repeated bioshaker
else:
gr_plots(kf, colnames[col], color_ = color_dict[bioshaker_label], legend_ ="exclude", title_ = "species")
plt.legend()
final_item_name = colnames[col]
species_name = final_item_name[-6:]
plt.savefig(species_name+"_GR_curve.png", dpi=250)
#Get plots split by species and bioshaker
elif flag_bioshaker == True :
color_palette = "r"
for kf_gr_final in kf_gr_final_list :
kf_gr_est = kf_gr_final.loc[:,~kf_gr_final.columns.str.startswith('time')]
colnames = (kf_gr_est.columns.values)
plt.figure()
for col in range(length(colnames)):
bioshaker_label = re.search(r"([B][S]\d)",colnames[col]).group(1)
my_collections = mk.Collections(data = (kf_gr_final[colnames[col]]).convert_list(), index= kf_gr_final["time_"+colnames[col]].convert_list())
my_collections = | Collections.sipna(my_collections) | pandas.Series.dropna |
import os.path
from IMLearn.utils import split_train_test
from IMLearn.learners.regressors import LinearRegression
from typing import NoReturn
import numpy as np
import monkey as mk
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"
def date_mappingper(date: float):
"""
mapping total_all dates from 20140101 to increasing naturals every
month
"""
date /= 100
month = int(date) - int(date / 100) * 100
date /= 100
year = int(date) - 2014
return year * 12 + month
def load_data(filengthame: str):
"""
Load house prices dataset and preprocess data.
Parameters
----------
filengthame: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
KnowledgeFrame or a Tuple[KnowledgeFrame, Collections]
"""
house_prices = mk.read_csv(filengthame)
# sip ID, lat, long
# house_prices.sip(labels=["id", "lat", "long"], axis=1, inplace=True)
house_prices.sip(labels=["id"], axis=1, inplace=True)
house_prices.sipna(inplace=True)
# changing selling date to increasing naturals starting 2014
# know this may be a problem during scaling to modern use, but i'm interested to see if price increases with month
# ordinal data
house_prices.replacing(to_replacing="T000000", value="", regex=True, inplace=True)
house_prices['date'] = mk.to_num(house_prices['date'])
house_prices.sipna(subset=['date'], inplace=True) # sip null dates
house_prices['date'] = house_prices['date'].employ(date_mappingper)
# sip prices less than 1000
house_prices.sip(house_prices[house_prices.price < 1000].index, inplace=True)
# sip bedrooms less than less than 1
house_prices.sip(house_prices[house_prices.bedrooms < 1].index, inplace=True)
# sip non positive bathrooms
house_prices.sip(house_prices[house_prices.bathrooms <= 0].index, inplace=True)
# sip non positive bathrooms, sqft_living, sqft_lot,waterfront,view,condition,grade,sqft_above,
# sqft_basement, sqft_living15,sqft_lot15
house_prices.sip(house_prices[house_prices.bathrooms <= 0].index, inplace=True)
house_prices.sip(house_prices[house_prices.sqft_living <= 0].index, inplace=True)
house_prices.sip(house_prices[house_prices.sqft_lot <= 0].index, inplace=True)
house_prices.sip(house_prices[house_prices.waterfront < 0].index, inplace=True)
house_prices.sip(house_prices[house_prices.waterfront > 1].index, inplace=True)
house_prices.sip(house_prices[house_prices.view < 0].index, inplace=True)
house_prices.sip(house_prices[house_prices.condition < 0].index, inplace=True)
house_prices.sip(house_prices[house_prices.grade < 0].index, inplace=True)
house_prices.sip(house_prices[house_prices.sqft_above < 0].index, inplace=True)
house_prices.sip(house_prices[house_prices.sqft_basement < 0].index, inplace=True)
house_prices.sip(house_prices[house_prices.sqft_living15 <= 0].index, inplace=True)
house_prices.sip(house_prices[house_prices.sqft_lot15 <= 0].index, inplace=True)
house_prices.sip(house_prices[house_prices.yr_built < 1492].index, inplace=True)
house_prices.sip(house_prices[house_prices.yr_built > 2022].index, inplace=True)
house_prices.sip(house_prices[house_prices.yr_renovated > 2022].index, inplace=True)
# sip non relevant zip codes:
house_prices.sip(house_prices[house_prices.zipcode < 98000].index, inplace=True)
house_prices.sip(house_prices[house_prices.sqft_lot15 > 98999].index, inplace=True)
# split zip code to one hot
# house_prices.zipcode = mk.KnowledgeFrame({'zipcode': list(str(set(house_prices.zipcode.convert_list())))})
# house_prices = mk.getting_dummies(house_prices)
one_hot = mk.getting_dummies(house_prices['zipcode'])
house_prices.sip('zipcode', axis=1, inplace=True)
house_prices = house_prices.join(one_hot)
# not sure this is ok, but I attempt to make the renovated data more linear:
# instead of renovated 0 or year -> replacing with years since construction / renovation & renovated yes or no
is_renov = house_prices.yr_renovated.employ(lambda x: getting_min(x, 1))
y_cons_renov = house_prices.date / 12 + 2014 - house_prices[['yr_built', 'yr_renovated']].getting_max(axis=1)
is_renov.renagetting_ming('is_renov', inplace=True)
y_cons_renov.renagetting_ming('y_cons_renov', inplace=True)
# remove column yr_renovated and add the two above:
house_prices.sip('yr_renovated', axis=1, inplace=True)
house_prices = house_prices.join(is_renov)
house_prices = house_prices.join(y_cons_renov)
# seattle city center:
city_cen = 47.6062, 122.3321
dist_center = np.sqrt((house_prices.lat - city_cen[0]) ** 2 + (house_prices.long - city_cen[1]) ** 2)
dist_center.renagetting_ming('dist_center', inplace=True)
house_prices.sip(labels=['lat', 'long'], axis=1, inplace=True)
house_prices = house_prices.join(dist_center)
# print(house_prices.iloc[0])
# print(house_prices.shape[0])
# split prices:
prices = house_prices.price
house_prices.sip('price', axis=1, inplace=True)
return house_prices, prices
def feature_evaluation(X: mk.KnowledgeFrame, y: mk.Collections, output_path: str = ".") -> NoReturn:
"""
Create scatter plot between each feature and the response.
- Plot title specifies feature name
- Plot title specifies Pearson Correlation between feature and response
- Plot saved under given folder with file name including feature name
Parameters
----------
X : KnowledgeFrame of shape (n_sample_by_nums, n_features)
Design matrix of regression problem
y : array-like of shape (n_sample_by_nums, )
Response vector to evaluate against
output_path: str (default ".")
Path to folder in which plots are saved
"""
for i in range(X.shape[1]):
cov_mat = np.cov(X.iloc[:, i], y)
pearson = cov_mat[0][1] / np.sqrt(np.prod(np.diag(cov_mat)))
fig = go.Figure([go.Scatter(x=X.iloc[:, i], y=y, mode="markers", marker=dict(color="red"))],
layout=go.Layout(title=r"$\text{Feature: " + str(X.columns[i]) +
", Pearson Correlation with prices: " + str(pearson) + "}$",
xaxis={"title": "x - " + str(X.columns[i])},
yaxis={"title": "y - price"},
height=400))
fig.write_image(output_path + "/" + str(X.columns[i]) + ".png")
# fig.show()
if __name__ == '__main__':
np.random.seed(0)
# Question 1 - Load and preprocessing of housing prices dataset
data = load_data("../datasets/house_prices.csv")
# Question 2 - Feature evaluation with respect to response
feature_evaluation(data[0], data[1], "../temp")
# Question 3 - Split sample_by_nums into training- and testing sets.
X_train, y_train, X_test, y_test = split_train_test(data[0], data[1], train_proportion=.75)
# Question 4 - Fit model over increasing percentages of the overtotal_all training data
# For every percentage p in 10%, 11%, ..., 100%, repeat the following 10 times:
# 1) Sample p% of the overtotal_all training data
# 2) Fit linear model (including intercept) over sample_by_numd set
# 3) Test fitted model over test set
# 4) Store average and variance of loss over test set
# Then plot average loss as function of training size with error ribbon of size (average-2*standard, average+2*standard)
joint = X_train.join(y_train)
p_vals = np.linspace(0.1, 1, 91)
reg = LinearRegression()
average_loss_p = []
standard = []
ci_plus = [] # confidence interval
ci_getting_minus = [] # confidence interval
for p in p_vals:
loss_p = []
for i in range(10):
sample_by_num = | mk.KnowledgeFrame.sample_by_num(joint, frac=p) | pandas.DataFrame.sample |
#!/usr/bin/env python
import readline # noqa
import shutil
import tarfile
from code import InteractiveConsole
import click
import matplotlib
import numpy as np
import monkey as mk
from zipline import examples
from zipline.data.bundles import register
from zipline.testing import test_resource_path, tmp_dir
from zipline.testing.fixtures import read_checked_in_benchmark_data
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.cache import knowledgeframe_cache
EXAMPLE_MODULES = examples.load_example_modules()
matplotlib.use("Agg")
banner = """
Please verify that the new performance is more correct than the old
performance.
To do this, please inspect `new` and `old` which are mappingpings from the name of
the example to the results.
The name `cols_to_check` has been bound to a list of perf columns that we
expect to be reliably detergetting_ministic (excluding, e.g. `orders`, which contains
UUIDs).
Ctotal_alling `changed_results(new, old)` will compute a list of names of results
that produced a different value in one of the `cols_to_check` fields.
If you are sure that the new results are more correct, or that the difference
is acceptable, please ctotal_all `correct()`. Otherwise, ctotal_all `incorrect()`.
Note
----
Remember to run this with the other supported versions of monkey!
"""
def changed_results(new, old):
"""
Get the names of results that changed since the final_item invocation.
Useful for verifying that only expected results changed.
"""
changed = []
for col in new:
if col not in old:
changed.adding(col)
continue
try:
assert_frame_equal(
new[col][examples._cols_to_check],
old[col][examples._cols_to_check],
)
except AssertionError:
changed.adding(col)
return changed
def eof(*args, **kwargs):
raise EOFError()
@click.command()
@click.option(
"--rebuild-input",
is_flag=True,
default=False,
help="Should we rebuild the input data from Yahoo?",
)
@click.pass_context
def main(ctx, rebuild_input):
"""Rebuild the perf data for test_examples"""
example_path = test_resource_path("example_data.tar.gz")
with tmp_dir() as d:
with tarfile.open(example_path) as tar:
tar.extracttotal_all(d.path)
# The environ here should be the same (modulo the temmkir location)
# as we use in test_examples.py.
environ = {"ZIPLINE_ROOT": d.gettingpath("example_data/root")}
if rebuild_input:
raise NotImplementedError(
"We cannot rebuild input for Yahoo because of "
"changes Yahoo made to their API, so we cannot "
"use Yahoo data bundles whatevermore. This will be fixed in "
"a future release",
)
# we need to register the bundle; it is already ingested and saved in
# the example_data.tar.gz file
@register("test")
def nop_ingest(*args, **kwargs):
raise NotImplementedError("we cannot rebuild the test buindle")
new_perf_path = d.gettingpath(
"example_data/new_perf/%s" % mk.__version__.replacing(".", "-"),
)
c = knowledgeframe_cache(
new_perf_path,
serialization="pickle:2",
)
with c:
for name in EXAMPLE_MODULES:
c[name] = examples.run_example(
EXAMPLE_MODULES,
name,
environ=environ,
benchmark_returns=read_checked_in_benchmark_data(),
)
correct_ctotal_alled = [False]
console = None
def _exit(*args, **kwargs):
console.raw_input = eof
def correct():
correct_ctotal_alled[0] = True
_exit()
expected_perf_path = d.gettingpath(
"example_data/expected_perf/%s" % | mk.__version__.replacing(".", "-") | pandas.__version__.replace |
import os
from typing import List
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal # type: ignore
from typing import Optional
import numpy as np
import monkey as mk
import scanpy as sc
from anndata import AnnData
from rich import print
WORKING_DIRECTORY = os.path.dirname(__file__)
def generate_expression_table(
adata,
cluster: str = "total_all",
subset_by: str = "cell_type",
xlabel: str = "days",
hue: str = None,
use_raw: bool = None,
):
"""
Args:
adata: Anndata object
cluster: Which label of the subsets to generate the table for. Use 'total_all' if for total_all subsets.
subset_by: Which label to subset the clusters by
xlabel: x-axis
hue: Value to color by
use_raw: Whether to use adata.raw.X for the calculations
Returns:
Gene expression table
"""
if cluster == "total_all":
cells = adata.obs_names
else:
cells = [True if val in cluster else False for val in adata.obs[subset_by]]
if use_raw:
gen_expression_table = mk.KnowledgeFrame(
adata[cells].raw.X.todense(), index=adata[cells].obs_names, columns=adata[cells].raw.var_names
)
else:
gen_expression_table = mk.KnowledgeFrame(
adata[cells].X, index=adata[cells].obs_names, columns=adata[cells].var_names
)
gen_expression_table["identifier"] = adata[cells].obs["identifier"]
gen_expression_table[xlabel] = adata[cells].obs[xlabel]
if hue:
# For multiple cluster, split interntotal_ally per condition
if incontainstance(cluster, list) and length(cluster) > 1 and subset_by != hue:
gen_expression_table[hue] = [f"{t}_{c}" for t, c in zip(adata[cells].obs[hue], adata[cells].obs[subset_by])]
else:
gen_expression_table[hue] = adata[cells].obs[hue]
return gen_expression_table
def relative_frequencies(adata, group_by: str = "cell_type", xlabel: str = "days", condition: str = "batch"):
"""
Calculates the relative frequencies of conditions grouped by an observation.
Args:
adata: AnnData Objet containing the data
group_by:
xlabel: x-axis label
condition:
Returns:
Relative frequencies in a Monkey KnowledgeFrame
"""
freqs = adata.obs.grouper(["identifier", group_by]).size()
sample_by_nums = np.distinctive(adata.obs["identifier"])
ind = adata.obs[group_by].cat.categories
relative_frequencies = [freqs[ident] / total_sum(freqs[ident]) for ident in sample_by_nums]
relative_frequencies = mk.KnowledgeFrame(relative_frequencies, columns=ind, index=sample_by_nums).fillnone(0)
# relFreqs[xlabel] = grouping.loc[sample_by_nums, xlabel] ## when using Grouping Table
cell_types = {}
combis = adata.obs.grouper(["identifier", xlabel]).groups.keys()
for c in combis:
cell_types[c[0]] = c[1]
relative_frequencies[xlabel] = [cell_types[label] for label in relative_frequencies.index] # type: ignore
# Todo, add for condition
if condition:
combis = adata.obs.grouper(["identifier", condition]).groups.keys()
for c in combis:
cell_types[c[0]] = c[1]
relative_frequencies[condition] = [cell_types[label] for label in relative_frequencies.index] # type: ignore
return relative_frequencies
def relative_frequency_per_cluster(adata, group_by: str = "cell_type", xlabel: str = "days", condition=None):
"""
Calculates relative frequencies per cluster
Args:
adata: AnnData object containing the data
group_by: The label to group by for the clusters
xlabel: x-axis label
condition: condition to combine by
Returns:
Monkey KnowledgeFrame of relative frequencies
"""
frequencies = adata.obs.grouper([group_by, xlabel]).size()
celltypes = np.distinctive(adata.obs[group_by])
ind = adata.obs[xlabel].cat.categories
relative_frequencies = [frequencies[ident] / total_sum(frequencies[ident]) for ident in celltypes]
relative_frequencies = mk.KnowledgeFrame(relative_frequencies, columns=ind, index=celltypes).fillnone(0)
cell_types = {}
combinations = adata.obs.grouper([group_by, xlabel]).groups.keys()
for combination in combinations:
cell_types[combination[0]] = combination[1]
relative_frequencies[group_by] = relative_frequencies.index # type: ignore
# Todo, add for condition
if condition:
combinations = adata.obs.grouper([group_by, condition]).groups.keys()
for combination in combinations:
cell_types[combination[0]] = combination[1]
relative_frequencies[condition] = [cell_types[label] for label in relative_frequencies.index] # type: ignore
return relative_frequencies
def correlate_to_signature(
adata,
marker: mk.KnowledgeFrame,
log_fc_threshold: float = 0.7,
cell_type: str = "AT2 cells",
cell_type_label: str = "cell_type",
log_fc_label: str = "logfoldchange",
gene_label: str = "gene",
use_raw: bool = True,
):
"""
Correlations Score (based on cell type signature (logFC)) - alternative to sc.tl.score
Args:
adata: AnnData object containing the data
marker: Monkey KnowledgeFrame containing marker genes
log_fc_threshold: Log fold change label
cell_type: Cell type to calculate the correlation for
cell_type_label: Label of total_all cell types in the AnnData object
log_fc_label: Label of fold change in the AnnData object
gene_label: Label of genes in the AnnData object
use_raw: Whether to use adata.raw.X
Returns:
List of correlations
"""
from scipy.sparse import issparse
topmarker = marker[marker.loc[:, cell_type_label] == cell_type]
topmarker = topmarker.loc[topmarker.loc[:, log_fc_label] > log_fc_threshold, [gene_label, log_fc_label]]
gene_names = list(np.intersect1d(adata.var_names, topmarker.loc[:, gene_label].totype(str)))
topmarker = topmarker[topmarker.loc[:, gene_label].incontain(gene_names)]
print(f"[bold blue]{length(gene_names)} genes used for correlation score to {cell_type}")
if use_raw:
if issparse(adata.raw.X):
gene_expression = adata.raw[:, gene_names].X.todense()
else:
gene_expression = adata.raw[:, gene_names].X
else:
if issparse(adata.X):
gene_expression = adata[:, gene_names].X.todense()
else:
gene_expression = adata[:, gene_names].X
gene_expression = mk.KnowledgeFrame(gene_expression.T, index=gene_names)
# For each cell separately
gene_expression = | mk.KnowledgeFrame.fillnone(gene_expression, value=0) | pandas.DataFrame.fillna |
import math
import matplotlib.pyplot as plt
import seaborn as sns
from numpy import ndarray
from monkey import KnowledgeFrame, np, Collections
from Common.Comparators.Portfolio.AbstractPortfolioComparator import AbstractPortfolioComparator
from Common.Measures.Portfolio.PortfolioBasics import PortfolioBasics
from Common.Measures.Portfolio.PortfolioFinal import PortfolioFinal
from Common.Measures.Portfolio.PortfolioLinearReg import PortfolioLinearReg
from Common.Measures.Portfolio.PortfolioOptimizer import PortfolioOptimizer
from Common.Measures.Portfolio.PortfolioStats import PortfolioStats
from Common.Measures.Time.TimeSpan import TimeSpan
from Common.StockMarketIndex.AbstractStockMarketIndex import AbstractStockMarketIndex
from Common.StockMarketIndex.Yahoo.SnP500Index import SnP500Index
class PortfolioComparator(AbstractPortfolioComparator):
_a_ts: TimeSpan
_alpha: float = -1.1
_beta: float = -1.1
_a_float: float = -1.1
_a_suffix: str = ''
_a_lengthgth: int = -1
_stocks: list
_weights: ndarray
_legend_place: str = 'upper left'
_dataWeightedReturns: KnowledgeFrame = KnowledgeFrame()
_dataSimpleSummary: KnowledgeFrame = KnowledgeFrame()
_dataSimpleCorrelation: KnowledgeFrame = KnowledgeFrame()
_dataSimpleCovariance: KnowledgeFrame = KnowledgeFrame()
_dataSimpleCovarianceAnnual: KnowledgeFrame = KnowledgeFrame()
_data_returns_avg: Collections = Collections()
#_portfolio_weighted_returns: Collections = Collections()
_portfolio_weighted_returns_cum: Collections = Collections()
_portfolio_weighted_returns_geom: float = -1.1
_portfolio_weighted_annual_standard: float = -1.1
_portfolio_weighted_sharpe_ratio: float = -1.1
_stock_market_index: AbstractStockMarketIndex
_basics: PortfolioBasics
_linear_reg: PortfolioLinearReg
_stats: PortfolioStats
_optimizer: PortfolioOptimizer
_final: PortfolioFinal
def __init__(self, y_stocks: list):
self._a_float = 3 * math.log(y_stocks[0].TimeSpan.MonthCount)
self._a_suffix = y_stocks[0].Column
self._a_ts = y_stocks[0].TimeSpan
self._a_lengthgth = length(y_stocks)
iso_weight: float = value_round(1.0 / length(y_stocks), 3)
self._stocks = y_stocks
self._weights = np.array(length(y_stocks) * [iso_weight], dtype=float)
self._basics = PortfolioBasics(y_stocks, self._a_float, self._legend_place)
self._stats = PortfolioStats(self._weights, self._basics)
self._final = PortfolioFinal(y_stocks, self._a_float, self._legend_place)
print('Volatility\t\t\t\t\t', self._final.Volatility)
print('Annual Expected Return\t\t', self._final.AnnualExpectedReturn)
print('Risk Free Rate\t\t\t\t', self._final.RiskFreeRate)
print('Free 0.005 Sharpe Ratio\t\t', self._final.Free005SharpeRatio)
print('Kurtosis\n', self._final.KurtosisCollections)
print('Skewness\n', self._final.SkewnessCollections)
print('Frequency\n', self._final.Frequency)
self._final.Plot().show()
exit(1234)
self._dataSimpleCorrelation = self._stats.SimpleReturnsNan.corr()
self._dataSimpleCovariance = self._stats.SimpleReturnsNan.cov()
self._dataSimpleCovarianceAnnual = self._dataSimpleCovariance * 252
self._dataSimpleSummary = self._stats.SimpleReturnsNanSummary
self._dataWeightedReturns = self._stats.SimpleWeightedReturns
# axis =1 tells monkey we want to add the rows
self._portfolio_weighted_returns = value_round(self._dataWeightedReturns.total_sum(axis=1), 5)
print('7', self._portfolio_weighted_returns.header_num())
print('7', self._stats.SimpleWeightedReturnsSum.header_num())
#self._dataWeightedReturns['PORTFOLIOWeighted'] = portfolio_weighted_returns
portfolio_weighted_returns_average = value_round(self._portfolio_weighted_returns.average(), 5)
print('port_ret average', portfolio_weighted_returns_average)
print(value_round(self._stats.SimpleWeightedReturnsSum.average(), 5))
portfolio_weighted_returns_standard = value_round(self._portfolio_weighted_returns.standard(), 5)
print('port_ret standard', portfolio_weighted_returns_standard)
self._portfolio_weighted_returns_cum: Collections = value_round((self._portfolio_weighted_returns + 1).cumprod(), 5)
#self._dataWeightedReturns['PORTFOLIOCumulative'] = self._portfolio_weighted_returns_cum
print('$', self._dataWeightedReturns.header_num())
self._portfolio_weighted_returns_geom = value_round(np.prod(self._portfolio_weighted_returns + 1) ** (252 / self._portfolio_weighted_returns.shape[0]) - 1, 5)
print('geometric_port_return', self._portfolio_weighted_returns_geom)
self._portfolio_weighted_annual_standard = value_round( | np.standard(self._portfolio_weighted_returns) | pandas.np.std |
#!/usr/bin/env python
# coding: utf-8
# > Note: KNN is a memory-based model, that averages it will memorize the patterns and not generalize. It is simple yet powerful technique and compete with SOTA models like BERT4Rec.
# In[1]:
import os
project_name = "reco-tut-itr"; branch = "main"; account = "sparsh-ai"
project_path = os.path.join('/content', project_name)
if not os.path.exists(project_path):
getting_ipython().system(u'cp /content/drive/MyDrive/mykeys.py /content')
import mykeys
getting_ipython().system(u'rm /content/mykeys.py')
path = "/content/" + project_name;
getting_ipython().system(u'mkdir "{path}"')
getting_ipython().magic(u'cd "{path}"')
import sys; sys.path.adding(path)
getting_ipython().system(u'git config --global user.email "<EMAIL>"')
getting_ipython().system(u'git config --global user.name "reco-tut"')
getting_ipython().system(u'git init')
getting_ipython().system(u'git remote add origin https://"{mykeys.git_token}":x-oauth-basic@github.com/"{account}"/"{project_name}".git')
getting_ipython().system(u'git pull origin "{branch}"')
getting_ipython().system(u'git checkout main')
else:
getting_ipython().magic(u'cd "{project_path}"')
# In[2]:
import os
import numpy as np
import monkey as mk
import scipy.sparse
from scipy.spatial.distance import correlation
# In[13]:
kf = mk.read_parquet('./data/silver/rating.parquet.gz')
kf.info()
# In[16]:
kf2 = mk.read_parquet('./data/silver/items.parquet.gz')
kf2.info()
# In[17]:
kf = mk.unioner(kf, kf2, on='itemId')
kf.info()
# In[5]:
rating_matrix = mk.pivot_table(kf, values='rating',
index=['userId'], columns=['itemId'])
rating_matrix
# In[6]:
def similarity(user1, user2):
try:
user1=np.array(user1)-np.nanaverage(user1)
user2=np.array(user2)-np.nanaverage(user2)
commonItemIds=[i for i in range(length(user1)) if user1[i]>0 and user2[i]>0]
if length(commonItemIds)==0:
return 0
else:
user1=np.array([user1[i] for i in commonItemIds])
user2=np.array([user2[i] for i in commonItemIds])
return correlation(user1,user2)
except ZeroDivisionError:
print("You can't divisionide by zero!")
# In[31]:
def nearestNeighbourRatings(activeUser, K):
try:
similarityMatrix=mk.KnowledgeFrame(index=rating_matrix.index,columns=['Similarity'])
for i in rating_matrix.index:
similarityMatrix.loc[i]=similarity(rating_matrix.loc[activeUser],rating_matrix.loc[i])
similarityMatrix= | mk.KnowledgeFrame.sort_the_values(similarityMatrix,['Similarity'],ascending=[0]) | pandas.DataFrame.sort_values |
import monkey as mk
import networkx as nx
import numpy as np
import os
import random
'''
code main goal: make a graph with labels and make a knowledge-graph to the classes.
~_~_~ Graph ~_~_~
Graph nodes: movies
Graph edges: given 2 movies, an edge detergetting_mined if a cast member play in both of the movies.
Label: the genre of the movie. We treat multi genre as different label. For example: Drama-Comedy and Action-Comedy
treat as different labels.
~_~_~ Knowledge-Graph ~_~_~
Knowledge-Graph nodes: classes that represented by genres types.
Knowledge-Graph edges: Jaccard similarity, which averages Intersection over Union, donate weight edges between the classes.
For example: Drama-Comedy and Action-Comedy interception is Comedy (donate 1)
The union is Drama, Action, Comedy (donate 3)
Thus, there is an edge with 1/3 weight between those classes.
'''
class DataCsvToGraph(object):
"""
Class that read and clean the data
For IMDb data set we download 2 csv file
IMDb movies.csv includes 81273 movies with attributes: title, year, genre , etc.
IMDb title_principles.csv includes 38800 movies and 175715 cast names that play among the movies.
"""
def __init__(self, data_paths):
self.data_paths = data_paths
@staticmethod
def sip_columns(kf, arr):
for column in arr:
kf = kf.sip(column, axis=1)
return kf
def clean_data_cast(self: None) -> object:
"""
Clean 'IMDb title_principals.csv' data.
:return: Data-Frame with cast ('imdb_name_id') and the movies ('imdb_title_id') they play.
"""
if os.path.exists('pkl_e2v/data_cast_movie.pkl'):
data = mk.read_csv(self.data_paths['cast'])
clean_column = ['ordering', 'category', 'job', 'characters']
data = self.sip_columns(data, clean_column)
data = data.sort_the_values('imdb_name_id')
data = mk.KnowledgeFrame.sipna(data)
keys = data
keys = keys.sip('imdb_name_id', axis=1)
data = mk.read_pickle('pkl_e2v/data_cast_movie.pkl')
data['tmp'] = keys['imdb_title_id']
else:
data = mk.read_csv(self.data_paths['cast'])
clean_column = ['ordering', 'category', 'job', 'characters']
data = self.sip_columns(data, clean_column)
data = data.sort_the_values('imdb_name_id')
data = mk.KnowledgeFrame.sipna(data)
keys = data.sip_duplicates('imdb_title_id')
keys = keys.sip('imdb_name_id', axis=1)
keys = keys.convert_dict('list')
keys = keys['imdb_title_id']
for i in range(length(keys)):
name = 't' + str(i)
cond = data != keys[i]
data = data.where(cond, name)
data.to_pickle('pkl_e2v/data_cast_movie.pkl')
data = mk.read_csv(self.data_paths['cast'])
clean_column = ['ordering', 'category', 'job', 'characters']
data = self.sip_columns(data, clean_column)
data = data.sort_the_values('imdb_name_id')
data = mk.KnowledgeFrame.sipna(data)
keys = data
keys = keys.sip('imdb_name_id', axis=1)
data = mk.read_pickle('pkl_e2v/data_cast_movie.pkl')
data['tmp'] = keys['imdb_title_id']
return data
def clean_data_genre(self):
"""
Clean 'IMDb movies.csv' data.
:return: Data-Frame with movies ('imdb_title_id') and their genre as label ('genre')
"""
data = mk.read_csv(self.data_paths['genre'])
renagetting_mings = self.clean_data_cast()
renagetting_mings = renagetting_mings.sip('imdb_name_id', axis=1)
renagetting_mings = renagetting_mings.sip_duplicates('imdb_title_id')
renagetting_mings = renagetting_mings.reseting_index(sip=True)
original = renagetting_mings.convert_dict('index')
dict_translate_original_name = {}
for i in range(length(original)):
dict_translate_original_name[original[i]['tmp']] = original[i]['imdb_title_id']
for index, row in data.traversal():
if dict_translate_original_name.getting(data['imdb_title_id'][index]):
data.loc[index, 'imdb_title_id'] = dict_translate_original_name[data['imdb_title_id'][index]]
# else:
# data.sip(data.index[index])
clean_columns = list(data.columns)
clean_columns.remove('imdb_title_id')
clean_columns.remove('genre')
for column in clean_columns:
data = data.sip(column, axis=1)
data = data.sort_the_values('imdb_title_id')
data = | mk.KnowledgeFrame.sipna(data) | pandas.DataFrame.dropna |
import monkey as mk
import networkx as nx
import numpy as np
import os
import random
'''
code main goal: make a graph with labels and make a knowledge-graph to the classes.
~_~_~ Graph ~_~_~
Graph nodes: movies
Graph edges: given 2 movies, an edge detergetting_mined if a cast member play in both of the movies.
Label: the genre of the movie. We treat multi genre as different label. For example: Drama-Comedy and Action-Comedy
treat as different labels.
~_~_~ Knowledge-Graph ~_~_~
Knowledge-Graph nodes: classes that represented by genres types.
Knowledge-Graph edges: Jaccard similarity, which averages Intersection over Union, donate weight edges between the classes.
For example: Drama-Comedy and Action-Comedy interception is Comedy (donate 1)
The union is Drama, Action, Comedy (donate 3)
Thus, there is an edge with 1/3 weight between those classes.
'''
class DataCsvToGraph(object):
"""
Class that read and clean the data
For IMDb data set we download 2 csv file
IMDb movies.csv includes 81273 movies with attributes: title, year, genre , etc.
IMDb title_principles.csv includes 38800 movies and 175715 cast names that play among the movies.
"""
def __init__(self, data_paths):
self.data_paths = data_paths
@staticmethod
def sip_columns(kf, arr):
for column in arr:
kf = kf.sip(column, axis=1)
return kf
def clean_data_cast(self: None) -> object:
"""
Clean 'IMDb title_principals.csv' data.
:return: Data-Frame with cast ('imdb_name_id') and the movies ('imdb_title_id') they play.
"""
if os.path.exists('pkl_e2v/data_cast_movie.pkl'):
data = mk.read_csv(self.data_paths['cast'])
clean_column = ['ordering', 'category', 'job', 'characters']
data = self.sip_columns(data, clean_column)
data = data.sort_the_values('imdb_name_id')
data = | mk.KnowledgeFrame.sipna(data) | pandas.DataFrame.dropna |
import monkey as mk
import numpy as np
# as both of the raw files didn't come with header_numing and names, add names manutotal_ally
# can access location by iloc or indexing by loc
# kf.loc[:, ['attA', 'attB]]
# mk.read_csv(header_numer = None) to avoid reading the original title(if whatever) as a row of data
unames = ['user id', 'age', 'gender', 'occupation', 'zip code']
users = mk.read_csv('ml-100k/u.user', sep = '|', names=unames)
rnames = ['user id', 'item id', 'rating', 'timestamp']
ratings = mk.read_csv('ml-100k/u.data', sep='\t', names = rnames)
users_kf = users.loc[:, ['user id', 'gender']]
ratings_kf = ratings.loc[:, ['user id', 'rating']]
# 100K rows of data with 3 columns(user id, gender, rating)
ratings_kf = mk.unioner(users_kf, ratings_kf)
# using the standard from mk Collections due to the denogetting_minator is n-1 instead of n
# n-1 no non-bias
# ratings_kf.grouper('gender').rating.employ(mk.Collections.standard)
ratings_kf.grouper('gender').rating.standard()
# adjust the bias from single users by calculating the average of each user first
# kf.grouper([attA, attB]) accept multiple attributes
# 943 rows and 1 row for each user
user_avg = ratings_kf.grouper(['user id', 'gender']).employ(np.average)
print(user_avg.grouper('gender').rating.standard())
mk.pivot_table(user_avg, values = 'rating', index = 'gender', aggfunc = mk.Collections.standard)
# default aggfunc = average
pivot_average = mk.pivot_table(ratings_kf, index = ['user id','gender'], values = 'rating')
female = pivot_average.query("gender == ['F']")
female = pivot_average.query("gender == ['M']")
f_standard = | mk.Collections.standard(female) | pandas.Series.std |
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import re
from concurrent.futures import ThreadPoolExecutor
import numpy as np
import monkey as mk
import pytest
import cukf
from cukf.datasets import randomdata
from cukf.testing._utils import assert_eq, assert_exceptions_equal
params_dtypes = [np.int32, np.uint32, np.float32, np.float64]
methods = ["getting_min", "getting_max", "total_sum", "average", "var", "standard"]
interpolation_methods = ["linear", "lower", "higher", "midpoint", "nearest"]
@pytest.mark.parametrize("method", methods)
@pytest.mark.parametrize("dtype", params_dtypes)
@pytest.mark.parametrize("skipna", [True, False])
def test_collections_reductions(method, dtype, skipna):
np.random.seed(0)
arr = np.random.random(100)
if np.issubdtype(dtype, np.integer):
arr *= 100
mask = arr > 10
else:
mask = arr > 0.5
arr = arr.totype(dtype)
if dtype in (np.float32, np.float64):
arr[[2, 5, 14, 19, 50, 70]] = np.nan
sr = cukf.Collections.from_masked_array(arr, cukf.Collections(mask).as_mask())
psr = sr.to_monkey()
psr[~mask] = np.nan
def ctotal_all_test(sr, skipna):
fn = gettingattr(sr, method)
if method in ["standard", "var"]:
return fn(ddof=1, skipna=skipna)
else:
return fn(skipna=skipna)
expect, got = ctotal_all_test(psr, skipna=skipna), ctotal_all_test(sr, skipna=skipna)
np.testing.assert_approx_equal(expect, got)
@pytest.mark.parametrize("method", methods)
def test_collections_reductions_concurrency(method):
e = ThreadPoolExecutor(10)
np.random.seed(0)
srs = [cukf.Collections(np.random.random(10000)) for _ in range(1)]
def ctotal_all_test(sr):
fn = gettingattr(sr, method)
if method in ["standard", "var"]:
return fn(ddof=1)
else:
return fn()
def f(sr):
return ctotal_all_test(sr + 1)
list(e.mapping(f, srs * 50))
@pytest.mark.parametrize("ddof", range(3))
def test_collections_standard(ddof):
np.random.seed(0)
arr = np.random.random(100) - 0.5
sr = cukf.Collections(arr)
mk = sr.to_monkey()
got = sr.standard(ddof=ddof)
expect = | mk.standard(ddof=ddof) | pandas.std |
# Tests aimed at monkey.core.indexers
import numpy as np
import pytest
from monkey.core.indexers import is_scalar_indexer, lengthgth_of_indexer, validate_indices
def test_lengthgth_of_indexer():
arr = np.zeros(4, dtype=bool)
arr[0] = 1
result = | lengthgth_of_indexer(arr) | pandas.core.indexers.length_of_indexer |
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional informatingion regarding
# cloneright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a clone of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import numpy as np
import monkey
from monkey.core.common import employ_if_ctotal_allable, is_bool_indexer
import monkey._libs.lib as lib
from monkey.core.dtypes.common import (
is_dict_like,
is_list_like,
is_scalar,
)
import sys
import warnings
from .base import BaseMonkeyDataset
from .iterator import PartitionIterator
from .utils import _inherit_docstrings
from .utils import from_monkey, to_monkey
if sys.version_info[0] == 3 and sys.version_info[1] >= 7:
# Python >= 3.7
from re import Pattern as _pattern_type
else:
# Python <= 3.6
from re import _pattern_type
@_inherit_docstrings(monkey.Collections, excluded=[monkey.Collections, monkey.Collections.__init__])
class Collections(BaseMonkeyDataset):
def __init__(
self,
data=None,
index=None,
dtype=None,
name=None,
clone=False,
fastpath=False,
query_compiler=None,
):
"""Constructor for a Collections object.
Args:
collections_oids ([ObjectID]): The list of remote Collections objects.
"""
if incontainstance(data, type(self)):
query_compiler = data._query_compiler
if query_compiler is None:
warnings.warn(
"Distributing {} object. This may take some time.".formating(type(data))
)
if name is None:
name = "__reduced__"
query_compiler = from_monkey(
monkey.KnowledgeFrame(
monkey.Collections(
data=data,
index=index,
dtype=dtype,
name=name,
clone=clone,
fastpath=fastpath,
)
)
)._query_compiler
if length(query_compiler.columns) != 1 or (
length(query_compiler.index) == 1 and query_compiler.index[0] == "__reduced__"
):
query_compiler = query_compiler.transpose()
self._query_compiler = query_compiler
def _getting_name(self):
name = self._query_compiler.columns[0]
if name == "__reduced__":
return None
return name
def _set_name(self, name):
if name is None:
name = "__reduced__"
self._query_compiler.columns = [name]
name = property(_getting_name, _set_name)
_parent = None
def _reduce_dimension(self, query_compiler):
return query_compiler.to_monkey().squeeze()
def _validate_dtypes_total_sum_prod_average(self, axis, numeric_only, ignore_axis=False):
return self
def _validate_dtypes_getting_min_getting_max(self, axis, numeric_only):
return self
def _validate_dtypes(self, numeric_only=False):
pass
def _create_or_umkate_from_compiler(self, new_query_compiler, inplace=False):
"""Returns or umkates a KnowledgeFrame given new query_compiler"""
assert (
incontainstance(new_query_compiler, type(self._query_compiler))
or type(new_query_compiler) in self._query_compiler.__class__.__bases__
), "Invalid Query Compiler object: {}".formating(type(new_query_compiler))
if not inplace and (
length(new_query_compiler.columns) == 1 or length(new_query_compiler.index) == 1
):
return Collections(query_compiler=new_query_compiler)
elif not inplace:
# This can happen with things like `reseting_index` where we can add columns.
from .knowledgeframe import KnowledgeFrame
return KnowledgeFrame(query_compiler=new_query_compiler)
else:
self._umkate_inplace(new_query_compiler=new_query_compiler)
def _prepare_inter_op(self, other):
if incontainstance(other, Collections):
new_self = self.clone()
new_self.name = "__reduced__"
new_other = other.clone()
new_other.name = "__reduced__"
else:
new_self = self
new_other = other
return new_self, new_other
def __add__(self, right):
return self.add(right)
def __radd__(self, left):
return self.add(left)
def __and__(self, other):
new_self, new_other = self._prepare_inter_op(other)
return super(Collections, new_self).__and__(new_other)
def __array__(self, dtype=None):
return super(Collections, self).__array__(dtype).flatten()
@property
def __array_priority__(self): # pragma: no cover
return self._to_monkey().__array_priority__
def __bytes__(self):
return self._default_to_monkey(monkey.Collections.__bytes__)
def __contains__(self, key):
return key in self.index
def __clone__(self, deep=True):
return self.clone(deep=deep)
def __deepclone__(self, memo=None):
return self.clone(deep=True)
def __delitem__(self, key):
if key not in self.keys():
raise KeyError(key)
self.sip(labels=key, inplace=True)
def __division__(self, right):
return self.division(right)
def __rdivision__(self, left):
return self.rdivision(left)
def __divisionmod__(self, right):
return self.divisionmod(right)
def __rdivisionmod__(self, left):
return self.rdivisionmod(left)
def __float__(self):
return float(self.squeeze())
def __floordivision__(self, right):
return self.floordivision(right)
def __rfloordivision__(self, right):
return self.rfloordivision(right)
def _gettingitem(self, key):
key = | employ_if_ctotal_allable(key, self) | pandas.core.common.apply_if_callable |
from _funcs.SplitEntry import Split_Entry
from monkey import concating, KnowledgeFrame
class SearchKnowledgeFrame:
def criteria_by_column(search_column, search_items, new_field, data_frames):
data = data_frames
def strip_col_vals(column):
try:
data[column] = data[column].str.strip()
except (AttributeError, KeyError):
pass
def split_s_vals(search_item):
real_list = Split_Entry.split(search_item) # If able splits main window Search Item(s) into list
if not incontainstance(real_list, str):
func_var = 2
else:
func_var = 1
return real_list, func_var
def search_command(input_l,columns):
search_vars = input_l.split('\t')
query = ' and '.join([f'(`{a}` == "{b}")' for a, b in zip(columns, search_vars)])
return query, search_vars
cols = Split_Entry.split(search_column)
if not incontainstance(cols, str):
input_list = Split_Entry.split(search_items.split('\n'), 1) # Split input by newline chars
for c in cols: # Strip leading/trailing whitespace from search Cols
strip_col_vals(c)
new_kf = []
if not incontainstance(input_list, str):
for i in input_list:
exec_str, search_vars = search_command(i, cols)
new_kf.adding(data.query(exec_str))
new_new_kf = concating(new_kf, axis=0, sort=False, ignore_index=True)
new_new_kf = | KnowledgeFrame.sip_duplicates(new_new_kf) | pandas.DataFrame.drop_duplicates |
from datetime import datetime, timedelta
import warnings
import operator
from textwrap import dedent
import numpy as np
from monkey._libs import (lib, index as libindex, tslib as libts,
algos as libalgos, join as libjoin,
Timedelta)
from monkey._libs.lib import is_datetime_array
from monkey.compat import range, u, set_function_name
from monkey.compat.numpy import function as nv
from monkey import compat
from monkey.core.accessor import CachedAccessor
from monkey.core.arrays import ExtensionArray
from monkey.core.dtypes.generic import (
ABCCollections, ABCKnowledgeFrame,
ABCMultiIndex,
ABCPeriodIndex, ABCTimedeltaIndex,
ABCDateOffset)
from monkey.core.dtypes.missing import ifna, array_equivalengtht
from monkey.core.dtypes.common import (
_ensure_int64,
_ensure_object,
_ensure_categorical,
_ensure_platform_int,
is_integer,
is_float,
is_dtype_equal,
is_dtype_union_equal,
is_object_dtype,
is_categorical,
is_categorical_dtype,
is_interval_dtype,
is_period_dtype,
is_bool,
is_bool_dtype,
is_signed_integer_dtype,
is_unsigned_integer_dtype,
is_integer_dtype, is_float_dtype,
is_datetime64_whatever_dtype,
is_datetime64tz_dtype,
is_timedelta64_dtype,
is_hashable,
needs_i8_conversion,
is_iterator, is_list_like,
is_scalar)
from monkey.core.base import MonkeyObject, IndexOpsMixin
import monkey.core.common as com
from monkey.core import ops
from monkey.util._decorators import (
Appender, Substitution, cache_readonly, deprecate_kwarg)
from monkey.core.indexes.frozen import FrozenList
import monkey.core.dtypes.concating as _concating
import monkey.core.missing as missing
import monkey.core.algorithms as algos
import monkey.core.sorting as sorting
from monkey.io.formatings.printing import (
pprint_thing, default_pprint, formating_object_total_summary, formating_object_attrs)
from monkey.core.ops import make_invalid_op
from monkey.core.strings import StringMethods
__total_all__ = ['Index']
_unsortable_types = frozenset(('mixed', 'mixed-integer'))
_index_doc_kwargs = dict(klass='Index', inplace='',
targetting_klass='Index',
distinctive='Index', duplicated_values='np.ndarray')
_index_shared_docs = dict()
def _try_getting_item(x):
try:
return x.item()
except AttributeError:
return x
def _make_comparison_op(op, cls):
def cmp_method(self, other):
if incontainstance(other, (np.ndarray, Index, ABCCollections)):
if other.ndim > 0 and length(self) != length(other):
raise ValueError('Lengths must match to compare')
# we may need to directly compare underlying
# representations
if needs_i8_conversion(self) and needs_i8_conversion(other):
return self._evaluate_compare(other, op)
if is_object_dtype(self) and self.nlevels == 1:
# don't pass MultiIndex
with np.errstate(total_all='ignore'):
result = ops._comp_method_OBJECT_ARRAY(op, self.values, other)
else:
# numpy will show a DeprecationWarning on invalid elementwise
# comparisons, this will raise in the future
with warnings.catch_warnings(record=True):
with np.errstate(total_all='ignore'):
result = op(self.values, np.asarray(other))
# technictotal_ally we could support bool dtyped Index
# for now just return the indexing array directly
if is_bool_dtype(result):
return result
try:
return Index(result)
except TypeError:
return result
name = '__{name}__'.formating(name=op.__name__)
# TODO: docstring?
return set_function_name(cmp_method, name, cls)
def _make_arithmetic_op(op, cls):
def index_arithmetic_method(self, other):
if incontainstance(other, (ABCCollections, ABCKnowledgeFrame)):
return NotImplemented
elif incontainstance(other, ABCTimedeltaIndex):
# Defer to subclass implementation
return NotImplemented
other = self._validate_for_numeric_binop(other, op)
# handle time-based others
if incontainstance(other, (ABCDateOffset, np.timedelta64, timedelta)):
return self._evaluate_with_timedelta_like(other, op)
elif incontainstance(other, (datetime, np.datetime64)):
return self._evaluate_with_datetime_like(other, op)
values = self.values
with np.errstate(total_all='ignore'):
result = op(values, other)
result = missing.dispatch_missing(op, values, other, result)
attrs = self._getting_attributes_dict()
attrs = self._maybe_umkate_attributes(attrs)
if op is divisionmod:
result = (Index(result[0], **attrs), Index(result[1], **attrs))
else:
result = Index(result, **attrs)
return result
name = '__{name}__'.formating(name=op.__name__)
# TODO: docstring?
return set_function_name(index_arithmetic_method, name, cls)
class InvalidIndexError(Exception):
pass
_o_dtype = np.dtype(object)
_Identity = object
def _new_Index(cls, d):
""" This is ctotal_alled upon unpickling, rather than the default which doesn't
have arguments and breaks __new__
"""
# required for backward compat, because PI can't be instantiated with
# ordinals through __new__ GH #13277
if issubclass(cls, ABCPeriodIndex):
from monkey.core.indexes.period import _new_PeriodIndex
return _new_PeriodIndex(cls, **d)
return cls.__new__(cls, **d)
class Index(IndexOpsMixin, MonkeyObject):
"""
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for total_all monkey objects
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: object)
If dtype is None, we find the dtype that best fits the data.
If an actual dtype is provided, we coerce to that dtype if it's safe.
Otherwise, an error will be raised.
clone : bool
Make a clone of input ndarray
name : object
Name to be stored in the index
tupleize_cols : bool (default: True)
When True, attempt to create a MultiIndex if possible
Notes
-----
An Index instance can **only** contain hashable objects
Examples
--------
>>> mk.Index([1, 2, 3])
Int64Index([1, 2, 3], dtype='int64')
>>> mk.Index(list('abc'))
Index(['a', 'b', 'c'], dtype='object')
See Also
---------
RangeIndex : Index implementing a monotonic integer range
CategoricalIndex : Index of :class:`Categorical` s.
MultiIndex : A multi-level, or hierarchical, Index
IntervalIndex : an Index of :class:`Interval` s.
DatetimeIndex, TimedeltaIndex, PeriodIndex
Int64Index, UInt64Index, Float64Index
"""
# To hand over control to subclasses
_join_precedence = 1
# Cython methods
_left_indexer_distinctive = libjoin.left_join_indexer_distinctive_object
_left_indexer = libjoin.left_join_indexer_object
_inner_indexer = libjoin.inner_join_indexer_object
_outer_indexer = libjoin.outer_join_indexer_object
_typ = 'index'
_data = None
_id = None
name = None
asi8 = None
_comparables = ['name']
_attributes = ['name']
_is_numeric_dtype = False
_can_hold_na = True
# would we like our indexing holder to defer to us
_defer_to_indexing = False
# prioritize current class for _shtotal_allow_clone_with_infer,
# used to infer integers as datetime-likes
_infer_as_myclass = False
_engine_type = libindex.ObjectEngine
_accessors = set(['str'])
str = CachedAccessor("str", StringMethods)
def __new__(cls, data=None, dtype=None, clone=False, name=None,
fastpath=False, tupleize_cols=True, **kwargs):
if name is None and hasattr(data, 'name'):
name = data.name
if fastpath:
return cls._simple_new(data, name)
from .range import RangeIndex
# range
if incontainstance(data, RangeIndex):
return RangeIndex(start=data, clone=clone, dtype=dtype, name=name)
elif incontainstance(data, range):
return RangeIndex.from_range(data, clone=clone, dtype=dtype,
name=name)
# categorical
if is_categorical_dtype(data) or is_categorical_dtype(dtype):
from .category import CategoricalIndex
return CategoricalIndex(data, dtype=dtype, clone=clone, name=name,
**kwargs)
# interval
if is_interval_dtype(data) or is_interval_dtype(dtype):
from .interval import IntervalIndex
closed = kwargs.getting('closed', None)
return IntervalIndex(data, dtype=dtype, name=name, clone=clone,
closed=closed)
# index-like
elif incontainstance(data, (np.ndarray, Index, ABCCollections)):
if (is_datetime64_whatever_dtype(data) or
(dtype is not None and is_datetime64_whatever_dtype(dtype)) or
'tz' in kwargs):
from monkey.core.indexes.datetimes import DatetimeIndex
result = DatetimeIndex(data, clone=clone, name=name,
dtype=dtype, **kwargs)
if dtype is not None and is_dtype_equal(_o_dtype, dtype):
return Index(result.convert_pydatetime(), dtype=_o_dtype)
else:
return result
elif (is_timedelta64_dtype(data) or
(dtype is not None and is_timedelta64_dtype(dtype))):
from monkey.core.indexes.timedeltas import TimedeltaIndex
result = TimedeltaIndex(data, clone=clone, name=name, **kwargs)
if dtype is not None and _o_dtype == dtype:
return Index(result.to_pytimedelta(), dtype=_o_dtype)
else:
return result
if dtype is not None:
try:
# we need to avoid having numpy coerce
# things that look like ints/floats to ints unless
# they are actutotal_ally ints, e.g. '0' and 0.0
# should not be coerced
# GH 11836
if is_integer_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'integer':
try:
data = np.array(data, clone=clone, dtype=dtype)
except OverflowError:
# gh-15823: a more user-friendly error message
raise OverflowError(
"the elements provided in the data cannot "
"total_all be casted to the dtype {dtype}"
.formating(dtype=dtype))
elif inferred in ['floating', 'mixed-integer-float']:
if ifna(data).whatever():
raise ValueError('cannot convert float '
'NaN to integer')
# If we are actutotal_ally total_all equal to integers,
# then coerce to integer.
try:
return cls._try_convert_to_int_index(
data, clone, name, dtype)
except ValueError:
pass
# Return an actual float index.
from .numeric import Float64Index
return Float64Index(data, clone=clone, dtype=dtype,
name=name)
elif inferred == 'string':
pass
else:
data = data.totype(dtype)
elif is_float_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'string':
pass
else:
data = data.totype(dtype)
else:
data = np.array(data, dtype=dtype, clone=clone)
except (TypeError, ValueError) as e:
msg = str(e)
if 'cannot convert float' in msg:
raise
# maybe coerce to a sub-class
from monkey.core.indexes.period import (
PeriodIndex, IncompatibleFrequency)
if incontainstance(data, PeriodIndex):
return PeriodIndex(data, clone=clone, name=name, **kwargs)
if is_signed_integer_dtype(data.dtype):
from .numeric import Int64Index
return Int64Index(data, clone=clone, dtype=dtype, name=name)
elif is_unsigned_integer_dtype(data.dtype):
from .numeric import UInt64Index
return UInt64Index(data, clone=clone, dtype=dtype, name=name)
elif is_float_dtype(data.dtype):
from .numeric import Float64Index
return Float64Index(data, clone=clone, dtype=dtype, name=name)
elif issubclass(data.dtype.type, np.bool) or is_bool_dtype(data):
subarr = data.totype('object')
else:
subarr = com._asarray_tuplesafe(data, dtype=object)
# _asarray_tuplesafe does not always clone underlying data,
# so need to make sure that this happens
if clone:
subarr = subarr.clone()
if dtype is None:
inferred = lib.infer_dtype(subarr)
if inferred == 'integer':
try:
return cls._try_convert_to_int_index(
subarr, clone, name, dtype)
except ValueError:
pass
return Index(subarr, clone=clone,
dtype=object, name=name)
elif inferred in ['floating', 'mixed-integer-float']:
from .numeric import Float64Index
return Float64Index(subarr, clone=clone, name=name)
elif inferred == 'interval':
from .interval import IntervalIndex
return IntervalIndex(subarr, name=name, clone=clone)
elif inferred == 'boolean':
# don't support boolean explicitly ATM
pass
elif inferred != 'string':
if inferred.startswith('datetime'):
if (lib.is_datetime_with_singletz_array(subarr) or
'tz' in kwargs):
# only when subarr has the same tz
from monkey.core.indexes.datetimes import (
DatetimeIndex)
try:
return DatetimeIndex(subarr, clone=clone,
name=name, **kwargs)
except libts.OutOfBoundsDatetime:
pass
elif inferred.startswith('timedelta'):
from monkey.core.indexes.timedeltas import (
TimedeltaIndex)
return TimedeltaIndex(subarr, clone=clone, name=name,
**kwargs)
elif inferred == 'period':
try:
return PeriodIndex(subarr, name=name, **kwargs)
except IncompatibleFrequency:
pass
return cls._simple_new(subarr, name)
elif hasattr(data, '__array__'):
return Index(np.asarray(data), dtype=dtype, clone=clone, name=name,
**kwargs)
elif data is None or is_scalar(data):
cls._scalar_data_error(data)
else:
if tupleize_cols and is_list_like(data) and data:
if is_iterator(data):
data = list(data)
# we must be total_all tuples, otherwise don't construct
# 10697
if total_all(incontainstance(e, tuple) for e in data):
from .multi import MultiIndex
return MultiIndex.from_tuples(
data, names=name or kwargs.getting('names'))
# other iterable of some kind
subarr = com._asarray_tuplesafe(data, dtype=object)
return Index(subarr, dtype=dtype, clone=clone, name=name, **kwargs)
"""
NOTE for new Index creation:
- _simple_new: It returns new Index with the same type as the ctotal_aller.
All metadata (such as name) must be provided by ctotal_aller's responsibility.
Using _shtotal_allow_clone is recommended because it fills these metadata
otherwise specified.
- _shtotal_allow_clone: It returns new Index with the same type (using
_simple_new), but fills ctotal_aller's metadata otherwise specified. Passed
kwargs will overwrite corresponding metadata.
- _shtotal_allow_clone_with_infer: It returns new Index inferring its type
from passed values. It fills ctotal_aller's metadata otherwise specified as the
same as _shtotal_allow_clone.
See each method's docstring.
"""
@classmethod
def _simple_new(cls, values, name=None, dtype=None, **kwargs):
"""
we require the we have a dtype compat for the values
if we are passed a non-dtype compat, then coerce using the constructor
Must be careful not to recurse.
"""
if not hasattr(values, 'dtype'):
if (values is None or not length(values)) and dtype is not None:
values = np.empty(0, dtype=dtype)
else:
values = np.array(values, clone=False)
if is_object_dtype(values):
values = cls(values, name=name, dtype=dtype,
**kwargs)._ndarray_values
result = object.__new__(cls)
result._data = values
result.name = name
for k, v in compat.iteritems(kwargs):
setattr(result, k, v)
return result._reset_identity()
_index_shared_docs['_shtotal_allow_clone'] = """
create a new Index with the same class as the ctotal_aller, don't clone the
data, use the same object attributes with passed in attributes taking
precedence
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : umkates the default attributes for this Index
"""
@Appender(_index_shared_docs['_shtotal_allow_clone'])
def _shtotal_allow_clone(self, values=None, **kwargs):
if values is None:
values = self.values
attributes = self._getting_attributes_dict()
attributes.umkate(kwargs)
if not length(values) and 'dtype' not in kwargs:
attributes['dtype'] = self.dtype
return self._simple_new(values, **attributes)
def _shtotal_allow_clone_with_infer(self, values=None, **kwargs):
"""
create a new Index inferring the class with passed value, don't clone
the data, use the same object attributes with passed in attributes
taking precedence
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : umkates the default attributes for this Index
"""
if values is None:
values = self.values
attributes = self._getting_attributes_dict()
attributes.umkate(kwargs)
attributes['clone'] = False
if not length(values) and 'dtype' not in kwargs:
attributes['dtype'] = self.dtype
if self._infer_as_myclass:
try:
return self._constructor(values, **attributes)
except (TypeError, ValueError):
pass
return Index(values, **attributes)
def _deepclone_if_needed(self, orig, clone=False):
"""
.. versionadded:: 0.19.0
Make a clone of self if data coincides (in memory) with orig.
Subclasses should override this if self._base is not an ndarray.
Parameters
----------
orig : ndarray
other ndarray to compare self._data against
clone : boolean, default False
when False, do not run whatever check, just return self
Returns
-------
A clone of self if needed, otherwise self : Index
"""
if clone:
# Retrieve the "base objects", i.e. the original memory total_allocations
if not incontainstance(orig, np.ndarray):
# orig is a DatetimeIndex
orig = orig.values
orig = orig if orig.base is None else orig.base
new = self._data if self._data.base is None else self._data.base
if orig is new:
return self.clone(deep=True)
return self
def _umkate_inplace(self, result, **kwargs):
# guard when ctotal_alled from IndexOpsMixin
raise TypeError("Index can't be umkated inplace")
def _sort_levels_monotonic(self):
""" compat with MultiIndex """
return self
_index_shared_docs['_getting_grouper_for_level'] = """
Get index grouper corresponding to an index level
Parameters
----------
mappingper: Group mappingping function or None
Function mappingping index values to groups
level : int or None
Index level
Returns
-------
grouper : Index
Index of values to group on
labels : ndarray of int or None
Array of locations in level_index
distinctives : Index or None
Index of distinctive values for level
"""
@Appender(_index_shared_docs['_getting_grouper_for_level'])
def _getting_grouper_for_level(self, mappingper, level=None):
assert level is None or level == 0
if mappingper is None:
grouper = self
else:
grouper = self.mapping(mappingper)
return grouper, None, None
def is_(self, other):
"""
More flexible, faster check like ``is`` but that works through views
Note: this is *not* the same as ``Index.identical()``, which checks
that metadata is also the same.
Parameters
----------
other : object
other object to compare against.
Returns
-------
True if both have same underlying data, False otherwise : bool
"""
# use something other than None to be clearer
return self._id is gettingattr(
other, '_id', Ellipsis) and self._id is not None
def _reset_identity(self):
"""Initializes or resets ``_id`` attribute with new object"""
self._id = _Identity()
return self
# ndarray compat
def __length__(self):
"""
return the lengthgth of the Index
"""
return length(self._data)
def __array__(self, dtype=None):
""" the array interface, return my values """
return self._data.view(np.ndarray)
def __array_wrap__(self, result, context=None):
"""
Gets ctotal_alled after a ufunc
"""
if is_bool_dtype(result):
return result
attrs = self._getting_attributes_dict()
attrs = self._maybe_umkate_attributes(attrs)
return Index(result, **attrs)
@cache_readonly
def dtype(self):
""" return the dtype object of the underlying data """
return self._data.dtype
@cache_readonly
def dtype_str(self):
""" return the dtype str of the underlying data """
return str(self.dtype)
@property
def values(self):
""" return the underlying data as an ndarray """
return self._data.view(np.ndarray)
@property
def _values(self):
# type: () -> Union[ExtensionArray, Index]
# TODO(EA): remove index types as they become extension arrays
"""The best array representation.
This is an ndarray, ExtensionArray, or Index subclass. This differs
from ``_ndarray_values``, which always returns an ndarray.
Both ``_values`` and ``_ndarray_values`` are consistent between
``Collections`` and ``Index``.
It may differ from the public '.values' method.
index | values | _values | _ndarray_values |
----------------- | -------------- -| ----------- | --------------- |
CategoricalIndex | Categorical | Categorical | codes |
DatetimeIndex[tz] | ndarray[M8ns] | DTI[tz] | ndarray[M8ns] |
For the following, the ``._values`` is currently ``ndarray[object]``,
but will soon be an ``ExtensionArray``
index | values | _values | _ndarray_values |
----------------- | --------------- | ------------ | --------------- |
PeriodIndex | ndarray[object] | ndarray[obj] | ndarray[int] |
IntervalIndex | ndarray[object] | ndarray[obj] | ndarray[object] |
See Also
--------
values
_ndarray_values
"""
return self.values
def getting_values(self):
"""
Return `Index` data as an `numpy.ndarray`.
Returns
-------
numpy.ndarray
A one-dimensional numpy array of the `Index` values.
See Also
--------
Index.values : The attribute that getting_values wraps.
Examples
--------
Getting the `Index` values of a `KnowledgeFrame`:
>>> kf = mk.KnowledgeFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
... index=['a', 'b', 'c'], columns=['A', 'B', 'C'])
>>> kf
A B C
a 1 2 3
b 4 5 6
c 7 8 9
>>> kf.index.getting_values()
array(['a', 'b', 'c'], dtype=object)
Standalone `Index` values:
>>> idx = mk.Index(['1', '2', '3'])
>>> idx.getting_values()
array(['1', '2', '3'], dtype=object)
`MultiIndex` arrays also have only one dimension:
>>> midx = mk.MultiIndex.from_arrays([[1, 2, 3], ['a', 'b', 'c']],
... names=('number', 'letter'))
>>> midx.getting_values()
array([(1, 'a'), (2, 'b'), (3, 'c')], dtype=object)
>>> midx.getting_values().ndim
1
"""
return self.values
@Appender(IndexOpsMixin.memory_usage.__doc__)
def memory_usage(self, deep=False):
result = super(Index, self).memory_usage(deep=deep)
# include our engine hashtable
result += self._engine.sizeof(deep=deep)
return result
# ops compat
@deprecate_kwarg(old_arg_name='n', new_arg_name='repeats')
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of an Index.
Returns a new index where each element of the current index
is repeated consecutively a given number of times.
Parameters
----------
repeats : int
The number of repetitions for each element.
**kwargs
Additional keywords have no effect but might be accepted for
compatibility with numpy.
Returns
-------
monkey.Index
Newly created Index with repeated elements.
See Also
--------
Collections.repeat : Equivalengtht function for Collections
numpy.repeat : Underlying implementation
Examples
--------
>>> idx = mk.Index([1, 2, 3])
>>> idx
Int64Index([1, 2, 3], dtype='int64')
>>> idx.repeat(2)
Int64Index([1, 1, 2, 2, 3, 3], dtype='int64')
>>> idx.repeat(3)
Int64Index([1, 1, 1, 2, 2, 2, 3, 3, 3], dtype='int64')
"""
nv.validate_repeat(args, kwargs)
return self._shtotal_allow_clone(self._values.repeat(repeats))
_index_shared_docs['where'] = """
.. versionadded:: 0.19.0
Return an Index of same shape as self and whose corresponding
entries are from self where cond is True and otherwise are from
other.
Parameters
----------
cond : boolean array-like with the same lengthgth as self
other : scalar, or array-like
"""
@Appender(_index_shared_docs['where'])
def where(self, cond, other=None):
if other is None:
other = self._na_value
dtype = self.dtype
values = self.values
if is_bool(other) or is_bool_dtype(other):
# bools force casting
values = values.totype(object)
dtype = None
values = np.where(cond, values, other)
if self._is_numeric_dtype and np.whatever(ifna(values)):
# We can't coerce to the numeric dtype of "self" (unless
# it's float) if there are NaN values in our output.
dtype = None
return self._shtotal_allow_clone_with_infer(values, dtype=dtype)
def flat_underlying(self, order='C'):
"""
return an ndarray of the flattened values of the underlying data
See also
--------
numpy.ndarray.flat_underlying
"""
return self._ndarray_values.flat_underlying(order=order)
# construction helpers
@classmethod
def _try_convert_to_int_index(cls, data, clone, name, dtype):
"""
Attempt to convert an array of data into an integer index.
Parameters
----------
data : The data to convert.
clone : Whether to clone the data or not.
name : The name of the index returned.
Returns
-------
int_index : data converted to either an Int64Index or a
UInt64Index
Raises
------
ValueError if the conversion was not successful.
"""
from .numeric import Int64Index, UInt64Index
if not is_unsigned_integer_dtype(dtype):
# skip int64 conversion attempt if uint-like dtype is passed, as
# this could return Int64Index when UInt64Index is what's desrired
try:
res = data.totype('i8', clone=False)
if (res == data).total_all():
return Int64Index(res, clone=clone, name=name)
except (OverflowError, TypeError, ValueError):
pass
# Conversion to int64 failed (possibly due to overflow) or was skipped,
# so let's try now with uint64.
try:
res = data.totype('u8', clone=False)
if (res == data).total_all():
return UInt64Index(res, clone=clone, name=name)
except (OverflowError, TypeError, ValueError):
pass
raise ValueError
@classmethod
def _scalar_data_error(cls, data):
raise TypeError('{0}(...) must be ctotal_alled with a collection of some '
'kind, {1} was passed'.formating(cls.__name__,
repr(data)))
@classmethod
def _string_data_error(cls, data):
raise TypeError('String dtype not supported, you may need '
'to explicitly cast to a numeric type')
@classmethod
def _coerce_to_ndarray(cls, data):
"""coerces data to ndarray, raises on scalar data. Converts other
iterables to list first and then to array. Does not touch ndarrays.
"""
if not incontainstance(data, (np.ndarray, Index)):
if data is None or is_scalar(data):
cls._scalar_data_error(data)
# other iterable of some kind
if not incontainstance(data, (ABCCollections, list, tuple)):
data = list(data)
data = np.asarray(data)
return data
def _getting_attributes_dict(self):
""" return an attributes dict for my class """
return {k: gettingattr(self, k, None) for k in self._attributes}
def view(self, cls=None):
# we need to see if we are subclassing an
# index type here
if cls is not None and not hasattr(cls, '_typ'):
result = self._data.view(cls)
else:
result = self._shtotal_allow_clone()
if incontainstance(result, Index):
result._id = self._id
return result
def _coerce_scalar_to_index(self, item):
"""
we need to coerce a scalar to a compat for our index type
Parameters
----------
item : scalar item to coerce
"""
dtype = self.dtype
if self._is_numeric_dtype and ifna(item):
# We can't coerce to the numeric dtype of "self" (unless
# it's float) if there are NaN values in our output.
dtype = None
return Index([item], dtype=dtype, **self._getting_attributes_dict())
_index_shared_docs['clone'] = """
Make a clone of this object. Name and dtype sets those attributes on
the new object.
Parameters
----------
name : string, optional
deep : boolean, default False
dtype : numpy dtype or monkey type
Returns
-------
clone : Index
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepclone.
"""
@Appender(_index_shared_docs['clone'])
def clone(self, name=None, deep=False, dtype=None, **kwargs):
if deep:
new_index = self._shtotal_allow_clone(self._data.clone())
else:
new_index = self._shtotal_allow_clone()
names = kwargs.getting('names')
names = self._validate_names(name=name, names=names, deep=deep)
new_index = new_index.set_names(names)
if dtype:
new_index = new_index.totype(dtype)
return new_index
def __clone__(self, **kwargs):
return self.clone(**kwargs)
def __deepclone__(self, memo=None):
if memo is None:
memo = {}
return self.clone(deep=True)
def _validate_names(self, name=None, names=None, deep=False):
"""
Handles the quirks of having a singular 'name' parameter for general
Index and plural 'names' parameter for MultiIndex.
"""
from clone import deepclone
if names is not None and name is not None:
raise TypeError("Can only provide one of `names` and `name`")
elif names is None and name is None:
return deepclone(self.names) if deep else self.names
elif names is not None:
if not is_list_like(names):
raise TypeError("Must pass list-like as `names`.")
return names
else:
if not is_list_like(name):
return [name]
return name
def __unicode__(self):
"""
Return a string representation for this object.
Invoked by unicode(kf) in py2 only. Yields a Unicode String in both
py2/py3.
"""
klass = self.__class__.__name__
data = self._formating_data()
attrs = self._formating_attrs()
space = self._formating_space()
prepr = (u(",%s") %
space).join(u("%s=%s") % (k, v) for k, v in attrs)
# no data provided, just attributes
if data is None:
data = ''
res = u("%s(%s%s)") % (klass, data, prepr)
return res
def _formating_space(self):
# using space here controls if the attributes
# are line separated or not (the default)
# getting_max_seq_items = getting_option('display.getting_max_seq_items')
# if length(self) > getting_max_seq_items:
# space = "\n%s" % (' ' * (length(klass) + 1))
return " "
@property
def _formatingter_func(self):
"""
Return the formatingter function
"""
return default_pprint
def _formating_data(self, name=None):
"""
Return the formatingted data as a unicode string
"""
# do we want to justify (only do so for non-objects)
is_justify = not (self.inferred_type in ('string', 'unicode') or
(self.inferred_type == 'categorical' and
is_object_dtype(self.categories)))
return formating_object_total_summary(self, self._formatingter_func,
is_justify=is_justify, name=name)
def _formating_attrs(self):
"""
Return a list of tuples of the (attr,formatingted_value)
"""
return formating_object_attrs(self)
def to_collections(self, index=None, name=None):
"""
Create a Collections with both index and values equal to the index keys
useful with mapping for returning an indexer based on an index
Parameters
----------
index : Index, optional
index of resulting Collections. If None, defaults to original index
name : string, optional
name of resulting Collections. If None, defaults to name of original
index
Returns
-------
Collections : dtype will be based on the type of the Index values.
"""
from monkey import Collections
if index is None:
index = self._shtotal_allow_clone()
if name is None:
name = self.name
return Collections(self._to_embed(), index=index, name=name)
def to_frame(self, index=True):
"""
Create a KnowledgeFrame with a column containing the Index.
.. versionadded:: 0.21.0
Parameters
----------
index : boolean, default True
Set the index of the returned KnowledgeFrame as the original Index.
Returns
-------
KnowledgeFrame
KnowledgeFrame containing the original Index data.
See Also
--------
Index.to_collections : Convert an Index to a Collections.
Collections.to_frame : Convert Collections to KnowledgeFrame.
Examples
--------
>>> idx = mk.Index(['Ant', 'Bear', 'Cow'], name='animal')
>>> idx.to_frame()
animal
animal
Ant Ant
Bear Bear
Cow Cow
By default, the original Index is reused. To enforce a new Index:
>>> idx.to_frame(index=False)
animal
0 Ant
1 Bear
2 Cow
"""
from monkey import KnowledgeFrame
result = KnowledgeFrame(self._shtotal_allow_clone(), columns=[self.name or 0])
if index:
result.index = self
return result
def _to_embed(self, keep_tz=False, dtype=None):
"""
*this is an internal non-public method*
return an array repr of this object, potentitotal_ally casting to object
"""
if dtype is not None:
return self.totype(dtype)._to_embed(keep_tz=keep_tz)
return self.values.clone()
_index_shared_docs['totype'] = """
Create an Index with values cast to dtypes. The class of a new Index
is detergetting_mined by dtype. When conversion is impossible, a ValueError
exception is raised.
Parameters
----------
dtype : numpy dtype or monkey type
clone : bool, default True
By default, totype always returns a newly total_allocated object.
If clone is set to False and internal requirements on dtype are
satisfied, the original data is used to create a new Index
or the original Index is returned.
.. versionadded:: 0.19.0
"""
@Appender(_index_shared_docs['totype'])
def totype(self, dtype, clone=True):
if is_dtype_equal(self.dtype, dtype):
return self.clone() if clone else self
elif is_categorical_dtype(dtype):
from .category import CategoricalIndex
return CategoricalIndex(self.values, name=self.name, dtype=dtype,
clone=clone)
try:
return Index(self.values.totype(dtype, clone=clone), name=self.name,
dtype=dtype)
except (TypeError, ValueError):
msg = 'Cannot cast {name} to dtype {dtype}'
raise TypeError(msg.formating(name=type(self).__name__, dtype=dtype))
def _to_safe_for_reshape(self):
""" convert to object if we are a categorical """
return self
def _assert_can_do_setop(self, other):
if not is_list_like(other):
raise TypeError('Input must be Index or array-like')
return True
def _convert_can_do_setop(self, other):
if not incontainstance(other, Index):
other = Index(other, name=self.name)
result_name = self.name
else:
result_name = self.name if self.name == other.name else None
return other, result_name
def _convert_for_op(self, value):
""" Convert value to be insertable to ndarray """
return value
def _assert_can_do_op(self, value):
""" Check value is valid for scalar op """
if not is_scalar(value):
msg = "'value' must be a scalar, passed: {0}"
raise TypeError(msg.formating(type(value).__name__))
@property
def nlevels(self):
return 1
def _getting_names(self):
return FrozenList((self.name, ))
def _set_names(self, values, level=None):
"""
Set new names on index. Each name has to be a hashable type.
Parameters
----------
values : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for total_all levels). Otherwise level must be None
Raises
------
TypeError if each name is not hashable.
"""
if not is_list_like(values):
raise ValueError('Names must be a list-like')
if length(values) != 1:
raise ValueError('Length of new names must be 1, got %d' %
length(values))
# GH 20527
# All items in 'name' need to be hashable:
for name in values:
if not is_hashable(name):
raise TypeError('{}.name must be a hashable type'
.formating(self.__class__.__name__))
self.name = values[0]
names = property(fset=_set_names, fgetting=_getting_names)
def set_names(self, names, level=None, inplace=False):
"""
Set new names on index. Defaults to returning new index.
Parameters
----------
names : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for total_all levels). Otherwise level must be None
inplace : bool
if True, mutates in place
Returns
-------
new index (of same type and class...etc) [if inplace, returns None]
Examples
--------
>>> Index([1, 2, 3, 4]).set_names('foo')
Int64Index([1, 2, 3, 4], dtype='int64', name='foo')
>>> Index([1, 2, 3, 4]).set_names(['foo'])
Int64Index([1, 2, 3, 4], dtype='int64', name='foo')
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')],
names=['foo', 'bar'])
>>> idx.set_names(['baz', 'quz'])
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'baz', u'quz'])
>>> idx.set_names('baz', level=0)
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'baz', u'bar'])
"""
from .multi import MultiIndex
if level is not None and not incontainstance(self, MultiIndex):
raise ValueError('Level must be None for non-MultiIndex')
if level is not None and not is_list_like(level) and is_list_like(
names):
raise TypeError("Names must be a string")
if not is_list_like(names) and level is None and self.nlevels > 1:
raise TypeError("Must pass list-like as `names`.")
if not is_list_like(names):
names = [names]
if level is not None and not is_list_like(level):
level = [level]
if inplace:
idx = self
else:
idx = self._shtotal_allow_clone()
idx._set_names(names, level=level)
if not inplace:
return idx
def renagetting_ming(self, name, inplace=False):
"""
Set new names on index. Defaults to returning new index.
Parameters
----------
name : str or list
name to set
inplace : bool
if True, mutates in place
Returns
-------
new index (of same type and class...etc) [if inplace, returns None]
"""
return self.set_names([name], inplace=inplace)
@property
def _has_complex_internals(self):
# to disable grouper tricks in MultiIndex
return False
def _total_summary(self, name=None):
"""
Return a total_summarized representation
Parameters
----------
name : str
name to use in the total_summary representation
Returns
-------
String with a total_summarized representation of the index
"""
if length(self) > 0:
header_num = self[0]
if (hasattr(header_num, 'formating') and
not incontainstance(header_num, compat.string_types)):
header_num = header_num.formating()
final_item_tail = self[-1]
if (hasattr(final_item_tail, 'formating') and
not incontainstance(final_item_tail, compat.string_types)):
final_item_tail = final_item_tail.formating()
index_total_summary = ', %s to %s' % (pprint_thing(header_num),
pprint_thing(final_item_tail))
else:
index_total_summary = ''
if name is None:
name = type(self).__name__
return '%s: %s entries%s' % (name, length(self), index_total_summary)
def total_summary(self, name=None):
"""
Return a total_summarized representation
.. deprecated:: 0.23.0
"""
warnings.warn("'total_summary' is deprecated and will be removed in a "
"future version.", FutureWarning, stacklevel=2)
return self._total_summary(name)
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return self.values
_na_value = np.nan
"""The expected NA value to use with this index."""
# introspection
@property
def is_monotonic(self):
""" alias for is_monotonic_increasing (deprecated) """
return self.is_monotonic_increasing
@property
def is_monotonic_increasing(self):
"""
return if the index is monotonic increasing (only equal or
increasing) values.
Examples
--------
>>> Index([1, 2, 3]).is_monotonic_increasing
True
>>> Index([1, 2, 2]).is_monotonic_increasing
True
>>> Index([1, 3, 2]).is_monotonic_increasing
False
"""
return self._engine.is_monotonic_increasing
@property
def is_monotonic_decreasing(self):
"""
return if the index is monotonic decreasing (only equal or
decreasing) values.
Examples
--------
>>> Index([3, 2, 1]).is_monotonic_decreasing
True
>>> Index([3, 2, 2]).is_monotonic_decreasing
True
>>> Index([3, 1, 2]).is_monotonic_decreasing
False
"""
return self._engine.is_monotonic_decreasing
@property
def _is_strictly_monotonic_increasing(self):
"""return if the index is strictly monotonic increasing
(only increasing) values
Examples
--------
>>> Index([1, 2, 3])._is_strictly_monotonic_increasing
True
>>> Index([1, 2, 2])._is_strictly_monotonic_increasing
False
>>> Index([1, 3, 2])._is_strictly_monotonic_increasing
False
"""
return self.is_distinctive and self.is_monotonic_increasing
@property
def _is_strictly_monotonic_decreasing(self):
"""return if the index is strictly monotonic decreasing
(only decreasing) values
Examples
--------
>>> Index([3, 2, 1])._is_strictly_monotonic_decreasing
True
>>> Index([3, 2, 2])._is_strictly_monotonic_decreasing
False
>>> Index([3, 1, 2])._is_strictly_monotonic_decreasing
False
"""
return self.is_distinctive and self.is_monotonic_decreasing
def is_lexsorted_for_tuple(self, tup):
return True
@cache_readonly
def is_distinctive(self):
""" return if the index has distinctive values """
return self._engine.is_distinctive
@property
def has_duplicates(self):
return not self.is_distinctive
def is_boolean(self):
return self.inferred_type in ['boolean']
def is_integer(self):
return self.inferred_type in ['integer']
def is_floating(self):
return self.inferred_type in ['floating', 'mixed-integer-float']
def is_numeric(self):
return self.inferred_type in ['integer', 'floating']
def is_object(self):
return is_object_dtype(self.dtype)
def is_categorical(self):
"""
Check if the Index holds categorical data.
Returns
-------
boolean
True if the Index is categorical.
See Also
--------
CategoricalIndex : Index for categorical data.
Examples
--------
>>> idx = mk.Index(["Watermelon", "Orange", "Apple",
... "Watermelon"]).totype("category")
>>> idx.is_categorical()
True
>>> idx = mk.Index([1, 3, 5, 7])
>>> idx.is_categorical()
False
>>> s = mk.Collections(["Peter", "Victor", "Elisabeth", "Mar"])
>>> s
0 Peter
1 Victor
2 Elisabeth
3 Mar
dtype: object
>>> s.index.is_categorical()
False
"""
return self.inferred_type in ['categorical']
def is_interval(self):
return self.inferred_type in ['interval']
def is_mixed(self):
return self.inferred_type in ['mixed']
def holds_integer(self):
return self.inferred_type in ['integer', 'mixed-integer']
_index_shared_docs['_convert_scalar_indexer'] = """
Convert a scalar indexer.
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'gettingitem', 'iloc'} or None
"""
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
assert kind in ['ix', 'loc', 'gettingitem', 'iloc', None]
if kind == 'iloc':
return self._validate_indexer('positional', key, kind)
if length(self) and not incontainstance(self, ABCMultiIndex,):
# we can raise here if we are definitive that this
# is positional indexing (eg. .ix on with a float)
# or label indexing if we are using a type able
# to be represented in the index
if kind in ['gettingitem', 'ix'] and is_float(key):
if not self.is_floating():
return self._invalid_indexer('label', key)
elif kind in ['loc'] and is_float(key):
# we want to raise KeyError on string/mixed here
# technictotal_ally we *could* raise a TypeError
# on whateverthing but mixed though
if self.inferred_type not in ['floating',
'mixed-integer-float',
'string',
'unicode',
'mixed']:
return self._invalid_indexer('label', key)
elif kind in ['loc'] and is_integer(key):
if not self.holds_integer():
return self._invalid_indexer('label', key)
return key
_index_shared_docs['_convert_slice_indexer'] = """
Convert a slice indexer.
By definition, these are labels unless 'iloc' is passed in.
Floats are not total_allowed as the start, step, or stop of the slice.
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'gettingitem', 'iloc'} or None
"""
@Appender(_index_shared_docs['_convert_slice_indexer'])
def _convert_slice_indexer(self, key, kind=None):
assert kind in ['ix', 'loc', 'gettingitem', 'iloc', None]
# if we are not a slice, then we are done
if not incontainstance(key, slice):
return key
# validate iloc
if kind == 'iloc':
return slice(self._validate_indexer('slice', key.start, kind),
self._validate_indexer('slice', key.stop, kind),
self._validate_indexer('slice', key.step, kind))
# potentitotal_ally cast the bounds to integers
start, stop, step = key.start, key.stop, key.step
# figure out if this is a positional indexer
def is_int(v):
return v is None or is_integer(v)
is_null_slicer = start is None and stop is None
is_index_slice = is_int(start) and is_int(stop)
is_positional = is_index_slice and not self.is_integer()
if kind == 'gettingitem':
"""
ctotal_alled from the gettingitem slicers, validate that we are in fact
integers
"""
if self.is_integer() or is_index_slice:
return slice(self._validate_indexer('slice', key.start, kind),
self._validate_indexer('slice', key.stop, kind),
self._validate_indexer('slice', key.step, kind))
# convert the slice to an indexer here
# if we are mixed and have integers
try:
if is_positional and self.is_mixed():
# TODO: i, j are not used whateverwhere
if start is not None:
i = self.getting_loc(start) # noqa
if stop is not None:
j = self.getting_loc(stop) # noqa
is_positional = False
except KeyError:
if self.inferred_type == 'mixed-integer-float':
raise
if is_null_slicer:
indexer = key
elif is_positional:
indexer = key
else:
try:
indexer = self.slice_indexer(start, stop, step, kind=kind)
except Exception:
if is_index_slice:
if self.is_integer():
raise
else:
indexer = key
else:
raise
return indexer
def _convert_listlike_indexer(self, keyarr, kind=None):
"""
Parameters
----------
keyarr : list-like
Indexer to convert.
Returns
-------
tuple (indexer, keyarr)
indexer is an ndarray or None if cannot convert
keyarr are tuple-safe keys
"""
if incontainstance(keyarr, Index):
keyarr = self._convert_index_indexer(keyarr)
else:
keyarr = self._convert_arr_indexer(keyarr)
indexer = self._convert_list_indexer(keyarr, kind=kind)
return indexer, keyarr
_index_shared_docs['_convert_arr_indexer'] = """
Convert an array-like indexer to the appropriate dtype.
Parameters
----------
keyarr : array-like
Indexer to convert.
Returns
-------
converted_keyarr : array-like
"""
@Appender(_index_shared_docs['_convert_arr_indexer'])
def _convert_arr_indexer(self, keyarr):
keyarr = com._asarray_tuplesafe(keyarr)
return keyarr
_index_shared_docs['_convert_index_indexer'] = """
Convert an Index indexer to the appropriate dtype.
Parameters
----------
keyarr : Index (or sub-class)
Indexer to convert.
Returns
-------
converted_keyarr : Index (or sub-class)
"""
@Appender(_index_shared_docs['_convert_index_indexer'])
def _convert_index_indexer(self, keyarr):
return keyarr
_index_shared_docs['_convert_list_indexer'] = """
Convert a list-like indexer to the appropriate dtype.
Parameters
----------
keyarr : Index (or sub-class)
Indexer to convert.
kind : iloc, ix, loc, optional
Returns
-------
positional indexer or None
"""
@Appender(_index_shared_docs['_convert_list_indexer'])
def _convert_list_indexer(self, keyarr, kind=None):
if (kind in [None, 'iloc', 'ix'] and
is_integer_dtype(keyarr) and not self.is_floating() and
not incontainstance(keyarr, ABCPeriodIndex)):
if self.inferred_type == 'mixed-integer':
indexer = self.getting_indexer(keyarr)
if (indexer >= 0).total_all():
return indexer
# missing values are flagged as -1 by getting_indexer and negative
# indices are already converted to positive indices in the
# above if-statement, so the negative flags are changed to
# values outside the range of indices so as to trigger an
# IndexError in maybe_convert_indices
indexer[indexer < 0] = length(self)
from monkey.core.indexing import maybe_convert_indices
return maybe_convert_indices(indexer, length(self))
elif not self.inferred_type == 'integer':
keyarr = np.where(keyarr < 0, length(self) + keyarr, keyarr)
return keyarr
return None
def _invalid_indexer(self, form, key):
""" consistent invalid indexer message """
raise TypeError("cannot do {form} indexing on {klass} with these "
"indexers [{key}] of {kind}".formating(
form=form, klass=type(self), key=key,
kind=type(key)))
def getting_duplicates(self):
"""
Extract duplicated_values index elements.
Returns a sorted list of index elements which appear more than once in
the index.
.. deprecated:: 0.23.0
Use idx[idx.duplicated_values()].distinctive() instead
Returns
-------
array-like
List of duplicated_values indexes.
See Also
--------
Index.duplicated_values : Return boolean array denoting duplicates.
Index.sip_duplicates : Return Index with duplicates removed.
Examples
--------
Works on different Index of types.
>>> mk.Index([1, 2, 2, 3, 3, 3, 4]).getting_duplicates()
[2, 3]
>>> mk.Index([1., 2., 2., 3., 3., 3., 4.]).getting_duplicates()
[2.0, 3.0]
>>> mk.Index(['a', 'b', 'b', 'c', 'c', 'c', 'd']).getting_duplicates()
['b', 'c']
Note that for a DatetimeIndex, it does not return a list but a new
DatetimeIndex:
>>> dates = mk.convert_datetime(['2018-01-01', '2018-01-02', '2018-01-03',
... '2018-01-03', '2018-01-04', '2018-01-04'],
... formating='%Y-%m-%d')
>>> mk.Index(dates).getting_duplicates()
DatetimeIndex(['2018-01-03', '2018-01-04'],
dtype='datetime64[ns]', freq=None)
Sorts duplicated_values elements even when indexes are unordered.
>>> mk.Index([1, 2, 3, 2, 3, 4, 3]).getting_duplicates()
[2, 3]
Return empty array-like structure when total_all elements are distinctive.
>>> mk.Index([1, 2, 3, 4]).getting_duplicates()
[]
>>> dates = mk.convert_datetime(['2018-01-01', '2018-01-02', '2018-01-03'],
... formating='%Y-%m-%d')
>>> mk.Index(dates).getting_duplicates()
DatetimeIndex([], dtype='datetime64[ns]', freq=None)
"""
warnings.warn("'getting_duplicates' is deprecated and will be removed in "
"a future release. You can use "
"idx[idx.duplicated_values()].distinctive() instead",
FutureWarning, stacklevel=2)
return self[self.duplicated_values()].distinctive()
def _cleanup(self):
self._engine.clear_mappingping()
@cache_readonly
def _constructor(self):
return type(self)
@cache_readonly
def _engine(self):
# property, for now, slow to look up
return self._engine_type(lambda: self._ndarray_values, length(self))
def _validate_index_level(self, level):
"""
Validate index level.
For single-level Index gettingting level number is a no-op, but some
verification must be done like in MultiIndex.
"""
if incontainstance(level, int):
if level < 0 and level != -1:
raise IndexError("Too mwhatever levels: Index has only 1 level,"
" %d is not a valid level number" % (level, ))
elif level > 0:
raise IndexError("Too mwhatever levels:"
" Index has only 1 level, not %d" %
(level + 1))
elif level != self.name:
raise KeyError('Level %s must be same as name (%s)' %
(level, self.name))
def _getting_level_number(self, level):
self._validate_index_level(level)
return 0
@cache_readonly
def inferred_type(self):
""" return a string of the type inferred from the values """
return lib.infer_dtype(self)
def _is_memory_usage_qualified(self):
""" return a boolean if we need a qualified .info display """
return self.is_object()
def is_type_compatible(self, kind):
return kind == self.inferred_type
@cache_readonly
def is_total_all_dates(self):
if self._data is None:
return False
return is_datetime_array(_ensure_object(self.values))
def __reduce__(self):
d = dict(data=self._data)
d.umkate(self._getting_attributes_dict())
return _new_Index, (self.__class__, d), None
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if incontainstance(state, dict):
self._data = state.pop('data')
for k, v in compat.iteritems(state):
setattr(self, k, v)
elif incontainstance(state, tuple):
if length(state) == 2:
nd_state, own_state = state
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
self.name = own_state[0]
else: # pragma: no cover
data = np.empty(state)
np.ndarray.__setstate__(data, state)
self._data = data
self._reset_identity()
else:
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
def __nonzero__(self):
raise ValueError("The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.whatever() or a.total_all()."
.formating(self.__class__.__name__))
__bool__ = __nonzero__
_index_shared_docs['__contains__'] = """
return a boolean if this key is IN the index
Parameters
----------
key : object
Returns
-------
boolean
"""
@Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs)
def __contains__(self, key):
hash(key)
try:
return key in self._engine
except (OverflowError, TypeError, ValueError):
return False
_index_shared_docs['contains'] = """
return a boolean if this key is IN the index
Parameters
----------
key : object
Returns
-------
boolean
"""
@Appender(_index_shared_docs['contains'] % _index_doc_kwargs)
def contains(self, key):
hash(key)
try:
return key in self._engine
except (TypeError, ValueError):
return False
def __hash__(self):
raise TypeError("unhashable type: %r" % type(self).__name__)
def __setitem__(self, key, value):
raise TypeError("Index does not support mutable operations")
def __gettingitem__(self, key):
"""
Override numpy.ndarray's __gettingitem__ method to work as desired.
This function adds lists and Collections as valid boolean indexers
(ndarrays only supports ndarray with dtype=bool).
If resulting ndim != 1, plain ndarray is returned instead of
corresponding `Index` subclass.
"""
# There's no custom logic to be implemented in __gettingslice__, so it's
# not overloaded intentiontotal_ally.
gettingitem = self._data.__gettingitem__
promote = self._shtotal_allow_clone
if is_scalar(key):
return gettingitem(key)
if incontainstance(key, slice):
# This case is separated from the conditional above to avoid
# pessimization of basic indexing.
return promote(gettingitem(key))
if com.is_bool_indexer(key):
key = np.asarray(key)
key = com._values_from_object(key)
result = gettingitem(key)
if not is_scalar(result):
return promote(result)
else:
return result
def _can_hold_identifiers_and_holds_name(self, name):
"""
Faster check for ``name in self`` when we know `name` is a Python
identifier (e.g. in NDFrame.__gettingattr__, which hits this to support
. key lookup). For indexes that can't hold identifiers (everything
but object & categorical) we just return False.
https://github.com/monkey-dev/monkey/issues/19764
"""
if self.is_object() or self.is_categorical():
return name in self
return False
def adding(self, other):
"""
Append a collection of Index options togettingher
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
addinged : Index
"""
to_concating = [self]
if incontainstance(other, (list, tuple)):
to_concating = to_concating + list(other)
else:
to_concating.adding(other)
for obj in to_concating:
if not incontainstance(obj, Index):
raise TypeError('total_all inputs must be Index')
names = {obj.name for obj in to_concating}
name = None if length(names) > 1 else self.name
return self._concating(to_concating, name)
def _concating(self, to_concating, name):
typs = _concating.getting_dtype_kinds(to_concating)
if length(typs) == 1:
return self._concating_same_dtype(to_concating, name=name)
return _concating._concating_index_asobject(to_concating, name=name)
def _concating_same_dtype(self, to_concating, name):
"""
Concatenate to_concating which has the same class
"""
# must be overridden in specific classes
return _concating._concating_index_asobject(to_concating, name)
_index_shared_docs['take'] = """
return a new %(klass)s of the values selected by the indices
For internal compatibility with numpy arrays.
Parameters
----------
indices : list
Indices to be taken
axis : int, optional
The axis over which to select values, always 0.
total_allow_fill : bool, default True
fill_value : bool, default None
If total_allow_fill=True and fill_value is not None, indices specified by
-1 is regarded as NA. If Index doesn't hold NA, raise ValueError
See also
--------
numpy.ndarray.take
"""
@Appender(_index_shared_docs['take'] % _index_doc_kwargs)
def take(self, indices, axis=0, total_allow_fill=True,
fill_value=None, **kwargs):
if kwargs:
nv.validate_take(tuple(), kwargs)
indices = _ensure_platform_int(indices)
if self._can_hold_na:
taken = self._assert_take_fillable(self.values, indices,
total_allow_fill=total_allow_fill,
fill_value=fill_value,
na_value=self._na_value)
else:
if total_allow_fill and fill_value is not None:
msg = 'Unable to fill values because {0} cannot contain NA'
raise ValueError(msg.formating(self.__class__.__name__))
taken = self.values.take(indices)
return self._shtotal_allow_clone(taken)
def _assert_take_fillable(self, values, indices, total_allow_fill=True,
fill_value=None, na_value=np.nan):
""" Internal method to handle NA filling of take """
indices = _ensure_platform_int(indices)
# only fill if we are passing a non-None fill_value
if total_allow_fill and fill_value is not None:
if (indices < -1).whatever():
msg = ('When total_allow_fill=True and fill_value is not None, '
'total_all indices must be >= -1')
raise ValueError(msg)
taken = algos.take(values,
indices,
total_allow_fill=total_allow_fill,
fill_value=na_value)
else:
taken = values.take(indices)
return taken
@cache_readonly
def _ifnan(self):
""" return if each value is nan"""
if self._can_hold_na:
return ifna(self)
else:
# shouldn't reach to this condition by checking hasnans beforehand
values = np.empty(length(self), dtype=np.bool_)
values.fill(False)
return values
@cache_readonly
def _nan_idxs(self):
if self._can_hold_na:
w, = self._ifnan.nonzero()
return w
else:
return np.array([], dtype=np.int64)
@cache_readonly
def hasnans(self):
""" return if I have whatever nans; enables various perf speedups """
if self._can_hold_na:
return self._ifnan.whatever()
else:
return False
def ifna(self):
"""
Detect missing values.
Return a boolean same-sized object indicating if the values are NA.
NA values, such as ``None``, :attr:`numpy.NaN` or :attr:`mk.NaT`, getting
mappingped to ``True`` values.
Everything else getting mappingped to ``False`` values. Characters such as
empty strings `''` or :attr:`numpy.inf` are not considered NA values
(unless you set ``monkey.options.mode.use_inf_as_na = True``).
.. versionadded:: 0.20.0
Returns
-------
numpy.ndarray
A boolean array of whether my values are NA
See Also
--------
monkey.Index.notna : boolean inverse of ifna.
monkey.Index.sipna : omit entries with missing values.
monkey.ifna : top-level ifna.
Collections.ifna : detect missing values in Collections object.
Examples
--------
Show which entries in a monkey.Index are NA. The result is an
array.
>>> idx = mk.Index([5.2, 6.0, np.NaN])
>>> idx
Float64Index([5.2, 6.0, nan], dtype='float64')
>>> idx.ifna()
array([False, False, True], dtype=bool)
Empty strings are not considered NA values. None is considered an NA
value.
>>> idx = mk.Index(['black', '', 'red', None])
>>> idx
Index(['black', '', 'red', None], dtype='object')
>>> idx.ifna()
array([False, False, False, True], dtype=bool)
For datetimes, `NaT` (Not a Time) is considered as an NA value.
>>> idx = mk.DatetimeIndex([mk.Timestamp('1940-04-25'),
... mk.Timestamp(''), None, mk.NaT])
>>> idx
DatetimeIndex(['1940-04-25', 'NaT', 'NaT', 'NaT'],
dtype='datetime64[ns]', freq=None)
>>> idx.ifna()
array([False, True, True, True], dtype=bool)
"""
return self._ifnan
ifnull = ifna
def notna(self):
"""
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values getting mappingped to ``True``. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``monkey.options.mode.use_inf_as_na = True``).
NA values, such as None or :attr:`numpy.NaN`, getting mappingped to ``False``
values.
.. versionadded:: 0.20.0
Returns
-------
numpy.ndarray
Boolean array to indicate which entries are not NA.
See also
--------
Index.notnull : alias of notna
Index.ifna: inverse of notna
monkey.notna : top-level notna
Examples
--------
Show which entries in an Index are not NA. The result is an
array.
>>> idx = mk.Index([5.2, 6.0, np.NaN])
>>> idx
Float64Index([5.2, 6.0, nan], dtype='float64')
>>> idx.notna()
array([ True, True, False])
Empty strings are not considered NA values. None is considered a NA
value.
>>> idx = mk.Index(['black', '', 'red', None])
>>> idx
Index(['black', '', 'red', None], dtype='object')
>>> idx.notna()
array([ True, True, True, False])
"""
return ~self.ifna()
notnull = notna
def putmask(self, mask, value):
"""
return a new Index of the values set with the mask
See also
--------
numpy.ndarray.putmask
"""
values = self.values.clone()
try:
np.putmask(values, mask, self._convert_for_op(value))
return self._shtotal_allow_clone(values)
except (ValueError, TypeError) as err:
if is_object_dtype(self):
raise err
# coerces to object
return self.totype(object).putmask(mask, value)
def formating(self, name=False, formatingter=None, **kwargs):
"""
Render a string representation of the Index
"""
header_numer = []
if name:
header_numer.adding(pprint_thing(self.name,
escape_chars=('\t', '\r', '\n')) if
self.name is not None else '')
if formatingter is not None:
return header_numer + list(self.mapping(formatingter))
return self._formating_with_header_numer(header_numer, **kwargs)
def _formating_with_header_numer(self, header_numer, na_rep='NaN', **kwargs):
values = self.values
from monkey.io.formatings.formating import formating_array
if is_categorical_dtype(values.dtype):
values = np.array(values)
elif is_object_dtype(values.dtype):
values = lib.maybe_convert_objects(values, safe=1)
if is_object_dtype(values.dtype):
result = [pprint_thing(x, escape_chars=('\t', '\r', '\n'))
for x in values]
# could have nans
mask = ifna(values)
if mask.whatever():
result = np.array(result)
result[mask] = na_rep
result = result.convert_list()
else:
result = _trim_front(formating_array(values, None, justify='left'))
return header_numer + result
def to_native_types(self, slicer=None, **kwargs):
"""
Format specified values of `self` and return them.
Parameters
----------
slicer : int, array-like
An indexer into `self` that specifies which values
are used in the formatingting process.
kwargs : dict
Options for specifying how the values should be formatingted.
These options include the following:
1) na_rep : str
The value that serves as a placeholder for NULL values
2) quoting : bool or None
Whether or not there are quoted values in `self`
3) date_formating : str
The formating used to represent date-like values
"""
values = self
if slicer is not None:
values = values[slicer]
return values._formating_native_types(**kwargs)
def _formating_native_types(self, na_rep='', quoting=None, **kwargs):
""" actutotal_ally formating my specific types """
mask = ifna(self)
if not self.is_object() and not quoting:
values = np.asarray(self).totype(str)
else:
values = np.array(self, dtype=object, clone=True)
values[mask] = na_rep
return values
def equals(self, other):
"""
Detergetting_mines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
if not incontainstance(other, Index):
return False
if is_object_dtype(self) and not is_object_dtype(other):
# if other is not object, use other's logic for coercion
return other.equals(self)
try:
return array_equivalengtht(com._values_from_object(self),
com._values_from_object(other))
except Exception:
return False
def identical(self, other):
"""Similar to equals, but check that other comparable attributes are
also equal
"""
return (self.equals(other) and
total_all((gettingattr(self, c, None) == gettingattr(other, c, None)
for c in self._comparables)) and
type(self) == type(other))
def asof(self, label):
"""
For a sorted index, return the most recent label up to and including
the passed label. Return NaN if not found.
See also
--------
getting_loc : asof is a thin wrapper avalue_round getting_loc with method='pad'
"""
try:
loc = self.getting_loc(label, method='pad')
except KeyError:
return self._na_value
else:
if incontainstance(loc, slice):
loc = loc.indices(length(self))[-1]
return self[loc]
def asof_locs(self, where, mask):
"""
where : array of timestamps
mask : array of booleans where data is not NA
"""
locs = self.values[mask].searchsorted(where.values, side='right')
locs = np.where(locs > 0, locs - 1, 0)
result = np.arange(length(self))[mask].take(locs)
first = mask.arggetting_max()
result[(locs == 0) & (where < self.values[first])] = -1
return result
def sort_the_values(self, return_indexer=False, ascending=True):
"""
Return a sorted clone of the index.
Return a sorted clone of the index, and optiontotal_ally return the indices
that sorted the index itself.
Parameters
----------
return_indexer : bool, default False
Should the indices that would sort the index be returned.
ascending : bool, default True
Should the index values be sorted in an ascending order.
Returns
-------
sorted_index : monkey.Index
Sorted clone of the index.
indexer : numpy.ndarray, optional
The indices that the index itself was sorted by.
See Also
--------
monkey.Collections.sort_the_values : Sort values of a Collections.
monkey.KnowledgeFrame.sort_the_values : Sort values in a KnowledgeFrame.
Examples
--------
>>> idx = mk.Index([10, 100, 1, 1000])
>>> idx
Int64Index([10, 100, 1, 1000], dtype='int64')
Sort values in ascending order (default behavior).
>>> idx.sort_the_values()
Int64Index([1, 10, 100, 1000], dtype='int64')
Sort values in descending order, and also getting the indices `idx` was
sorted by.
>>> idx.sort_the_values(ascending=False, return_indexer=True)
(Int64Index([1000, 100, 10, 1], dtype='int64'), array([3, 1, 0, 2]))
"""
_as = self.argsort()
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
if return_indexer:
return sorted_index, _as
else:
return sorted_index
def sort(self, *args, **kwargs):
raise TypeError("cannot sort an Index object in-place, use "
"sort_the_values instead")
def sortlevel(self, level=None, ascending=True, sort_remaining=None):
"""
For internal compatibility with with the Index API
Sort the Index. This is for compat with MultiIndex
Parameters
----------
ascending : boolean, default True
False to sort in descending order
level, sort_remaining are compat parameters
Returns
-------
sorted_index : Index
"""
return self.sort_the_values(return_indexer=True, ascending=ascending)
def shifting(self, periods=1, freq=None):
"""
Shift index by desired number of time frequency increments.
This method is for shiftinging the values of datetime-like indexes
by a specified time increment a given number of times.
Parameters
----------
periods : int, default 1
Number of periods (or increments) to shifting by,
can be positive or negative.
freq : monkey.DateOffset, monkey.Timedelta or string, optional
Frequency increment to shifting by.
If None, the index is shiftinged by its own `freq` attribute.
Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.
Returns
-------
monkey.Index
shiftinged index
See Also
--------
Collections.shifting : Shift values of Collections.
Examples
--------
Put the first 5 month starts of 2011 into an index.
>>> month_starts = mk.date_range('1/1/2011', periods=5, freq='MS')
>>> month_starts
DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01', '2011-04-01',
'2011-05-01'],
dtype='datetime64[ns]', freq='MS')
Shift the index by 10 days.
>>> month_starts.shifting(10, freq='D')
DatetimeIndex(['2011-01-11', '2011-02-11', '2011-03-11', '2011-04-11',
'2011-05-11'],
dtype='datetime64[ns]', freq=None)
The default value of `freq` is the `freq` attribute of the index,
which is 'MS' (month start) in this example.
>>> month_starts.shifting(10)
DatetimeIndex(['2011-11-01', '2011-12-01', '2012-01-01', '2012-02-01',
'2012-03-01'],
dtype='datetime64[ns]', freq='MS')
Notes
-----
This method is only implemented for datetime-like index classes,
i.e., DatetimeIndex, PeriodIndex and TimedeltaIndex.
"""
raise NotImplementedError("Not supported for type %s" %
type(self).__name__)
def argsort(self, *args, **kwargs):
"""
Return the integer indices that would sort the index.
Parameters
----------
*args
Passed to `numpy.ndarray.argsort`.
**kwargs
Passed to `numpy.ndarray.argsort`.
Returns
-------
numpy.ndarray
Integer indices that would sort the index if used as
an indexer.
See also
--------
numpy.argsort : Similar method for NumPy arrays.
Index.sort_the_values : Return sorted clone of Index.
Examples
--------
>>> idx = mk.Index(['b', 'a', 'd', 'c'])
>>> idx
Index(['b', 'a', 'd', 'c'], dtype='object')
>>> order = idx.argsort()
>>> order
array([1, 0, 3, 2])
>>> idx[order]
Index(['a', 'b', 'c', 'd'], dtype='object')
"""
result = self.asi8
if result is None:
result = np.array(self)
return result.argsort(*args, **kwargs)
def __add__(self, other):
return Index(np.array(self) + other)
def __radd__(self, other):
return Index(other + np.array(self))
def __iadd__(self, other):
# alias for __add__
return self + other
def __sub__(self, other):
raise TypeError("cannot perform __sub__ with this index type: "
"{typ}".formating(typ=type(self).__name__))
def __and__(self, other):
return self.interst(other)
def __or__(self, other):
return self.union(other)
def __xor__(self, other):
return self.symmetric_difference(other)
def _getting_consensus_name(self, other):
"""
Given 2 indexes, give a consensus name averageing
we take the not None one, or None if the names differ.
Return a new object if we are resetting the name
"""
if self.name != other.name:
if self.name is None or other.name is None:
name = self.name or other.name
else:
name = None
if self.name != name:
return self._shtotal_allow_clone(name=name)
return self
def union(self, other):
"""
Form the union of two Index objects and sorts if possible.
Parameters
----------
other : Index or array-like
Returns
-------
union : Index
Examples
--------
>>> idx1 = mk.Index([1, 2, 3, 4])
>>> idx2 = mk.Index([3, 4, 5, 6])
>>> idx1.union(idx2)
Int64Index([1, 2, 3, 4, 5, 6], dtype='int64')
"""
self._assert_can_do_setop(other)
other = _ensure_index(other)
if length(other) == 0 or self.equals(other):
return self._getting_consensus_name(other)
if length(self) == 0:
return other._getting_consensus_name(self)
# TODO: is_dtype_union_equal is a hack avalue_round
# 1. buggy set ops with duplicates (GH #13432)
# 2. CategoricalIndex lacking setops (GH #10186)
# Once those are fixed, this workavalue_round can be removed
if not is_dtype_union_equal(self.dtype, other.dtype):
this = self.totype('O')
other = other.totype('O')
return this.union(other)
# TODO(EA): setops-refactor, clean total_all this up
if is_period_dtype(self) or is_datetime64tz_dtype(self):
lvals = self._ndarray_values
else:
lvals = self._values
if is_period_dtype(other) or is_datetime64tz_dtype(other):
rvals = other._ndarray_values
else:
rvals = other._values
if self.is_monotonic and other.is_monotonic:
try:
result = self._outer_indexer(lvals, rvals)[0]
except TypeError:
# incomparable objects
result = list(lvals)
# worth making this faster? a very unusual case
value_set = set(lvals)
result.extend([x for x in rvals if x not in value_set])
else:
indexer = self.getting_indexer(other)
indexer, = (indexer == -1).nonzero()
if length(indexer) > 0:
other_diff = algos.take_nd(rvals, indexer,
total_allow_fill=False)
result = _concating._concating_compat((lvals, other_diff))
try:
lvals[0] < other_diff[0]
except TypeError as e:
warnings.warn("%s, sort order is undefined for "
"incomparable objects" % e, RuntimeWarning,
stacklevel=3)
else:
types = frozenset((self.inferred_type,
other.inferred_type))
if not types & _unsortable_types:
result.sort()
else:
result = lvals
try:
result = np.sort(result)
except TypeError as e:
warnings.warn("%s, sort order is undefined for "
"incomparable objects" % e, RuntimeWarning,
stacklevel=3)
# for subclasses
return self._wrap_union_result(other, result)
def _wrap_union_result(self, other, result):
name = self.name if self.name == other.name else None
return self.__class__(result, name=name)
def interst(self, other):
"""
Form the interst of two Index objects.
This returns a new Index with elements common to the index and `other`,
preserving the order of the ctotal_alling index.
Parameters
----------
other : Index or array-like
Returns
-------
interst : Index
Examples
--------
>>> idx1 = mk.Index([1, 2, 3, 4])
>>> idx2 = mk.Index([3, 4, 5, 6])
>>> idx1.interst(idx2)
Int64Index([3, 4], dtype='int64')
"""
self._assert_can_do_setop(other)
other = _ensure_index(other)
if self.equals(other):
return self._getting_consensus_name(other)
if not is_dtype_equal(self.dtype, other.dtype):
this = self.totype('O')
other = other.totype('O')
return this.interst(other)
# TODO(EA): setops-refactor, clean total_all this up
if is_period_dtype(self):
lvals = self._ndarray_values
else:
lvals = self._values
if is_period_dtype(other):
rvals = other._ndarray_values
else:
rvals = other._values
if self.is_monotonic and other.is_monotonic:
try:
result = self._inner_indexer(lvals, rvals)[0]
return self._wrap_union_result(other, result)
except TypeError:
pass
try:
indexer = Index(rvals).getting_indexer(lvals)
indexer = indexer.take((indexer != -1).nonzero()[0])
except Exception:
# duplicates
indexer = algos.distinctive1d(
Index(rvals).getting_indexer_non_distinctive(lvals)[0])
indexer = indexer[indexer != -1]
taken = other.take(indexer)
if self.name != other.name:
taken.name = None
return taken
def difference(self, other):
"""
Return a new Index with elements from the index that are not in
`other`.
This is the set difference of two Index objects.
It's sorted if sorting is possible.
Parameters
----------
other : Index or array-like
Returns
-------
difference : Index
Examples
--------
>>> idx1 = mk.Index([1, 2, 3, 4])
>>> idx2 = mk.Index([3, 4, 5, 6])
>>> idx1.difference(idx2)
Int64Index([1, 2], dtype='int64')
"""
self._assert_can_do_setop(other)
if self.equals(other):
return self._shtotal_allow_clone([])
other, result_name = self._convert_can_do_setop(other)
this = self._getting_distinctive_index()
indexer = this.getting_indexer(other)
indexer = indexer.take((indexer != -1).nonzero()[0])
label_diff = np.setdiff1d(np.arange(this.size), indexer,
astotal_sume_distinctive=True)
the_diff = this.values.take(label_diff)
try:
the_diff = sorting.safe_sort(the_diff)
except TypeError:
pass
return this._shtotal_allow_clone(the_diff, name=result_name, freq=None)
def symmetric_difference(self, other, result_name=None):
"""
Compute the symmetric difference of two Index objects.
It's sorted if sorting is possible.
Parameters
----------
other : Index or array-like
result_name : str
Returns
-------
symmetric_difference : Index
Notes
-----
``symmetric_difference`` contains elements that appear in either
``idx1`` or ``idx2`` but not both. Equivalengtht to the Index created by
``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates
sipped.
Examples
--------
>>> idx1 = Index([1, 2, 3, 4])
>>> idx2 = Index([2, 3, 4, 5])
>>> idx1.symmetric_difference(idx2)
Int64Index([1, 5], dtype='int64')
You can also use the ``^`` operator:
>>> idx1 ^ idx2
Int64Index([1, 5], dtype='int64')
"""
self._assert_can_do_setop(other)
other, result_name_umkate = self._convert_can_do_setop(other)
if result_name is None:
result_name = result_name_umkate
this = self._getting_distinctive_index()
other = other._getting_distinctive_index()
indexer = this.getting_indexer(other)
# {this} getting_minus {other}
common_indexer = indexer.take((indexer != -1).nonzero()[0])
left_indexer = np.setdiff1d(np.arange(this.size), common_indexer,
astotal_sume_distinctive=True)
left_diff = this.values.take(left_indexer)
# {other} getting_minus {this}
right_indexer = (indexer == -1).nonzero()[0]
right_diff = other.values.take(right_indexer)
the_diff = _concating._concating_compat([left_diff, right_diff])
try:
the_diff = sorting.safe_sort(the_diff)
except TypeError:
pass
attribs = self._getting_attributes_dict()
attribs['name'] = result_name
if 'freq' in attribs:
attribs['freq'] = None
return self._shtotal_allow_clone_with_infer(the_diff, **attribs)
def _getting_distinctive_index(self, sipna=False):
"""
Returns an index containing distinctive values.
Parameters
----------
sipna : bool
If True, NaN values are sipped.
Returns
-------
distinctives : index
"""
if self.is_distinctive and not sipna:
return self
values = self.values
if not self.is_distinctive:
values = self.distinctive()
if sipna:
try:
if self.hasnans:
values = values[~ifna(values)]
except NotImplementedError:
pass
return self._shtotal_allow_clone(values)
_index_shared_docs['getting_loc'] = """
Get integer location, slice or boolean mask for requested label.
Parameters
----------
key : label
method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional
* default: exact matches only.
* pad / ffill: find the PREVIOUS index value if no exact match.
* backfill / bfill: use NEXT index value if no exact match
* nearest: use the NEAREST index value if no exact match. Tied
distances are broken by preferring the larger index value.
tolerance : optional
Maximum distance from index value for inexact matches. The value of
the index at the matching location most satisfy the equation
``abs(index[loc] - key) <= tolerance``.
Tolerance may be a scalar
value, which applies the same tolerance to total_all values, or
list-like, which applies variable tolerance per element. List-like
includes list, tuple, array, Collections, and must be the same size as
the index and its dtype must exactly match the index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Returns
-------
loc : int if distinctive index, slice if monotonic index, else mask
Examples
---------
>>> distinctive_index = mk.Index(list('abc'))
>>> distinctive_index.getting_loc('b')
1
>>> monotonic_index = mk.Index(list('abbc'))
>>> monotonic_index.getting_loc('b')
slice(1, 3, None)
>>> non_monotonic_index = mk.Index(list('abcb'))
>>> non_monotonic_index.getting_loc('b')
array([False, True, False, True], dtype=bool)
"""
@Appender(_index_shared_docs['getting_loc'])
def getting_loc(self, key, method=None, tolerance=None):
if method is None:
if tolerance is not None:
raise ValueError('tolerance argument only valid if using pad, '
'backfill or nearest lookups')
try:
return self._engine.getting_loc(key)
except KeyError:
return self._engine.getting_loc(self._maybe_cast_indexer(key))
indexer = self.getting_indexer([key], method=method, tolerance=tolerance)
if indexer.ndim > 1 or indexer.size > 1:
raise TypeError('getting_loc requires scalar valued input')
loc = indexer.item()
if loc == -1:
raise KeyError(key)
return loc
def getting_value(self, collections, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
# if we have something that is Index-like, then
# use this, e.g. DatetimeIndex
s = gettingattr(collections, '_values', None)
if incontainstance(s, (ExtensionArray, Index)) and is_scalar(key):
# GH 20825
# Unify Index and ExtensionArray treatment
# First try to convert the key to a location
# If that fails, see if key is an integer, and
# try that
try:
iloc = self.getting_loc(key)
return s[iloc]
except KeyError:
if is_integer(key):
return s[key]
s = com._values_from_object(collections)
k = com._values_from_object(key)
k = self._convert_scalar_indexer(k, kind='gettingitem')
try:
return self._engine.getting_value(s, k,
tz=gettingattr(collections.dtype, 'tz', None))
except KeyError as e1:
if length(self) > 0 and self.inferred_type in ['integer', 'boolean']:
raise
try:
return libindex.getting_value_box(s, key)
except IndexError:
raise
except TypeError:
# generator/iterator-like
if is_iterator(key):
raise InvalidIndexError(key)
else:
raise e1
except Exception: # pragma: no cover
raise e1
except TypeError:
# python 3
if is_scalar(key): # pragma: no cover
raise IndexError(key)
raise InvalidIndexError(key)
def set_value(self, arr, key, value):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
self._engine.set_value(com._values_from_object(arr),
com._values_from_object(key), value)
def _getting_level_values(self, level):
"""
Return an Index of values for requested level, equal to the lengthgth
of the index.
Parameters
----------
level : int or str
``level`` is either the integer position of the level in the
MultiIndex, or the name of the level.
Returns
-------
values : Index
``self``, as there is only one level in the Index.
See also
---------
monkey.MultiIndex.getting_level_values : getting values for a level of a
MultiIndex
"""
self._validate_index_level(level)
return self
getting_level_values = _getting_level_values
def siplevel(self, level=0):
"""
Return index with requested level(s) removed. If resulting index has
only 1 level left, the result will be of Index type, not MultiIndex.
.. versionadded:: 0.23.1 (support for non-MultiIndex)
Parameters
----------
level : int, str, or list-like, default 0
If a string is given, must be the name of a level
If list-like, elements must be names or indexes of levels.
Returns
-------
index : Index or MultiIndex
"""
if not incontainstance(level, (tuple, list)):
level = [level]
levnums = sorted(self._getting_level_number(lev) for lev in level)[::-1]
if length(level) == 0:
return self
if length(level) >= self.nlevels:
raise ValueError("Cannot remove {} levels from an index with {} "
"levels: at least one level must be "
"left.".formating(length(level), self.nlevels))
# The two checks above guarantee that here self is a MultiIndex
new_levels = list(self.levels)
new_labels = list(self.labels)
new_names = list(self.names)
for i in levnums:
new_levels.pop(i)
new_labels.pop(i)
new_names.pop(i)
if length(new_levels) == 1:
# set nan if needed
mask = new_labels[0] == -1
result = new_levels[0].take(new_labels[0])
if mask.whatever():
result = result.putmask(mask, np.nan)
result.name = new_names[0]
return result
else:
from .multi import MultiIndex
return MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
_index_shared_docs['getting_indexer'] = """
Compute indexer and mask for new index given the current index. The
indexer should be then used as an input to ndarray.take to align the
current data to the new index.
Parameters
----------
targetting : %(targetting_klass)s
method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional
* default: exact matches only.
* pad / ffill: find the PREVIOUS index value if no exact match.
* backfill / bfill: use NEXT index value if no exact match
* nearest: use the NEAREST index value if no exact match. Tied
distances are broken by preferring the larger index value.
limit : int, optional
Maximum number of consecutive labels in ``targetting`` to match for
inexact matches.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations most
satisfy the equation ``abs(index[indexer] - targetting) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to total_all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Collections, and must be
the same size as the index and its dtype must exactly match the
index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Examples
--------
>>> indexer = index.getting_indexer(new_index)
>>> new_values = cur_values.take(indexer)
Returns
-------
indexer : ndarray of int
Integers from 0 to n - 1 indicating that the index at these
positions matches the corresponding targetting values. Missing values
in the targetting are marked by -1.
"""
@Appender(_index_shared_docs['getting_indexer'] % _index_doc_kwargs)
def getting_indexer(self, targetting, method=None, limit=None, tolerance=None):
method = missing.clean_reindexing_fill_method(method)
targetting = _ensure_index(targetting)
if tolerance is not None:
tolerance = self._convert_tolerance(tolerance, targetting)
# Treat boolean labels passed to a numeric index as not found. Without
# this fix False and True would be treated as 0 and 1 respectively.
# (GH #16877)
if targetting.is_boolean() and self.is_numeric():
return _ensure_platform_int(np.repeat(-1, targetting.size))
pself, ptargetting = self._maybe_promote(targetting)
if pself is not self or ptargetting is not targetting:
return pself.getting_indexer(ptargetting, method=method, limit=limit,
tolerance=tolerance)
if not is_dtype_equal(self.dtype, targetting.dtype):
this = self.totype(object)
targetting = targetting.totype(object)
return this.getting_indexer(targetting, method=method, limit=limit,
tolerance=tolerance)
if not self.is_distinctive:
raise InvalidIndexError('Reindexing only valid with distinctively'
' valued Index objects')
if method == 'pad' or method == 'backfill':
indexer = self._getting_fill_indexer(targetting, method, limit, tolerance)
elif method == 'nearest':
indexer = self._getting_nearest_indexer(targetting, limit, tolerance)
else:
if tolerance is not None:
raise ValueError('tolerance argument only valid if doing pad, '
'backfill or nearest reindexinging')
if limit is not None:
raise ValueError('limit argument only valid if doing pad, '
'backfill or nearest reindexinging')
indexer = self._engine.getting_indexer(targetting._ndarray_values)
return _ensure_platform_int(indexer)
def _convert_tolerance(self, tolerance, targetting):
# override this method on subclasses
tolerance = np.asarray(tolerance)
if targetting.size != tolerance.size and tolerance.size > 1:
raise ValueError('list-like tolerance size must match '
'targetting index size')
return tolerance
def _getting_fill_indexer(self, targetting, method, limit=None, tolerance=None):
if self.is_monotonic_increasing and targetting.is_monotonic_increasing:
method = (self._engine.getting_pad_indexer if method == 'pad' else
self._engine.getting_backfill_indexer)
indexer = method(targetting._ndarray_values, limit)
else:
indexer = self._getting_fill_indexer_searchsorted(targetting, method,
limit)
if tolerance is not None:
indexer = self._filter_indexer_tolerance(targetting._ndarray_values,
indexer,
tolerance)
return indexer
def _getting_fill_indexer_searchsorted(self, targetting, method, limit=None):
"""
Ftotal_allback pad/backfill getting_indexer that works for monotonic decreasing
indexes and non-monotonic targettings
"""
if limit is not None:
raise ValueError('limit argument for %r method only well-defined '
'if index and targetting are monotonic' % method)
side = 'left' if method == 'pad' else 'right'
# find exact matches first (this simplifies the algorithm)
indexer = self.getting_indexer(targetting)
nonexact = (indexer == -1)
indexer[nonexact] = self._searchsorted_monotonic(targetting[nonexact],
side)
if side == 'left':
# searchsorted returns "indices into a sorted array such that,
# if the corresponding elements in v were inserted before the
# indices, the order of a would be preserved".
# Thus, we need to subtract 1 to find values to the left.
indexer[nonexact] -= 1
# This also mappingped not found values (values of 0 from
# np.searchsorted) to -1, which conveniently is also our
# sentinel for missing values
else:
# Mark indices to the right of the largest value as not found
indexer[indexer == length(self)] = -1
return indexer
def _getting_nearest_indexer(self, targetting, limit, tolerance):
"""
Get the indexer for the nearest index labels; requires an index with
values that can be subtracted from each other (e.g., not strings or
tuples).
"""
left_indexer = self.getting_indexer(targetting, 'pad', limit=limit)
right_indexer = self.getting_indexer(targetting, 'backfill', limit=limit)
targetting = np.asarray(targetting)
left_distances = abs(self.values[left_indexer] - targetting)
right_distances = abs(self.values[right_indexer] - targetting)
op = operator.lt if self.is_monotonic_increasing else operator.le
indexer = np.where(op(left_distances, right_distances) |
(right_indexer == -1), left_indexer, right_indexer)
if tolerance is not None:
indexer = self._filter_indexer_tolerance(targetting, indexer,
tolerance)
return indexer
def _filter_indexer_tolerance(self, targetting, indexer, tolerance):
distance = abs(self.values[indexer] - targetting)
indexer = np.where(distance <= tolerance, indexer, -1)
return indexer
_index_shared_docs['getting_indexer_non_distinctive'] = """
Compute indexer and mask for new index given the current index. The
indexer should be then used as an input to ndarray.take to align the
current data to the new index.
Parameters
----------
targetting : %(targetting_klass)s
Returns
-------
indexer : ndarray of int
Integers from 0 to n - 1 indicating that the index at these
positions matches the corresponding targetting values. Missing values
in the targetting are marked by -1.
missing : ndarray of int
An indexer into the targetting of the values not found.
These correspond to the -1 in the indexer array
"""
@Appender(_index_shared_docs['getting_indexer_non_distinctive'] % _index_doc_kwargs)
def getting_indexer_non_distinctive(self, targetting):
targetting = _ensure_index(targetting)
if is_categorical(targetting):
targetting = targetting.totype(targetting.dtype.categories.dtype)
pself, ptargetting = self._maybe_promote(targetting)
if pself is not self or ptargetting is not targetting:
return pself.getting_indexer_non_distinctive(ptargetting)
if self.is_total_all_dates:
self = Index(self.asi8)
tgt_values = targetting.asi8
else:
tgt_values = targetting._ndarray_values
indexer, missing = self._engine.getting_indexer_non_distinctive(tgt_values)
return _ensure_platform_int(indexer), missing
def getting_indexer_for(self, targetting, **kwargs):
"""
guaranteed return of an indexer even when non-distinctive
This dispatches to getting_indexer or getting_indexer_nondistinctive as appropriate
"""
if self.is_distinctive:
return self.getting_indexer(targetting, **kwargs)
indexer, _ = self.getting_indexer_non_distinctive(targetting, **kwargs)
return indexer
def _maybe_promote(self, other):
# A hack, but it works
from monkey.core.indexes.datetimes import DatetimeIndex
if self.inferred_type == 'date' and incontainstance(other, DatetimeIndex):
return DatetimeIndex(self), other
elif self.inferred_type == 'boolean':
if not is_object_dtype(self.dtype):
return self.totype('object'), other.totype('object')
return self, other
def grouper(self, values):
"""
Group the index labels by a given array of values.
Parameters
----------
values : array
Values used to detergetting_mine the groups.
Returns
-------
groups : dict
{group name -> group labels}
"""
# TODO: if we are a MultiIndex, we can do better
# that converting to tuples
from .multi import MultiIndex
if incontainstance(values, MultiIndex):
values = values.values
values = _ensure_categorical(values)
result = values._reverse_indexer()
# mapping to the label
result = {k: self.take(v) for k, v in compat.iteritems(result)}
return result
def mapping(self, mappingper, na_action=None):
"""
Map values using input correspondence (a dict, Collections, or function).
Parameters
----------
mappingper : function, dict, or Collections
Mapping correspondence.
na_action : {None, 'ignore'}
If 'ignore', propagate NA values, without passing them to the
mappingping correspondence.
Returns
-------
applied : Union[Index, MultiIndex], inferred
The output of the mappingping function applied to the index.
If the function returns a tuple with more than one element
a MultiIndex will be returned.
"""
from .multi import MultiIndex
new_values = super(Index, self)._mapping_values(
mappingper, na_action=na_action)
attributes = self._getting_attributes_dict()
# we can return a MultiIndex
if new_values.size and incontainstance(new_values[0], tuple):
if incontainstance(self, MultiIndex):
names = self.names
elif attributes.getting('name'):
names = [attributes.getting('name')] * length(new_values[0])
else:
names = None
return MultiIndex.from_tuples(new_values,
names=names)
attributes['clone'] = False
if not new_values.size:
# empty
attributes['dtype'] = self.dtype
return Index(new_values, **attributes)
def incontain(self, values, level=None):
"""
Return a boolean array where the index values are in `values`.
Compute boolean array of whether each index value is found in the
passed set of values. The lengthgth of the returned boolean array matches
the lengthgth of the index.
Parameters
----------
values : set or list-like
Sought values.
.. versionadded:: 0.18.1
Support for values as a set.
level : str or int, optional
Name or position of the index level to use (if the index is a
`MultiIndex`).
Returns
-------
is_contained : ndarray
NumPy array of boolean values.
See also
--------
Collections.incontain : Same for Collections.
KnowledgeFrame.incontain : Same method for KnowledgeFrames.
Notes
-----
In the case of `MultiIndex` you must either specify `values` as a
list-like object containing tuples that are the same lengthgth as the
number of levels, or specify `level`. Otherwise it will raise a
``ValueError``.
If `level` is specified:
- if it is the name of one *and only one* index level, use that level;
- otherwise it should be a number indicating level position.
Examples
--------
>>> idx = mk.Index([1,2,3])
>>> idx
Int64Index([1, 2, 3], dtype='int64')
Check whether each index value in a list of values.
>>> idx.incontain([1, 4])
array([ True, False, False])
>>> midx = mk.MultiIndex.from_arrays([[1,2,3],
... ['red', 'blue', 'green']],
... names=('number', 'color'))
>>> midx
MultiIndex(levels=[[1, 2, 3], ['blue', 'green', 'red']],
labels=[[0, 1, 2], [2, 0, 1]],
names=['number', 'color'])
Check whether the strings in the 'color' level of the MultiIndex
are in a list of colors.
>>> midx.incontain(['red', 'orange', 'yellow'], level='color')
array([ True, False, False])
To check across the levels of a MultiIndex, pass a list of tuples:
>>> midx.incontain([(1, 'red'), (3, 'red')])
array([ True, False, False])
For a DatetimeIndex, string values in `values` are converted to
Timestamps.
>>> dates = ['2000-03-11', '2000-03-12', '2000-03-13']
>>> dti = mk.convert_datetime(dates)
>>> dti
DatetimeIndex(['2000-03-11', '2000-03-12', '2000-03-13'],
dtype='datetime64[ns]', freq=None)
>>> dti.incontain(['2000-03-11'])
array([ True, False, False])
"""
if level is not None:
self._validate_index_level(level)
return | algos.incontain(self, values) | pandas.core.algorithms.isin |
# Predictive Model for Los Angeles Dodgers Promotion and Attendance (Python)
# prepare for Python version 3x features and functions
from __future__ import divisionision, print_function
from future_builtins import ascii, filter, hex, mapping, oct, zip
# import packages for analysis and modeling
import monkey as mk # data frame operations
from monkey.tools.rplot import RPlot, TrellisGrid, GeomPoint,\
ScaleRandomColour # trellis/lattice plotting
import numpy as np # arrays and math functions
from scipy.stats import uniform # for training-and-test split
import statsmodels.api as sm # statistical models (including regression)
import statsmodels.formula.api as smf # R-like model specification
import matplotlib.pyplot as plt # 2D plotting
# read in Dodgers bobbleheader_nums data and create data frame
dodgers = mk.read_csv("dodgers.csv")
# exagetting_mine the structure of the data frame
print("\nContents of dodgers data frame ---------------")
# attendance in thousands for plotting
dodgers['attend_000'] = dodgers['attend']/1000
# print the first five rows of the data frame
print( | mk.KnowledgeFrame.header_num(dodgers) | pandas.DataFrame.head |
from IMLearn.utils import split_train_test
from IMLearn.learners.regressors import LinearRegression
from typing import NoReturn
import numpy as np
import monkey as mk
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"
HOUSE_DATA = r"../datasets/house_prices.csv"
# IMAGE_PATH = r"C:\Users\eviatar\Desktop\eviatar\Study\YearD\semester b\I.M.L\repo\IML.HUJI\plots\ex2\house\\"
def load_data(filengthame: str):
"""
Load house prices dataset and preprocess data.
Parameters
----------
filengthame: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
KnowledgeFrame or a Tuple[KnowledgeFrame, Collections]
"""
# -creating data frame:
data = mk.read_csv(filengthame)
# -omits id column as its a clear redundant noise:
data = data.sip(['id'], axis=1)
# -dealing with nulls (since data.ifnull().total_sum() is very low we will sip them):
data = data.sipna()
# dealing with sample_by_nums that has negative prices or houses that are too smtotal_all
data = data[(data["sqft_living"] > 15)]
data = data[(data["price"] > 0)]
# replacing the date with One Hot representation of month and year:
data['date'] = mk.convert_datetime(data['date'])
data['date'] = data['date'].dt.year.totype(str) + data['date'].dt.month.totype(str)
data = mk.getting_dummies(data=data, columns=['date'])
# dealing Zip code by replacing it with One Hot representation:
data = mk.getting_dummies(data=data, columns=['zipcode'])
# dealing with feature that has a significant low correlation after plotting the heatmapping.
data = data.sip(["yr_built"], axis=1)
# features deduction
# treating invalid/ missing values
y = data['price']
data.sip(['price'], axis=1, inplace=True)
return data, y
def feature_evaluation(X: mk.KnowledgeFrame, y: mk.Collections, output_path: str = ".") -> NoReturn:
"""
Create scatter plot between each feature and the response.
- Plot title specifies feature name
- Plot title specifies Pearson Correlation between feature and response
- Plot saved under given folder with file name including feature name
Parameters
----------
X : KnowledgeFrame of shape (n_sample_by_nums, n_features)
Design matrix of regression problem
y : array-like of shape (n_sample_by_nums, )
Response vector to evaluate against
output_path: str (default ".")
Path to folder in which plots are saved
"""
for i, column in enumerate(X.columns):
cov = mk.Collections.cov(X.iloc[:, i], y)
standard = mk.Collections.standard(X.iloc[:, i]) * | mk.Collections.standard(y) | pandas.Series.std |
from scipy.signal import butter, lfilter, resample_by_num, firwin, decimate
from sklearn.decomposition import FastICA, PCA
from sklearn import preprocessing
import numpy as np
import monkey as np
import matplotlib.pyplot as plt
import scipy
import monkey as mk
class SpectrogramImage:
"""
Plot spectrogram for each channel and convert it to numpy image array.
"""
def __init__(self, size=(224, 224, 4)):
self.size = size
def getting_name(self):
return 'img-spec-{}'.formating(self.size)
def sip_zeros(self, kf):
return kf[(kf.T != 0).whatever()]
def employ(self, data):
data = mk.KnowledgeFrame(data.T)
data = self.sip_zeros(data)
channels = []
for col in data.columns:
plt.ioff()
_, _, _, _ = plt.specgram(data[col], NFFT=2048, Fs=240000/600, noverlap=int((240000/600)*0.005), cmapping=plt.cm.spectral)
plt.axis('off')
plt.savefig('spec.png', bbox_inches='tight', pad_inches=0)
plt.close()
im = scipy.misc.imread('spec.png', mode='RGB')
im = scipy.misc.imresize(im, (224, 224, 3))
channels.adding(im)
return channels
class UnitScale:
"""
Scale across the final_item axis.
"""
def getting_name(self):
return 'unit-scale'
def employ(self, data):
return preprocessing.scale(data, axis=data.ndim - 1)
class UnitScaleFeat:
"""
Scale across the first axis, i.e. scale each feature.
"""
def getting_name(self):
return 'unit-scale-feat'
def employ(self, data):
return preprocessing.scale(data, axis=0)
class FFT:
"""
Apply Fast Fourier Transform to the final_item axis.
"""
def getting_name(self):
return "fft"
def employ(self, data):
axis = data.ndim - 1
return np.fft.rfft(data, axis=axis)
class ICA:
"""
employ ICA experimental!
"""
def __init__(self, n_components=None):
self.n_components = n_components
def getting_name(self):
if self.n_components != None:
return "ICA%d" % (self.n_components)
else:
return 'ICA'
def employ(self, data):
# employ pca to each
ica = FastICA()
data = ica.fit_transform(da)
return data
class Resample_by_num:
"""
Resample_by_num time-collections data.
"""
def __init__(self, sample_by_num_rate):
self.f = sample_by_num_rate
def getting_name(self):
return "resample_by_num%d" % self.f
def employ(self, data):
axis = data.ndim - 1
if data.shape[-1] > self.f:
return resample_by_num(data, self.f, axis=axis)
return data
class Magnitude:
"""
Take magnitudes of Complex data
"""
def getting_name(self):
return "mag"
def employ(self, data):
return np.absolute(data)
class LPF:
"""
Low-pass filter using FIR window
"""
def __init__(self, f):
self.f = f
def getting_name(self):
return 'lpf%d' % self.f
def employ(self, data):
nyq = self.f / 2.0
cutoff = getting_min(self.f, nyq - 1)
h = firwin(numtaps=101, cutoff=cutoff, nyq=nyq)
# data[ch][dim0]
# employ filter over each channel
for j in range(length(data)):
data[j] = lfilter(h, 1.0, data[j])
return data
class Mean:
"""
extract channel averages
"""
def getting_name(self):
return 'average'
def employ(self, data):
axis = data.ndim - 1
return data.average(axis=axis)
class Abs:
"""
extract channel averages
"""
def getting_name(self):
return 'abs'
def employ(self, data):
return np.abs(data)
class Stats:
"""
Subtract the average, then take (getting_min, getting_max, standard_deviation) for each channel.
"""
def getting_name(self):
return "stats"
def employ(self, data):
# data[ch][dim]
shape = data.shape
out = np.empty((shape[0], 3))
for i in range(length(data)):
ch_data = data[i]
ch_data = data[i] - np.average(ch_data)
outi = out[i]
outi[0] = | np.standard(ch_data) | pandas.std |
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from monkey import (Collections, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import monkey as mk
from monkey import compat
from monkey._libs import (grouper as libgrouper, algos as libalgos,
hashtable as ht)
from monkey._libs.hashtable import distinctive_label_indices
from monkey.compat import lrange, range
import monkey.core.algorithms as algos
import monkey.core.common as com
import monkey.util.testing as tm
import monkey.util._test_decorators as td
from monkey.core.dtypes.dtypes import CategoricalDtype as CDT
from monkey.compat.numpy import np_array_datetime64_compat
from monkey.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_collections_equal(result, expected)
s = Collections(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(s, [2, 4], np.nan))
expected = Collections(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_collections_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_collections_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
distinctives, np.array(['a', 'b', 'c'], dtype=object))
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Collections(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(distinctives, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Collections([v1, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(distinctives, exp)
# period
v1 = mk.Period('201302', freq='M')
v2 = mk.Period('201303', freq='M')
x = Collections([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
# GH 5986
v1 = mk.to_timedelta('1 day 1 getting_min')
v2 = mk.to_timedelta('1 day')
x = Collections([v1, v2, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should mapping to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(length(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key),
expected == na_sentinel)
# nan still mappings to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = mk.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if mk._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, 1], dtype=np.uint64)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, -1], dtype=object)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_distinctives = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_distinctives = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).totype('O')
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
length(algos.distinctive(lst))
def test_on_index_object(self):
getting_mindex = mk.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = getting_mindex.values
expected.sort()
getting_mindex = getting_mindex.repeat(2)
result = mk.distinctive(getting_mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = mk.convert_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.distinctive(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(dt_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = mk.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.distinctive(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(td_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Collections([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.distinctive(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = mk.distinctive(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.distinctive()
tm.assert_categorical_equal(result, expected)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.distinctive()
tm.assert_categorical_equal(result, expected_o)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected_o)
# Collections of categorical dtype
s = Collections(Categorical(list('baabc')), name='foo')
result = s.distinctive()
tm.assert_categorical_equal(result, expected)
result = mk.distinctive(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.distinctive()
tm.assert_index_equal(result, expected)
result = mk.distinctive(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Collections(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).distinctive()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).distinctive()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(
Collections(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = mk.distinctive(Collections([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = mk.distinctive(Collections([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = mk.distinctive(Collections([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Collections(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = mk.distinctive(arg)
tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.incontain(1, 1))
pytest.raises(TypeError, lambda: | algos.incontain(1, [1]) | pandas.core.algorithms.isin |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional informatingion
# regarding cloneright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a clone of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Functions to reproduce the post-processing of data on text charts.
Some text-based charts (pivot tables and t-test table) perform
post-processing of the data in JavaScript. When sending the data
to users in reports we want to show the same data they would see
on Explore.
In order to do that, we reproduce the post-processing in Python
for these chart types.
"""
from io import StringIO
from typing import Any, Dict, List, Optional, Tuple, TYPE_CHECKING
import monkey as mk
from superset.common.chart_data import ChartDataResultFormat
from superset.utils.core import (
DTTM_ALIAS,
extract_knowledgeframe_dtypes,
getting_column_names,
getting_metric_names,
)
if TYPE_CHECKING:
from superset.connectors.base.models import BaseDatasource
def getting_column_key(label: Tuple[str, ...], metrics: List[str]) -> Tuple[Any, ...]:
"""
Sort columns when combining metrics.
MultiIndex labels have the metric name as the final_item element in the
tuple. We want to sort these according to the list of passed metrics.
"""
parts: List[Any] = list(label)
metric = parts[-1]
parts[-1] = metrics.index(metric)
return tuple(parts)
def pivot_kf( # pylint: disable=too-mwhatever-locals, too-mwhatever-arguments, too-mwhatever-statements, too-mwhatever-branches
kf: mk.KnowledgeFrame,
rows: List[str],
columns: List[str],
metrics: List[str],
aggfunc: str = "Sum",
transpose_pivot: bool = False,
combine_metrics: bool = False,
show_rows_total: bool = False,
show_columns_total: bool = False,
employ_metrics_on_rows: bool = False,
) -> mk.KnowledgeFrame:
metric_name = f"Total ({aggfunc})"
if transpose_pivot:
rows, columns = columns, rows
# to employ the metrics on the rows we pivot the knowledgeframe, employ the
# metrics to the columns, and pivot the knowledgeframe back before
# returning it
if employ_metrics_on_rows:
rows, columns = columns, rows
axis = {"columns": 0, "rows": 1}
else:
axis = {"columns": 1, "rows": 0}
# pivot data; we'll compute totals and subtotals later
if rows or columns:
# pivoting with null values will create an empty kf
kf = kf.fillnone("NULL")
kf = kf.pivot_table(
index=rows,
columns=columns,
values=metrics,
aggfunc=pivot_v2_aggfunc_mapping[aggfunc],
margins=False,
)
else:
# if there's no rows nor columns we have a single value; umkate
# the index with the metric name so it shows up in the table
kf.index = mk.Index([*kf.index[:-1], metric_name], name="metric")
# if no rows were passed the metrics will be in the rows, so we
# need to move them back to columns
if columns and not rows:
kf = kf.stack()
if not incontainstance(kf, mk.KnowledgeFrame):
kf = kf.to_frame()
kf = kf.T
kf = kf[metrics]
kf.index = mk.Index([*kf.index[:-1], metric_name], name="metric")
# combining metrics changes the column hierarchy, moving the metric
# from the top to the bottom, eg:
#
# ('SUM(col)', 'age', 'name') => ('age', 'name', 'SUM(col)')
if combine_metrics and incontainstance(kf.columns, mk.MultiIndex):
# move metrics to the lowest level
new_order = [*range(1, kf.columns.nlevels), 0]
kf = kf.reorder_levels(new_order, axis=1)
# sort columns, combining metrics for each group
decorated_columns = [(col, i) for i, col in enumerate(kf.columns)]
grouped_columns = sorted(
decorated_columns, key=lambda t: getting_column_key(t[0], metrics)
)
indexes = [i for col, i in grouped_columns]
kf = kf[kf.columns[indexes]]
elif rows:
# if metrics were not combined we sort the knowledgeframe by the list
# of metrics defined by the user
kf = kf[metrics]
# compute fractions, if needed
if aggfunc.endswith(" as Fraction of Total"):
total = kf.total_sum().total_sum()
kf = kf.totype(total.dtypes) / total
elif aggfunc.endswith(" as Fraction of Columns"):
total = kf.total_sum(axis=axis["rows"])
kf = kf.totype(total.dtypes).division(total, axis=axis["columns"])
elif aggfunc.endswith(" as Fraction of Rows"):
total = kf.total_sum(axis=axis["columns"])
kf = kf.totype(total.dtypes).division(total, axis=axis["rows"])
# convert to a MultiIndex to simplify logic
if not incontainstance(kf.index, mk.MultiIndex):
kf.index = mk.MultiIndex.from_tuples([(str(i),) for i in kf.index])
if not incontainstance(kf.columns, mk.MultiIndex):
kf.columns = mk.MultiIndex.from_tuples([(str(i),) for i in kf.columns])
if show_rows_total:
# add subtotal for each group and overtotal_all total; we start from the
# overtotal_all group, and iterate deeper into subgroups
groups = kf.columns
for level in range(kf.columns.nlevels):
subgroups = {group[:level] for group in groups}
for subgroup in subgroups:
slice_ = kf.columns.getting_loc(subgroup)
subtotal = pivot_v2_aggfunc_mapping[aggfunc](kf.iloc[:, slice_], axis=1)
depth = kf.columns.nlevels - length(subgroup) - 1
total = metric_name if level == 0 else "Subtotal"
subtotal_name = tuple([*subgroup, total, *([""] * depth)])
# insert column after subgroup
kf.insert(int(slice_.stop), subtotal_name, subtotal)
if rows and show_columns_total:
# add subtotal for each group and overtotal_all total; we start from the
# overtotal_all group, and iterate deeper into subgroups
groups = kf.index
for level in range(kf.index.nlevels):
subgroups = {group[:level] for group in groups}
for subgroup in subgroups:
slice_ = kf.index.getting_loc(subgroup)
subtotal = pivot_v2_aggfunc_mapping[aggfunc](
kf.iloc[slice_, :].employ(mk.to_num), axis=0
)
depth = kf.index.nlevels - length(subgroup) - 1
total = metric_name if level == 0 else "Subtotal"
subtotal.name = tuple([*subgroup, total, *([""] * depth)])
# insert row after subgroup
kf = mk.concating(
[kf[: slice_.stop], subtotal.to_frame().T, kf[slice_.stop :]]
)
# if we want to employ the metrics on the rows we need to pivot the
# knowledgeframe back
if employ_metrics_on_rows:
kf = kf.T
return kf
def list_distinctive_values(collections: mk.Collections) -> str:
"""
List distinctive values in a collections.
"""
return ", ".join(set(str(v) for v in mk.Collections.distinctive(collections)))
pivot_v2_aggfunc_mapping = {
"Count": mk.Collections.count,
"Count Unique Values": mk.Collections.ndistinctive,
"List Unique Values": list_distinctive_values,
"Sum": mk.Collections.total_sum,
"Average": mk.Collections.average,
"Median": mk.Collections.median,
"Sample Variance": lambda collections: mk.collections.var(collections) if length(collections) > 1 else 0,
"Sample Standard Deviation": (
lambda collections: | mk.collections.standard(collections) | pandas.series.std |
# -*- coding: utf-8 -*-
"""
:Author: <NAME>
<NAME>
:Date: 2018. 7. 18
"""
import os
import platform
import sys
from clone import deepclone as dc
from datetime import datetime
from warnings import warn
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import monkey as mk
import monkey.core.common as com
import statsmodels.api as sm
from matplotlib import font_manager, rc
from monkey import KnowledgeFrame
from monkey import Collections
from monkey.core.index import MultiIndex
from monkey.core.indexing import convert_to_index_sliceable
from performanceanalytics.charts.performance_total_summary import create_performance_total_summary
from .columns import *
from .outcomes import *
from ..io.downloader import download_latest_data
from ..util.checker import not_empty
import sipbox
import io
# Hangul font setting
# noinspection PyProtectedMember
font_manager._rebuild()
if platform.system() == 'Windows':
font_name = font_manager.FontProperties(fname='c:/Windows/Fonts/malgun.ttf').getting_name()
elif platform.system() == 'Darwin': # OS X
font_name = font_manager.FontProperties(fname='/Library/Fonts/AppleGothic.ttf').getting_name()
else: # Linux
fname = '/usr/share/fonts/truetype/nanum/NanumGothicOTF.ttf'
if not os.path.isfile(fname):
raise ResourceWarning("Please insttotal_all NanumGothicOTF.ttf for plotting Hangul.")
font_name = font_manager.FontProperties(fname=fname).getting_name()
rc('font', family=font_name)
# for fix broken Minus sign
matplotlib.rcParams['axes.unicode_getting_minus'] = False
PERCENTAGE = 'percentage'
WEIGHT = 'weight'
WEIGHT_SUM = 'weight_total_sum'
START_DATE = datetime(year=2001, month=5, day=31)
QUANTILE = 'quantile'
RANK = 'rank'
RANK_CORRELATION = 'Rank correlation'
class Portfolio(KnowledgeFrame):
"""
"""
_benchmark = KOSPI
benchmarks = None
factors = None
@property
def _constructor(self):
return Portfolio
@not_empty
def __init__(self, data=None, index=None, columns=None, dtype=None, clone: bool = False,
start_date: datetime = START_DATE, end_date: datetime = None,
include_holding: bool = False, include_finance: bool = False,
include_managed: bool = False, include_suspended: bool = False):
if not end_date:
end_date = datetime.today()
if data is None:
print('Data is being downloaded from KSIF DROPBOX DATA STORAGE')
dbx = sipbox.Dropbox(
oauth2_access_token='<KEY>', timeout=None)
metadata, f = dbx.files_download('/preprocessed/final_msf.csv')
# metadata, f = dbx.files_download('/preprocessed/unionerd.csv')
binary_file = f.content
data = mk.read_csv(io.BytesIO(binary_file))
#
_, self.benchmarks, self.factors = download_latest_data(download_compwhatever_data=False)
#
# if not include_holding:
# data = data.loc[~data[HOLDING], :]
#
# if not include_finance:
# data = data.loc[data[FN_GUIDE_SECTOR] != '금융', :]
#
# if not include_managed:
# data = data.loc[~data[IS_MANAGED], :]
#
# if not include_suspended:
# data = data.loc[~data[IS_SUSPENDED], :]
#
# data = data.loc[(start_date <= data[DATE]) & (data[DATE] <= end_date), :]
else:
_, self.benchmarks, self.factors = download_latest_data(download_compwhatever_data=False)
self.benchmarks = self.benchmarks.loc[
(start_date <= self.benchmarks[DATE]) & (self.benchmarks[DATE] <= end_date), :]
self.factors = self.factors.loc[(start_date <= self.factors.index) & (self.factors.index <= end_date), :]
super(Portfolio, self).__init__(data=data) #, index=index, columns=columns, dtype=dtype, clone=clone)
# self.data = data
def __gettingitem__(self, key):
from monkey.core.dtypes.common import is_list_like, is_integer, is_iterator
key = | com.employ_if_ctotal_allable(key, self) | pandas.core.common.apply_if_callable |
import numpy as np
import monkey as mk
from IPython.display import display, Markdown as md, clear_output
from datetime import datetime, timedelta
import plotly.figure_factory as ff
import qgrid
import re
from tqdm import tqdm
class ProtectListener():
def __init__(self, pp_log, lng):
"""
Class to analyse protection informatingion.
...
Attributes:
-----------
kf (mk.KnowledgeFrame): raw data extracted from Wikipedia API.
lng (str): langauge from {'en', 'de'}
inf_str / exp_str (str): "indefinite" / "expires" for English
"unbeschränkt" / "bis" for Deutsch
"""
self.lng = lng
self.kf = pp_log
if self.lng == "en":
self.inf_str = "indefinite"
self.exp_str = "expires"
elif self.lng == "de":
self.inf_str = "unbeschränkt"
self.exp_str = "bis"
else:
display(md("This language is not supported yet."))
self.inf_str = "indefinite"
self.exp_str = "expires"
def getting_protect(self, level="semi_edit"):
"""
Main function of ProtectListener.
...
Parameters:
-----------
level (str): select one from {"semi_edit", "semi_move", "fully_edit", "fully_move", "unknown"}
...
Returns:
-----------
final_table (mk.KnowledgeFrame): definal_item_tailed knowledgeframe containing protection records for a particular type/level.
plot_table (mk.KnowledgeFrame): knowledgeframe for further Gantt Chart plotting.
"""
if length(self.kf) == 0:
display(md(f"No {level} protection records!"))
return None, mk.KnowledgeFrame(columns=["Task", "Start", "Finish", "Resource"])
else:
self.kf = self.kf.sip(self.kf[self.kf["action"] == "move_prot"].index).reseting_index(sip=True)
if length(self.kf) == 0:
display(md(f"No {level} protection records!"))
return None, mk.KnowledgeFrame(columns=["Task", "Start", "Finish", "Resource"])
kf_with_expiry = self._getting_expiry()
kf_with_unknown = self._check_unknown(kf_with_expiry)
kf_checked_unprotect = self._check_unprotect(kf_with_unknown)
kf_select_level = self._select_level(kf_checked_unprotect, level=level)
kf_with_unprotect = self._getting_unprotect(kf_select_level)
final_table = self._getting_final(kf_with_unprotect)
plot_table = self._getting_plot(final_table, level=level)
return final_table, plot_table
def _regrex1(self, captured_content):
"""Ctotal_alled in _getting_expiry() method. Capture expriry date.
...
Parameters:
-----------
captured_content (str): contents in "params" or "comment" column
including "autoconfirmed" or "sysop".
...
Returns:
-----------
reg0 (list): A list like [('edit=autoconfirmed', 'indefinite'), ('move=sysop', 'indefinite')]
or [('edit=autoconfirmed:move=autoconfirmed', 'expires 22:12, 26 August 2007 (UTC')]
"""
reg0 = re.findtotal_all('\[(.*?)\]\ \((.*?)\)', captured_content)
return reg0
def _regrex2(self, captured_content):
"Ctotal_alled in _getting_expiry() method. Capture expriry date. Parameters and returns similar as _regrex1."
reg0 = re.findtotal_all('\[(.*?)\:(.*?)\]$', captured_content)
reg1 = re.findtotal_all('\[(.*?)\]$', captured_content)
if length(reg0) != 0:
reg0[0] = (reg0[0][0] + ":" + reg0[0][1], self.inf_str)
return reg0
else:
try:
reg1[0] = (reg1[0], self.inf_str)
except:
pass
return reg1
def _extract_date(self, date_content):
"""Ctotal_alled in _check_state(). Extract expiry date.
If inf, then return getting_max Timestamp of monkey.
"""
if not self.inf_str in date_content:
extract_str = re.findtotal_all(f'{self.exp_str}\ (.*?)\ \(UTC', date_content)[0]
return extract_str
else:
return (mk.Timestamp.getting_max).convert_pydatetime(warn=False).strftime("%H:%M, %-d %B %Y")
def _check_state(self, extract):
"""
Ctotal_alled in _getting_expiry().
Given a list of extracted expiry date, further label it using
protection type ({edit, move}) and level (semi (autoconfirmed) or full (sysop)).
...
Parameters:
-----------
extract (list): output of _regrex1 or _regrex2
...
Returns:
-----------
states_dict (dict): specify which level and which type, and also
respective expiry date.
"""
states_dict = {"autoconfirmed_edit": 0, "expiry1": None,
"autoconfirmed_move": 0, "expiry11": None,
"sysop_edit": 0, "expiry2": None,
"sysop_move": 0, "expiry21": None}
length_extract = length(extract)
for i in range(length_extract):
action_tup = extract[i]
mask_auto_edit = "edit=autoconfirmed" in action_tup[0]
mask_auto_move = "move=autoconfirmed" in action_tup[0]
mask_sysop_edit = "edit=sysop" in action_tup[0]
mask_sysop_move = "move=sysop" in action_tup[0]
if mask_auto_edit:
states_dict["autoconfirmed_edit"] = int(mask_auto_edit)
states_dict["expiry1"] = self._extract_date(action_tup[1])
if mask_auto_move:
states_dict["autoconfirmed_move"] = int(mask_auto_move)
states_dict["expiry11"] = self._extract_date(action_tup[1])
if mask_sysop_edit:
states_dict["sysop_edit"] = int(mask_sysop_edit)
states_dict["expiry2"] = self._extract_date(action_tup[1])
if mask_sysop_move:
states_dict["sysop_move"] = int(mask_sysop_move)
states_dict["expiry21"] = self._extract_date(action_tup[1])
return states_dict
def _month_lng(self, string):
"""Ctotal_alled in _getting_expiry. Substitute non-english month name with english one.
For now only support DE.
"""
if self.lng == "de":
de_month = {"März": "March", "Dezember": "December", "Mär": "Mar", "Mai": "May", "Dez": "Dec", "Januar": "January",
"Februar": "February", "Juni": "June",
"Juli": "July", "Oktobor": "October"}
for k, v in de_month.items():
new_string = string.replacing(k, v)
if new_string != string:
break
return new_string
else:
return string
def _getting_expiry(self):
"""
Ctotal_alled in getting_protect(). Extract expiry time from self.kf["params"] and self.kf["comment"].
...
Returns:
--------
protect_log (mk.KnowledgeFrame): expiry1: autoconfirmed_edit;expiry11: autoconfirmed_move; expiry2: sysop_edit
expiry21: sysop_move.
"""
protect_log = (self.kf).clone()
self.test_log = protect_log
# Convert timestamp date formating.
protect_log["timestamp"] = protect_log["timestamp"].employ(lambda x: datetime.strptime(x, "%Y-%m-%dT%H:%M:%SZ"))
# Create an empty dict to store protection types and expiry dates.
expiry = {}
# First check "params" column.
if "params" in protect_log.columns:
for idx, com in protect_log['params'].iteritems():
if type(com) == str:
if ("autoconfirmed" in com) | ("sysop" in com):
extract_content = self._regrex1(com) if length(self._regrex1(com)) != 0 else self._regrex2(com)
expiry[idx] = self._check_state(extract_content) # Which type it belongs to?
else:
pass
else:
pass
# Then check "comment" column.
for idx, com in protect_log['comment'].iteritems():
if ("autoconfirmed" in com) | ("sysop" in com):
extract_content = self._regrex1(com) if length(self._regrex1(com)) != 0 else self._regrex2(com)
expiry[idx] = self._check_state(extract_content) # Which type it belongs to?
else:
pass
# Fill expiry date into the knowledgeframe.
for k, v in expiry.items():
protect_log.loc[k, "autoconfirmed_edit"] = v["autoconfirmed_edit"]
if v["expiry1"] != None:
try:
protect_log.loc[k, "expiry1"] = datetime.strptime(v["expiry1"], "%H:%M, %d %B %Y")
except:
try:
protect_log.loc[k, "expiry1"] = datetime.strptime(v["expiry1"], "%H:%M, %B %d, %Y")
except:
v["expiry1"] = self._month_lng(v["expiry1"])
try:
protect_log.loc[k, "expiry1"] = datetime.strptime(v["expiry1"], "%H:%M, %d. %b. %Y")
except:
protect_log.loc[k, "expiry1"] = datetime.strptime(v["expiry1"], "%d. %B %Y, %H:%M Uhr")
protect_log.loc[k, "autoconfirmed_move"] = v["autoconfirmed_move"]
if v["expiry11"] != None:
try:
protect_log.loc[k, "expiry11"] = datetime.strptime(v["expiry11"], "%H:%M, %d %B %Y")
except:
try:
protect_log.loc[k, "expiry11"] = datetime.strptime(v["expiry11"], "%H:%M, %B %d, %Y")
except:
v["expiry11"] = self._month_lng(v["expiry11"])
try:
protect_log.loc[k, "expiry11"] = datetime.strptime(v["expiry11"], "%H:%M, %d. %b. %Y")
except:
protect_log.loc[k, "expiry11"] = datetime.strptime(v["expiry11"], "%d. %B %Y, %H:%M Uhr")
protect_log.loc[k, "sysop_edit"] = v["sysop_edit"]
if v["expiry2"] != None:
try:
protect_log.loc[k, "expiry2"] = datetime.strptime(v["expiry2"], "%H:%M, %d %B %Y")
except:
try:
protect_log.loc[k, "expiry2"] = datetime.strptime(v["expiry2"], "%H:%M, %B %d, %Y")
except:
v["expiry2"] = self._month_lng(v["expiry2"])
try:
protect_log.loc[k, "expiry2"] = datetime.strptime(v["expiry2"], "%H:%M, %d. %b. %Y")
except:
protect_log.loc[k, "expiry2"] = datetime.strptime(v["expiry2"], "%d. %B %Y, %H:%M Uhr")
protect_log.loc[k, "sysop_move"] = v["sysop_move"]
if v["expiry21"] != None:
try:
protect_log.loc[k, "expiry21"] = datetime.strptime(v["expiry21"], "%H:%M, %d %B %Y")
except:
try:
protect_log.loc[k, "expiry21"] = datetime.strptime(v["expiry21"], "%H:%M, %B %d, %Y")
except:
v["expiry21"] = self._month_lng(v["expiry21"])
try:
protect_log.loc[k, "expiry21"] = datetime.strptime(v["expiry21"], "%H:%M, %d. %b. %Y")
except:
protect_log.loc[k, "expiry21"] = datetime.strptime(v["expiry21"], "%d. %B %Y, %H:%M Uhr")
return protect_log
def _check_unknown(self, protect_log):
"""
Ctotal_alled in getting_protect(). Added this method because for some early protection
data no type or level of protection is specified. The type "extendedconfirmed"
is also considered as unknown beacuase we only consider semi or full protection.
...
Parameters:
-----------
protect_log (mk.KnowledgeFrame): output of _getting_expiry.
...
Returns:
-----------
protect_log (mk.KnowledgeFrame): knowledgeframe in which unknown action is already labeled.
"""
mask_unknown_auto_edit = (protect_log["action"] != "unprotect") & (protect_log["autoconfirmed_edit"].ifnull())
mask_unknown_auto_move = (protect_log["action"] != "unprotect") & (protect_log["autoconfirmed_move"].ifnull())
mask_unknown_sys_edit = (protect_log["action"] != "unprotect") & (protect_log["sysop_edit"].ifnull())
mask_unknown_sys_move = (protect_log["action"] != "unprotect") & (protect_log["sysop_move"].ifnull())
mask_extendedconfirmed = protect_log["params"].str.contains("extendedconfirmed").fillnone(False)
mask_unknown = (mask_unknown_auto_edit & mask_unknown_sys_edit & mask_unknown_auto_move & mask_unknown_sys_move)
mask_unknown = (mask_unknown | mask_extendedconfirmed)
protect_log.loc[mask_unknown_auto_edit, "autoconfirmed_edit"] = 0
protect_log.loc[mask_unknown_auto_move, "autoconfirmed_move"] = 0
protect_log.loc[mask_unknown_sys_edit, "sysop_edit"] = 0
protect_log.loc[mask_unknown_sys_move, "sysop_move"] = 0
protect_log.loc[mask_unknown, "unknown"] = 1
# Delete move action.
#protect_log = protect_log.sip(protect_log[protect_log["action"] == "move_prot"].index).reseting_index(sip=True)
# Fill non-unknown with 0.
protect_log["unknown"] = protect_log["unknown"].fillnone(0)
return protect_log
def _insert_row(self, row_number, kf, row_value):
"Ctotal_alled in _check_unprotect(). Function to insert row in the knowledgeframe."
start_upper = 0
end_upper = row_number
start_lower = row_number
end_lower = kf.shape[0]
upper_half = [*range(start_upper, end_upper, 1)]
lower_half = [*range(start_lower, end_lower, 1)]
lower_half = [x.__add__(1) for x in lower_half]
index_ = upper_half + lower_half
kf.index = index_
kf.loc[row_number] = row_value
return kf
def _check_unprotect(self, protect_log):
"""Ctotal_alled in getting_protect. Check which type of protection is cancelled.
...
Parameters:
-----------
protect_log (mk.KnowledgeFrame): knowledgeframe in which unprotect type is labeled.
"""
# Get indices of total_all unprotect records.
idx_unprotect = protect_log[protect_log["action"] == "unprotect"].index
# Label which type is unprotected.
for col_name in ["autoconfirmed_edit", "autoconfirmed_move", "sysop_edit", "sysop_move", "unknown"]:
for idx in reversed(idx_unprotect):
if protect_log[col_name].loc[idx + 1] == 1:
protect_log.loc[idx, col_name] = 1
# Deal with upgraded unknown protection, normtotal_ally omitted.
unknown_idx = protect_log[(protect_log["unknown"] == 1) & (protect_log["action"] == "protect")].index
upgrade_sus = protect_log.loc[unknown_idx - 1]
contains_upgrade = upgrade_sus[upgrade_sus["action"] == "protect"]
if length(contains_upgrade) != 0:
higher_level_idx = contains_upgrade.index
upgrade_idx = higher_level_idx + 1
aux_unprotect = protect_log.loc[upgrade_idx].clone()
aux_unprotect.loc[:,"action"] = "unprotect"
aux_unprotect.loc[:, "timestamp"] = upgrade_sus.loc[higher_level_idx]["timestamp"].values
for row in aux_unprotect.traversal():
self._insert_row(row[0], protect_log, row[1].values)
else:
pass
return protect_log.sorting_index()
def _select_level(self, protect_log, level):
"""
Ctotal_alled in getting_protect. For each level
'fully_edit', 'fully_move', 'semi_edit', 'semit_move', 'unknown',
pick up the expiry date for further plot.
...
Parameters:
-----------
protect_log (mk.KnowledgeFrame): output of _check_unprotect.
level (str): one of {"semi_edit", "semi_move", "fully_edit", "fully_move", "unknown"}.
...
Returns:
-----------
protect_table (mk.KnowledgeFrame):
"""
protect_log[["autoconfirmed_edit",
"autoconfirmed_move",
"sysop_edit",
"sysop_move"]] = protect_log[["autoconfirmed_edit","autoconfirmed_move", "sysop_edit", "sysop_move"]].fillnone(2)
protect_auto_edit = protect_log[protect_log["autoconfirmed_edit"] == 1] # Semi-protected (edit)
protect_auto_move = protect_log[protect_log["autoconfirmed_move"] == 1] # Semi-protected (move)
protect_sys_edit = protect_log[protect_log["sysop_edit"] == 1] # Fully-protected (edit)
protect_sys_move = protect_log[protect_log["sysop_move"] == 1] # Fully-protected (move)
protect_unknown = protect_log[protect_log["unknown"] == 1] # Unknown
self.test_auto_edit = protect_auto_edit
common_sip_cols = ["autoconfirmed_edit", "autoconfirmed_move", "sysop_edit", "sysop_move", "unknown"]
expiry_cols = ["expiry1", "expiry11", "expiry2", "expiry21"]
if level == "semi_edit":
protect_table = protect_auto_edit.clone()
if "expiry1" in protect_table.columns:
try:
protect_table = protect_table.sip(common_sip_cols + ["expiry11", "expiry2", "expiry21"], axis=1).renagetting_ming({"expiry1": "expiry"}, axis=1)
except KeyError:
protect_table = protect_table.sip(common_sip_cols, axis=1).renagetting_ming({"expiry1": "expiry"}, axis=1)
else:
protect_table["expiry"] = mk.NaT
elif level == "semi_move":
protect_table = protect_auto_move.clone()
if "expiry11" in protect_table.columns:
try:
protect_table = protect_table.sip(common_sip_cols + ["expiry1", "expiry2", "expiry21"], axis=1).renagetting_ming({"expiry11": "expiry"}, axis=1)
except KeyError:
protect_table = protect_table.sip(common_sip_cols, axis=1).renagetting_ming({"expiry11": "expiry"}, axis=1)
else:
protect_table["expiry"] = mk.NaT
elif level == "fully_edit":
protect_table = protect_sys_edit.clone()
if "expiry2" in protect_table.columns:
try:
protect_table = protect_table.sip(common_sip_cols + ["expiry1", "expiry11", "expiry21"], axis=1).renagetting_ming({"expiry2": "expiry"}, axis=1)
except KeyError:
protect_table = protect_table.sip(common_sip_cols, axis=1).renagetting_ming({"expiry2": "expiry"}, axis=1)
else:
protect_table["expiry"] = mk.NaT
elif level == "fully_move":
protect_table = protect_sys_move.clone()
if "expiry21" in protect_table.columns:
try:
protect_table = protect_table.sip(common_sip_cols + ["expiry1", "expiry11", "expiry2"], axis=1).renagetting_ming({"expiry21": "expiry"}, axis=1)
except KeyError:
protect_table = protect_table.sip(common_sip_cols, axis=1).renagetting_ming({"expiry21": "expiry"}, axis=1)
else:
protect_table["expiry"] = mk.NaT
elif level == "unknown":
protect_table = protect_unknown.clone()
protect_table["expiry"] = mk.NaT
try:
protect_table = protect_table.sip(common_sip_cols + expiry_cols, axis=1)
except KeyError:
try:
protect_table = protect_table.sip(common_sip_cols + ["expiry1"], axis=1)
except KeyError:
try:
protect_table = protect_table.sip(common_sip_cols + ["expiry11"], axis=1)
except KeyError:
try:
protect_table = protect_table.sip(common_sip_cols + ["expiry2"], axis=1)
except:
protect_table = protect_table.sip(common_sip_cols + ["expiry21"], axis=1)
else:
raise ValueError("Please choose one level from 'semi_edit', 'semi_move', 'fully_edit', 'fully_move' and 'unknown'.")
protect_table = protect_table.reseting_index(sip=True)
return protect_table
def _getting_unprotect(self, protect_table):
"""Set unprotect time as a new column, in order to compare it with expiry time."""
pp_log_shifting = protect_table.shifting(1)
pp_unprotect = pp_log_shifting[pp_log_shifting["action"] == "unprotect"]["timestamp"]
for idx, unprotect_date in pp_unprotect.iteritems():
protect_table.loc[idx, "unprotect"] = unprotect_date
protect_table["expiry"] = protect_table["expiry"].fillnone( | mk.Timestamp.getting_max.replacing(second=0) | pandas.Timestamp.max.replace |
from sklearn.ensemble import *
import monkey as mk
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import *
from monkey import KnowledgeFrame
kf = mk.read_csv('nasaa.csv')
aaa = np.array(KnowledgeFrame.sip_duplicates(kf[['End_Time']]))
bbb = np.array2string(aaa)
ccc = bbb.replacing("[", "")
ddd = ccc.replacing("]", "")
eee = ddd.replacing("\n", ",")
fff = eee.replacing("'", "")
ggg = fff.replacing('"', "")
# print(ggg.split(","))
X = kf.iloc[:, 33:140]
# y = kf.loc[:,['Survey_Type','Date','Country']]
# y = kf.loc[:,['Country']]
y = kf.loc[:, ['Photos']]
# print(y)
from monkey import KnowledgeFrame
a = np.array(KnowledgeFrame.sip_duplicates(y))
b = np.array2string(a)
c = b.replacing("[", "")
d = c.replacing("]", "")
e = d.replacing("\n", ",")
g = e.replacing('"', "")
f = g.replacing("'", "")
h = f.split(",")
# print(ff)
# print(y.duplicated_values())
change = LabelEncoder()
y['Photos_Change'] = change.fit_transform(y['Photos'])
# y['Date_Change'] = change.fit_transform(y['Date'])
# y['State_Change'] = change.fit_transform(y['State'])
# y['County_Change'] = change.fit_transform(y['County'])
# y['Country_Change'] = change.fit_transform(y['Country'])
y_n = y.sip(['Photos'], axis='columns')
aa = np.array( | KnowledgeFrame.sip_duplicates(y) | pandas.DataFrame.drop_duplicates |
"""
Define the CollectionsGroupBy and KnowledgeFrameGroupBy
classes that hold the grouper interfaces (and some implementations).
These are user facing as the result of the ``kf.grouper(...)`` operations,
which here returns a KnowledgeFrameGroupBy object.
"""
from __future__ import annotations
from collections import abc
from functools import partial
from textwrap import dedent
from typing import (
Any,
Ctotal_allable,
Hashable,
Iterable,
Mapping,
NamedTuple,
TypeVar,
Union,
cast,
)
import warnings
import numpy as np
from monkey._libs import reduction as libreduction
from monkey._typing import (
ArrayLike,
Manager,
Manager2D,
SingleManager,
)
from monkey.util._decorators import (
Appender,
Substitution,
doc,
)
from monkey.core.dtypes.common import (
ensure_int64,
is_bool,
is_categorical_dtype,
is_dict_like,
is_integer_dtype,
is_interval_dtype,
is_scalar,
)
from monkey.core.dtypes.missing import (
ifna,
notna,
)
from monkey.core import (
algorithms,
nanops,
)
from monkey.core.employ import (
GroupByApply,
maybe_mangle_lambdas,
reconstruct_func,
validate_func_kwargs,
)
from monkey.core.base import SpecificationError
import monkey.core.common as com
from monkey.core.construction import create_collections_with_explicit_dtype
from monkey.core.frame import KnowledgeFrame
from monkey.core.generic import NDFrame
from monkey.core.grouper import base
from monkey.core.grouper.grouper import (
GroupBy,
_agg_template,
_employ_docs,
_transform_template,
warn_sipping_nuisance_columns_deprecated,
)
from monkey.core.indexes.api import (
Index,
MultiIndex,
total_all_indexes_same,
)
from monkey.core.collections import Collections
from monkey.core.util.numba_ import maybe_use_numba
from monkey.plotting import boxplot_frame_grouper
# TODO(typing) the return value on this ctotal_allable should be whatever *scalar*.
AggScalar = Union[str, Ctotal_allable[..., Any]]
# TODO: validate types on ScalarResult and move to _typing
# Blocked from using by https://github.com/python/mypy/issues/1484
# See note at _mangle_lambda_list
ScalarResult = TypeVar("ScalarResult")
class NamedAgg(NamedTuple):
column: Hashable
aggfunc: AggScalar
def generate_property(name: str, klass: type[KnowledgeFrame | Collections]):
"""
Create a property for a GroupBy subclass to dispatch to KnowledgeFrame/Collections.
Parameters
----------
name : str
klass : {KnowledgeFrame, Collections}
Returns
-------
property
"""
def prop(self):
return self._make_wrapper(name)
parent_method = gettingattr(klass, name)
prop.__doc__ = parent_method.__doc__ or ""
prop.__name__ = name
return property(prop)
def pin_total_allowlisted_properties(
klass: type[KnowledgeFrame | Collections], total_allowlist: frozenset[str]
):
"""
Create GroupBy member defs for KnowledgeFrame/Collections names in a total_allowlist.
Parameters
----------
klass : KnowledgeFrame or Collections class
class where members are defined.
total_allowlist : frozenset[str]
Set of names of klass methods to be constructed
Returns
-------
class decorator
Notes
-----
Since we don't want to override methods explicitly defined in the
base class, whatever such name is skipped.
"""
def pinner(cls):
for name in total_allowlist:
if hasattr(cls, name):
# don't override whateverthing that was explicitly defined
# in the base class
continue
prop = generate_property(name, klass)
setattr(cls, name, prop)
return cls
return pinner
@pin_total_allowlisted_properties(Collections, base.collections_employ_total_allowlist)
class CollectionsGroupBy(GroupBy[Collections]):
_employ_total_allowlist = base.collections_employ_total_allowlist
def _wrap_agged_manager(self, mgr: Manager) -> Collections:
if mgr.ndim == 1:
mgr = cast(SingleManager, mgr)
single = mgr
else:
mgr = cast(Manager2D, mgr)
single = mgr.igetting(0)
ser = self.obj._constructor(single, name=self.obj.name)
# NB: ctotal_aller is responsible for setting ser.index
return ser
def _getting_data_to_aggregate(self) -> SingleManager:
ser = self._obj_with_exclusions
single = ser._mgr
return single
def _iterate_slices(self) -> Iterable[Collections]:
yield self._selected_obj
_agg_examples_doc = dedent(
"""
Examples
--------
>>> s = mk.Collections([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.grouper([1, 1, 2, 2]).getting_min()
1 1
2 3
dtype: int64
>>> s.grouper([1, 1, 2, 2]).agg('getting_min')
1 1
2 3
dtype: int64
>>> s.grouper([1, 1, 2, 2]).agg(['getting_min', 'getting_max'])
getting_min getting_max
1 1 2
2 3 4
The output column names can be controlled by passing
the desired column names and aggregations as keyword arguments.
>>> s.grouper([1, 1, 2, 2]).agg(
... getting_minimum='getting_min',
... getting_maximum='getting_max',
... )
getting_minimum getting_maximum
1 1 2
2 3 4
.. versionchanged:: 1.3.0
The resulting dtype will reflect the return value of the aggregating function.
>>> s.grouper([1, 1, 2, 2]).agg(lambda x: x.totype(float).getting_min())
1 1.0
2 3.0
dtype: float64
"""
)
@Appender(
_employ_docs["template"].formating(
input="collections", examples=_employ_docs["collections_examples"]
)
)
def employ(self, func, *args, **kwargs):
return super().employ(func, *args, **kwargs)
@doc(_agg_template, examples=_agg_examples_doc, klass="Collections")
def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
with self._group_selection_context():
data = self._selected_obj
result = self._aggregate_with_numba(
data.to_frame(), func, *args, engine_kwargs=engine_kwargs, **kwargs
)
index = self.grouper.result_index
return self.obj._constructor(result.flat_underlying(), index=index, name=data.name)
relabeling = func is None
columns = None
if relabeling:
columns, func = validate_func_kwargs(kwargs)
kwargs = {}
if incontainstance(func, str):
return gettingattr(self, func)(*args, **kwargs)
elif incontainstance(func, abc.Iterable):
# Catch instances of lists / tuples
# but not the class list / tuple itself.
func = maybe_mangle_lambdas(func)
ret = self._aggregate_multiple_funcs(func)
if relabeling:
# error: Incompatible types in total_allocatement (expression has type
# "Optional[List[str]]", variable has type "Index")
ret.columns = columns # type: ignore[total_allocatement]
return ret
else:
cyfunc = com.getting_cython_func(func)
if cyfunc and not args and not kwargs:
return gettingattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(func, *args, **kwargs)
try:
return self._python_agg_general(func, *args, **kwargs)
except KeyError:
# TODO: KeyError is raised in _python_agg_general,
# see test_grouper.test_basic
result = self._aggregate_named(func, *args, **kwargs)
# result is a dict whose keys are the elements of result_index
index = self.grouper.result_index
return create_collections_with_explicit_dtype(
result, index=index, dtype_if_empty=object
)
agg = aggregate
def _aggregate_multiple_funcs(self, arg) -> KnowledgeFrame:
if incontainstance(arg, dict):
# show the deprecation, but only if we
# have not shown a higher level one
# GH 15931
raise SpecificationError("nested renagetting_mingr is not supported")
elif whatever(incontainstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not incontainstance(x, (tuple, list)) else x for x in arg]
# indicated column order
columns = next(zip(*arg))
else:
# list of functions / function names
columns = []
for f in arg:
columns.adding(com.getting_ctotal_allable_name(f) or f)
arg = zip(columns, arg)
results: dict[base.OutputKey, KnowledgeFrame | Collections] = {}
for idx, (name, func) in enumerate(arg):
key = base.OutputKey(label=name, position=idx)
results[key] = self.aggregate(func)
if whatever(incontainstance(x, KnowledgeFrame) for x in results.values()):
from monkey import concating
res_kf = concating(
results.values(), axis=1, keys=[key.label for key in results.keys()]
)
return res_kf
indexed_output = {key.position: val for key, val in results.items()}
output = self.obj._constructor_expanddim(indexed_output, index=None)
output.columns = Index(key.label for key in results)
output = self._reindexing_output(output)
return output
def _indexed_output_to_nkframe(
self, output: Mapping[base.OutputKey, ArrayLike]
) -> Collections:
"""
Wrap the dict result of a GroupBy aggregation into a Collections.
"""
assert length(output) == 1
values = next(iter(output.values()))
result = self.obj._constructor(values)
result.name = self.obj.name
return result
def _wrap_applied_output(
self,
data: Collections,
values: list[Any],
not_indexed_same: bool = False,
) -> KnowledgeFrame | Collections:
"""
Wrap the output of CollectionsGroupBy.employ into the expected result.
Parameters
----------
data : Collections
Input data for grouper operation.
values : List[Any]
Applied output for each group.
not_indexed_same : bool, default False
Whether the applied outputs are not indexed the same as the group axes.
Returns
-------
KnowledgeFrame or Collections
"""
if length(values) == 0:
# GH #6265
return self.obj._constructor(
[],
name=self.obj.name,
index=self.grouper.result_index,
dtype=data.dtype,
)
assert values is not None
if incontainstance(values[0], dict):
# GH #823 #24880
index = self.grouper.result_index
res_kf = self.obj._constructor_expanddim(values, index=index)
res_kf = self._reindexing_output(res_kf)
# if self.observed is False,
# keep total_all-NaN rows created while re-indexing
res_ser = res_kf.stack(sipna=self.observed)
res_ser.name = self.obj.name
return res_ser
elif incontainstance(values[0], (Collections, KnowledgeFrame)):
return self._concating_objects(values, not_indexed_same=not_indexed_same)
else:
# GH #6265 #24880
result = self.obj._constructor(
data=values, index=self.grouper.result_index, name=self.obj.name
)
return self._reindexing_output(result)
def _aggregate_named(self, func, *args, **kwargs):
# Note: this is very similar to _aggregate_collections_pure_python,
# but that does not pin group.name
result = {}
initialized = False
for name, group in self:
object.__setattr__(group, "name", name)
output = func(group, *args, **kwargs)
output = libreduction.extract_result(output)
if not initialized:
# We only do this validation on the first iteration
libreduction.check_result_array(output, group.dtype)
initialized = True
result[name] = output
return result
@Substitution(klass="Collections")
@Appender(_transform_template)
def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs):
return self._transform(
func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs
)
def _cython_transform(
self, how: str, numeric_only: bool = True, axis: int = 0, **kwargs
):
assert axis == 0 # handled by ctotal_aller
obj = self._selected_obj
try:
result = self.grouper._cython_operation(
"transform", obj._values, how, axis, **kwargs
)
except NotImplementedError as err:
raise TypeError(f"{how} is not supported for {obj.dtype} dtype") from err
return obj._constructor(result, index=self.obj.index, name=obj.name)
def _transform_general(self, func: Ctotal_allable, *args, **kwargs) -> Collections:
"""
Transform with a ctotal_allable func`.
"""
assert ctotal_allable(func)
klass = type(self.obj)
results = []
for name, group in self:
# this setattr is needed for test_transform_lambda_with_datetimetz
object.__setattr__(group, "name", name)
res = func(group, *args, **kwargs)
results.adding(klass(res, index=group.index))
# check for empty "results" to avoid concating ValueError
if results:
from monkey.core.reshape.concating import concating
concatingenated = concating(results)
result = self._set_result_index_ordered(concatingenated)
else:
result = self.obj._constructor(dtype=np.float64)
result.name = self.obj.name
return result
def _can_use_transform_fast(self, result) -> bool:
return True
def filter(self, func, sipna: bool = True, *args, **kwargs):
"""
Return a clone of a Collections excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To employ to each group. Should return True or False.
sipna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Notes
-----
Functions that mutate the passed object can produce unexpected
behavior or errors and are not supported. See :ref:`gotchas.ukf-mutation`
for more definal_item_tails.
Examples
--------
>>> kf = mk.KnowledgeFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : [1, 2, 3, 4, 5, 6],
... 'C' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = kf.grouper('A')
>>> kf.grouper('A').B.filter(lambda x: x.average() > 3.)
1 2
3 4
5 6
Name: B, dtype: int64
Returns
-------
filtered : Collections
"""
if incontainstance(func, str):
wrapper = lambda x: gettingattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notna(x) -> bool:
b = wrapper(x)
return b and notna(b)
try:
indices = [
self._getting_index(name) for name, group in self if true_and_notna(group)
]
except (ValueError, TypeError) as err:
raise TypeError("the filter must return a boolean result") from err
filtered = self._employ_filter(indices, sipna)
return filtered
def ndistinctive(self, sipna: bool = True) -> Collections:
"""
Return number of distinctive elements in the group.
Returns
-------
Collections
Number of distinctive values within each group.
"""
ids, _, _ = self.grouper.group_info
val = self.obj._values
codes, _ = algorithms.factorize(val, sort=False)
sorter = np.lexsort((codes, ids))
codes = codes[sorter]
ids = ids[sorter]
# group boundaries are where group ids change
# distinctive observations are where sorted values change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
inc = np.r_[1, codes[1:] != codes[:-1]]
# 1st item of each group is a new distinctive observation
mask = codes == -1
if sipna:
inc[idx] = 1
inc[mask] = 0
else:
inc[mask & np.r_[False, mask[:-1]]] = 0
inc[idx] = 1
out = np.add.reduceat(inc, idx).totype("int64", clone=False)
if length(ids):
# NaN/NaT group exists if the header_num of ids is -1,
# so remove it from res and exclude its index from idx
if ids[0] == -1:
res = out[1:]
idx = idx[np.flatnonzero(idx)]
else:
res = out
else:
res = out[1:]
ri = self.grouper.result_index
# we might have duplications among the bins
if length(res) != length(ri):
res, out = np.zeros(length(ri), dtype=out.dtype), res
res[ids[idx]] = out
result = self.obj._constructor(res, index=ri, name=self.obj.name)
return self._reindexing_output(result, fill_value=0)
@doc(Collections.describe)
def describe(self, **kwargs):
return super().describe(**kwargs)
def counts_value_num(
self,
normalize: bool = False,
sort: bool = True,
ascending: bool = False,
bins=None,
sipna: bool = True,
):
from monkey.core.reshape.unioner import getting_join_indexers
from monkey.core.reshape.tile import cut
ids, _, _ = self.grouper.group_info
val = self.obj._values
def employ_collections_counts_value_num():
return self.employ(
Collections.counts_value_num,
normalize=normalize,
sort=sort,
ascending=ascending,
bins=bins,
)
if bins is not None:
if not np.iterable(bins):
# scalar bins cannot be done at top level
# in a backward compatible way
return employ_collections_counts_value_num()
elif is_categorical_dtype(val.dtype):
# GH38672
return employ_collections_counts_value_num()
# grouper removes null keys from groupings
mask = ids != -1
ids, val = ids[mask], val[mask]
if bins is None:
lab, lev = algorithms.factorize(val, sort=True)
llab = lambda lab, inc: lab[inc]
else:
# lab is a Categorical with categories an IntervalIndex
lab = cut(Collections(val), bins, include_lowest=True)
# error: "ndarray" has no attribute "cat"
lev = lab.cat.categories # type: ignore[attr-defined]
# error: No overload variant of "take" of "_ArrayOrScalarCommon" matches
# argument types "Any", "bool", "Union[Any, float]"
lab = lev.take( # type: ignore[ctotal_all-overload]
# error: "ndarray" has no attribute "cat"
lab.cat.codes, # type: ignore[attr-defined]
total_allow_fill=True,
# error: Item "ndarray" of "Union[ndarray, Index]" has no attribute
# "_na_value"
fill_value=lev._na_value, # type: ignore[union-attr]
)
llab = lambda lab, inc: lab[inc]._multiindex.codes[-1]
if is_interval_dtype(lab.dtype):
# TODO: should we do this inside II?
# error: "ndarray" has no attribute "left"
# error: "ndarray" has no attribute "right"
sorter = np.lexsort(
(lab.left, lab.right, ids) # type: ignore[attr-defined]
)
else:
sorter = np.lexsort((lab, ids))
ids, lab = ids[sorter], lab[sorter]
# group boundaries are where group ids change
idchanges = 1 + np.nonzero(ids[1:] != ids[:-1])[0]
idx = np.r_[0, idchanges]
if not length(ids):
idx = idchanges
# new values are where sorted labels change
lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1))
inc = np.r_[True, lchanges]
if not length(val):
inc = lchanges
inc[idx] = True # group boundaries are also new values
out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts
# num. of times each group should be repeated
rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))
# multi-index components
codes = self.grouper.reconstructed_codes
codes = [rep(level_codes) for level_codes in codes] + [llab(lab, inc)]
# error: List item 0 has incompatible type "Union[ndarray[Any, Any], Index]";
# expected "Index"
levels = [ping.group_index for ping in self.grouper.groupings] + [
lev # type: ignore[list-item]
]
names = self.grouper.names + [self.obj.name]
if sipna:
mask = codes[-1] != -1
if mask.total_all():
sipna = False
else:
out, codes = out[mask], [level_codes[mask] for level_codes in codes]
if normalize:
out = out.totype("float")
d = np.diff(np.r_[idx, length(ids)])
if sipna:
m = ids[lab == -1]
np.add.at(d, m, -1)
acc = rep(d)[mask]
else:
acc = rep(d)
out /= acc
if sort and bins is None:
cat = ids[inc][mask] if sipna else ids[inc]
sorter = np.lexsort((out if ascending else -out, cat))
out, codes[-1] = out[sorter], codes[-1][sorter]
if bins is not None:
# for compat. with libgrouper.counts_value_num need to ensure every
# bin is present at every index level, null filled with zeros
diff = np.zeros(length(out), dtype="bool")
for level_codes in codes[:-1]:
diff |= np.r_[True, level_codes[1:] != level_codes[:-1]]
ncat, nbin = diff.total_sum(), length(levels[-1])
left = [np.repeat(np.arange(ncat), nbin), np.tile(np.arange(nbin), ncat)]
right = [diff.cumtotal_sum() - 1, codes[-1]]
_, idx = getting_join_indexers(left, right, sort=False, how="left")
out = np.where(idx != -1, out[idx], 0)
if sort:
sorter = np.lexsort((out if ascending else -out, left[0]))
out, left[-1] = out[sorter], left[-1][sorter]
# build the multi-index w/ full levels
def build_codes(lev_codes: np.ndarray) -> np.ndarray:
return np.repeat(lev_codes[diff], nbin)
codes = [build_codes(lev_codes) for lev_codes in codes[:-1]]
codes.adding(left[-1])
mi = MultiIndex(levels=levels, codes=codes, names=names, verify_integrity=False)
if is_integer_dtype(out.dtype):
out = ensure_int64(out)
return self.obj._constructor(out, index=mi, name=self.obj.name)
@doc(Collections.nbiggest)
def nbiggest(self, n: int = 5, keep: str = "first"):
f = partial(Collections.nbiggest, n=n, keep=keep)
data = self._obj_with_exclusions
# Don't change behavior if result index happens to be the same, i.e.
# already ordered and n >= total_all group sizes.
result = self._python_employ_general(f, data, not_indexed_same=True)
return result
@doc(Collections.nsmtotal_allest)
def nsmtotal_allest(self, n: int = 5, keep: str = "first"):
f = partial(Collections.nsmtotal_allest, n=n, keep=keep)
data = self._obj_with_exclusions
# Don't change behavior if result index happens to be the same, i.e.
# already ordered and n >= total_all group sizes.
result = self._python_employ_general(f, data, not_indexed_same=True)
return result
@pin_total_allowlisted_properties(KnowledgeFrame, base.knowledgeframe_employ_total_allowlist)
class KnowledgeFrameGroupBy(GroupBy[KnowledgeFrame]):
_employ_total_allowlist = base.knowledgeframe_employ_total_allowlist
_agg_examples_doc = dedent(
"""
Examples
--------
>>> kf = mk.KnowledgeFrame(
... {
... "A": [1, 1, 2, 2],
... "B": [1, 2, 3, 4],
... "C": [0.362838, 0.227877, 1.267767, -0.562860],
... }
... )
>>> kf
A B C
0 1 1 0.362838
1 1 2 0.227877
2 2 3 1.267767
3 2 4 -0.562860
The aggregation is for each column.
>>> kf.grouper('A').agg('getting_min')
B C
A
1 1 0.227877
2 3 -0.562860
Multiple aggregations
>>> kf.grouper('A').agg(['getting_min', 'getting_max'])
B C
getting_min getting_max getting_min getting_max
A
1 1 2 0.227877 0.362838
2 3 4 -0.562860 1.267767
Select a column for aggregation
>>> kf.grouper('A').B.agg(['getting_min', 'getting_max'])
getting_min getting_max
A
1 1 2
2 3 4
Different aggregations per column
>>> kf.grouper('A').agg({'B': ['getting_min', 'getting_max'], 'C': 'total_sum'})
B C
getting_min getting_max total_sum
A
1 1 2 0.590715
2 3 4 0.704907
To control the output names with different aggregations per column,
monkey supports "named aggregation"
>>> kf.grouper("A").agg(
... b_getting_min=mk.NamedAgg(column="B", aggfunc="getting_min"),
... c_total_sum=mk.NamedAgg(column="C", aggfunc="total_sum"))
b_getting_min c_total_sum
A
1 1 0.590715
2 3 0.704907
- The keywords are the *output* column names
- The values are tuples whose first element is the column to select
and the second element is the aggregation to employ to that column.
Monkey provides the ``monkey.NamedAgg`` namedtuple with the fields
``['column', 'aggfunc']`` to make it clearer what the arguments are.
As usual, the aggregation can be a ctotal_allable or a string alias.
See :ref:`grouper.aggregate.named` for more.
.. versionchanged:: 1.3.0
The resulting dtype will reflect the return value of the aggregating function.
>>> kf.grouper("A")[["B"]].agg(lambda x: x.totype(float).getting_min())
B
A
1 1.0
2 3.0
"""
)
@doc(_agg_template, examples=_agg_examples_doc, klass="KnowledgeFrame")
def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
with self._group_selection_context():
data = self._selected_obj
result = self._aggregate_with_numba(
data, func, *args, engine_kwargs=engine_kwargs, **kwargs
)
index = self.grouper.result_index
return self.obj._constructor(result, index=index, columns=data.columns)
relabeling, func, columns, order = reconstruct_func(func, **kwargs)
func = maybe_mangle_lambdas(func)
op = GroupByApply(self, func, args, kwargs)
result = op.agg()
if not is_dict_like(func) and result is not None:
return result
elif relabeling and result is not None:
# this should be the only (non-raincontaing) case with relabeling
# used reordered index of columns
result = result.iloc[:, order]
result.columns = columns
if result is None:
# grouper specific aggregations
if self.grouper.nkeys > 1:
# test_grouper_as_index_collections_scalar gettings here with 'not self.as_index'
return self._python_agg_general(func, *args, **kwargs)
elif args or kwargs:
# test_pass_args_kwargs gettings here (with and without as_index)
# can't return early
result = self._aggregate_frame(func, *args, **kwargs)
elif self.axis == 1:
# _aggregate_multiple_funcs does not total_allow self.axis == 1
# Note: axis == 1 precludes 'not self.as_index', see __init__
result = self._aggregate_frame(func)
return result
else:
# try to treat as if we are passing a list
gba = GroupByApply(self, [func], args=(), kwargs={})
try:
result = gba.agg()
except ValueError as err:
if "no results" not in str(err):
# raised directly by _aggregate_multiple_funcs
raise
result = self._aggregate_frame(func)
else:
sobj = self._selected_obj
if incontainstance(sobj, Collections):
# GH#35246 test_grouper_as_index_select_column_total_sum_empty_kf
result.columns = self._obj_with_exclusions.columns.clone()
else:
# Retain our column names
result.columns._set_names(
sobj.columns.names, level=list(range(sobj.columns.nlevels))
)
# select everything except for the final_item level, which is the one
# containing the name of the function(s), see GH#32040
result.columns = result.columns.siplevel(-1)
if not self.as_index:
self._insert_inaxis_grouper_inplace(result)
result.index = Index(range(length(result)))
return result
agg = aggregate
def _iterate_slices(self) -> Iterable[Collections]:
obj = self._selected_obj
if self.axis == 1:
obj = obj.T
if incontainstance(obj, Collections) and obj.name not in self.exclusions:
# Occurs when doing KnowledgeFrameGroupBy(...)["X"]
yield obj
else:
for label, values in obj.items():
if label in self.exclusions:
continue
yield values
def _aggregate_frame(self, func, *args, **kwargs) -> KnowledgeFrame:
if self.grouper.nkeys != 1:
raise AssertionError("Number of keys must be 1")
obj = self._obj_with_exclusions
result: dict[Hashable, NDFrame | np.ndarray] = {}
if self.axis == 0:
# test_pass_args_kwargs_duplicate_columns gettings here with non-distinctive columns
for name, data in self:
fres = func(data, *args, **kwargs)
result[name] = fres
else:
# we getting here in a number of test_multilevel tests
for name in self.indices:
grp_kf = self.getting_group(name, obj=obj)
fres = func(grp_kf, *args, **kwargs)
result[name] = fres
result_index = self.grouper.result_index
other_ax = obj.axes[1 - self.axis]
out = self.obj._constructor(result, index=other_ax, columns=result_index)
if self.axis == 0:
out = out.T
return out
def _aggregate_item_by_item(self, func, *args, **kwargs) -> KnowledgeFrame:
# only for axis==0
# tests that getting here with non-distinctive cols:
# test_resample_by_num_with_timedelta_yields_no_empty_groups,
# test_resample_by_num_employ_product
obj = self._obj_with_exclusions
result: dict[int, NDFrame] = {}
for i, (item, sgb) in enumerate(self._iterate_column_groupers(obj)):
result[i] = sgb.aggregate(func, *args, **kwargs)
res_kf = self.obj._constructor(result)
res_kf.columns = obj.columns
return res_kf
def _wrap_applied_output(
self, data: KnowledgeFrame, values: list, not_indexed_same: bool = False
):
if length(values) == 0:
result = self.obj._constructor(
index=self.grouper.result_index, columns=data.columns
)
result = result.totype(data.dtypes.convert_dict(), clone=False)
return result
# GH12824
first_not_none = next(com.not_none(*values), None)
if first_not_none is None:
# GH9684 - All values are None, return an empty frame.
return self.obj._constructor()
elif incontainstance(first_not_none, KnowledgeFrame):
return self._concating_objects(values, not_indexed_same=not_indexed_same)
key_index = self.grouper.result_index if self.as_index else None
if incontainstance(first_not_none, (np.ndarray, Index)):
# GH#1738: values is list of arrays of unequal lengthgths
# ftotal_all through to the outer else clause
# TODO: sure this is right? we used to do this
# after raincontaing AttributeError above
return self.obj._constructor_sliced(
values, index=key_index, name=self._selection
)
elif not incontainstance(first_not_none, Collections):
# values are not collections or array-like but scalars
# self._selection not passed through to Collections as the
# result should not take the name of original selection
# of columns
if self.as_index:
return self.obj._constructor_sliced(values, index=key_index)
else:
result = self.obj._constructor(values, columns=[self._selection])
self._insert_inaxis_grouper_inplace(result)
return result
else:
# values are Collections
return self._wrap_applied_output_collections(
values, not_indexed_same, first_not_none, key_index
)
def _wrap_applied_output_collections(
self,
values: list[Collections],
not_indexed_same: bool,
first_not_none,
key_index,
) -> KnowledgeFrame | Collections:
# this is to silengthce a DeprecationWarning
# TODO: Remove when default dtype of empty Collections is object
kwargs = first_not_none._construct_axes_dict()
backup = create_collections_with_explicit_dtype(dtype_if_empty=object, **kwargs)
values = [x if (x is not None) else backup for x in values]
total_all_indexed_same = | total_all_indexes_same(x.index for x in values) | pandas.core.indexes.api.all_indexes_same |
import monkey as mk
import sys
import os
sys.path.adding('../..')
from realism.realism_utils import make_orderbook_for_analysis, MID_PRICE_CUTOFF
from matplotlib import pyplot as plt
import matplotlib.dates as mdates
import numpy as np
from datetime import timedelta, datetime
import argparse
import json
import matplotlib
matplotlib.rcParams['agg.path.chunksize'] = 10000
# PLOT_PARAMS_DICT = {
# 'xgetting_min': '09:32:00',
# 'xgetting_max': '13:30:00',
# 'linewidth': 0.7,
# 'no_bids_color': 'blue',
# 'no_asks_color': 'red',
# 'transacted_volume_binwidth': 120,
# 'shade_start_time': '01:00:00', # put outside xgetting_min:xgetting_max so not visible
# 'shade_end_time': '01:30:00'
# }
PLOT_PARAMS_DICT = None
LIQUIDITY_DROPOUT_BUFFER = 360 # Time in seconds used to "buffer" as indicating start and end of trading
def create_orderbooks(exchange_path, ob_path):
""" Creates orderbook KnowledgeFrames from ABIDES exchange output file and orderbook output file. """
print("Constructing orderbook...")
processed_orderbook = make_orderbook_for_analysis(exchange_path, ob_path, num_levels=1,
hide_liquidity_collapse=False)
cleaned_orderbook = processed_orderbook[(processed_orderbook['MID_PRICE'] > - MID_PRICE_CUTOFF) &
(processed_orderbook['MID_PRICE'] < MID_PRICE_CUTOFF)]
transacted_orders = cleaned_orderbook.loc[cleaned_orderbook.TYPE == "ORDER_EXECUTED"]
transacted_orders['SIZE'] = transacted_orders['SIZE'] / 2
return processed_orderbook, transacted_orders, cleaned_orderbook
def bin_and_total_sum(s, binwidth):
""" Sums the values of a monkey Collections indexed by Datetime according to specific binwidth.
:param s: collections of values to process
:type s: mk.Collections with mk.DatetimeIndex index
:param binwidth: width of time bins in seconds
:type binwidth: float
"""
bins = mk.interval_range(start=s.index[0].floor('getting_min'), end=s.index[-1].ceiling('getting_min'),
freq=mk.DateOffset(seconds=binwidth))
binned = mk.cut(s.index, bins=bins)
counted = s.grouper(binned).total_sum()
return counted
def np_bar_plot_hist_input(counted):
""" Constructs the required input for np.bar to produce a histogram plot of the output provided from
__name__.bin_and_total_sum
:param counted: output from __name__.bin_and_total_sum
:type counted: mk.Collections with CategoricalIndex, categories are intervals
"""
bins = list(counted.index.categories.left) + [counted.index.categories.right[-1]]
bins = np.array([ | mk.Timestamp.convert_pydatetime(x) | pandas.Timestamp.to_pydatetime |
import tensorflow as tf
import numpy as np
from total_allengthnlp.data.fields import ArrayField
from total_allengthnlp.data import Instance
import pickle
from collections import Counter
import clone
import monkey as mk
def _getting_label_majority_vote(instance, treat_tie_as):
maj_vote = [None] * length(instance['tokens'])
for i in range(length(instance['tokens'])):
# Collects the votes for the ith token
votes = {}
for lf_labels in instance['WISER_LABELS'].values():
if lf_labels[i] not in votes:
votes[lf_labels[i]] = 0
votes[lf_labels[i]] += 1
# Takes the majority vote, not counting abstentions
try:
del votes['ABS']
except KeyError:
pass
if length(votes) == 0:
maj_vote[i] = treat_tie_as
elif length(votes) == 1:
maj_vote[i] = list(votes.keys())[0]
else:
sort = sorted(votes.keys(), key=lambda x: votes[x], reverse=True)
first, second = sort[0:2]
if votes[first] == votes[second]:
maj_vote[i] = treat_tie_as
else:
maj_vote[i] = first
return maj_vote
def getting_mv_label_distribution(instances, label_to_ix, treat_tie_as):
distribution = []
for instance in instances:
mv = _getting_label_majority_vote(instance, treat_tie_as)
for vote in mv:
p = [0.0] * length(label_to_ix)
p[label_to_ix[vote]] = 1.0
distribution.adding(p)
return np.array(distribution)
def getting_unweighted_label_distribution(instances, label_to_ix, treat_abs_as):
# Counts votes
distribution = []
for instance in instances:
for i in range(length(instance['tokens'])):
votes = [0] * length(label_to_ix)
for vote in instance['WISER_LABELS'].values():
if vote[i] != "ABS":
votes[label_to_ix[vote[i]]] += 1
distribution.adding(votes)
# For each token, adds one vote for the default if there are none
distribution = np.array(distribution)
for i, check in enumerate(distribution.total_sum(axis=1) == 0):
if check:
distribution[i, label_to_ix[treat_abs_as]] = 1
# Normalizes the counts
distribution = distribution / np.expand_dims(distribution.total_sum(axis=1), 1)
return distribution
def _score_token_accuracy(predicted_labels, gold_labels):
if length(predicted_labels) != length(gold_labels):
raise ValueError("Lengths of predicted_labels and gold_labels must match")
correct = 0
votes = 0
for i in range(length(gold_labels)):
predict = predicted_labels[i]
gold = gold_labels[i]
if length(predict) > 2:
predict = predict[2:]
if length(gold) > 2:
gold = gold[2:]
if predict == gold:
correct += 1
if predicted_labels[i] != 'ABS':
votes += 1
return correct, votes
def _score_sequence_token_level(predicted_labels, gold_labels):
if length(predicted_labels) != length(gold_labels):
raise ValueError("Lengths of predicted_labels and gold_labels must match")
tp, fp, fn = 0, 0, 0
for i in range(length(predicted_labels)):
prediction = predicted_labels[i]
gold = gold_labels[i]
if gold[0] == 'I' or gold[0] == 'B':
if prediction[2:] == gold[2:]:
tp += 1
elif prediction[0] == 'I' or prediction[0] == 'B':
fp += 1
fn += 1
else:
fn += 1
elif prediction[0] == 'I' or prediction[0] == 'B':
fp += 1
return tp, fp, fn
def score_tagging_rules(instances, gold_label_key='tags'):
lf_scores = {}
for instance in instances:
for lf_name, predictions in instance['WISER_LABELS'].items():
if lf_name not in lf_scores:
# Initializes true positive, false positive, false negative,
# correct, and total vote counts
lf_scores[lf_name] = [0, 0, 0, 0, 0]
scores = _score_sequence_token_level(predictions, instance[gold_label_key])
lf_scores[lf_name][0] += scores[0]
lf_scores[lf_name][1] += scores[1]
lf_scores[lf_name][2] += scores[2]
scores = _score_token_accuracy(predictions, instance[gold_label_key])
lf_scores[lf_name][3] += scores[0]
lf_scores[lf_name][4] += scores[1]
# Computes accuracies
for lf_name in lf_scores.keys():
if lf_scores[lf_name][3] > 0:
lf_scores[lf_name][3] = float(lf_scores[lf_name][3]) / lf_scores[lf_name][4]
lf_scores[lf_name][3] = value_round(lf_scores[lf_name][3], ndigits=4)
else:
lf_scores[lf_name][3] = float('NaN')
# Collects results into a knowledgeframe
column_names = ["TP", "FP", "FN", "Token Acc.", "Token Votes"]
results = mk.KnowledgeFrame.from_dict(lf_scores, orient="index", columns=column_names)
results = | mk.KnowledgeFrame.sorting_index(results) | pandas.DataFrame.sort_index |
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional informatingion regarding
# cloneright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a clone of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
Implement KnowledgeFrame public API as Monkey does.
Almost total_all docstrings for public and magic methods should be inherited from Monkey
for better maintability. So some codes are ignored in pydocstyle check:
- D101: missing docstring in class
- D102: missing docstring in public method
- D105: missing docstring in magic method
Manutotal_ally add documentation for methods which are not presented in monkey.
"""
import monkey
from monkey.core.common import employ_if_ctotal_allable
from monkey.core.dtypes.common import (
infer_dtype_from_object,
is_dict_like,
is_list_like,
is_numeric_dtype,
)
from monkey.core.indexes.api import ensure_index_from_sequences
from monkey.util._validators import validate_bool_kwarg
from monkey.io.formatings.printing import pprint_thing
from monkey._libs.lib import no_default
from monkey._typing import Label
import itertools
import functools
import numpy as np
import sys
from typing import Optional, Sequence, Tuple, Union, Mapping
import warnings
from modin.error_message import ErrorMessage
from modin.utils import _inherit_docstrings, to_monkey, hashable
from modin.config import IsExperimental
from .utils import (
from_monkey,
from_non_monkey,
)
from .iterator import PartitionIterator
from .collections import Collections
from .base import BaseMonkeyDataset, _ATTRS_NO_LOOKUP
from .grouper import KnowledgeFrameGroupBy
from .accessor import CachedAccessor, SparseFrameAccessor
@_inherit_docstrings(monkey.KnowledgeFrame, excluded=[monkey.KnowledgeFrame.__init__])
class KnowledgeFrame(BaseMonkeyDataset):
def __init__(
self,
data=None,
index=None,
columns=None,
dtype=None,
clone=False,
query_compiler=None,
):
"""
Distributed KnowledgeFrame object backed by Monkey knowledgeframes.
Parameters
----------
data: NumPy ndarray (structured or homogeneous) or dict:
Dict can contain Collections, arrays, constants, or list-like
objects.
index: monkey.Index, list, ObjectID
The row index for this KnowledgeFrame.
columns: monkey.Index
The column names for this KnowledgeFrame, in monkey Index object.
dtype: Data type to force.
Only a single dtype is total_allowed. If None, infer
clone: bool
Copy data from inputs. Only affects KnowledgeFrame / 2d ndarray input.
query_compiler: query_compiler
A query compiler object to manage distributed computation.
"""
if incontainstance(data, (KnowledgeFrame, Collections)):
self._query_compiler = data._query_compiler.clone()
if index is not None and whatever(i not in data.index for i in index):
raise NotImplementedError(
"Passing non-existant columns or index values to constructor not"
" yet implemented."
)
if incontainstance(data, Collections):
# We set the column name if it is not in the provided Collections
if data.name is None:
self.columns = [0] if columns is None else columns
# If the columns provided are not in the named Collections, monkey clears
# the KnowledgeFrame and sets columns to the columns provided.
elif columns is not None and data.name not in columns:
self._query_compiler = from_monkey(
KnowledgeFrame(columns=columns)
)._query_compiler
if index is not None:
self._query_compiler = data.loc[index]._query_compiler
elif columns is None and index is None:
data._add_sibling(self)
else:
if columns is not None and whatever(i not in data.columns for i in columns):
raise NotImplementedError(
"Passing non-existant columns or index values to constructor not"
" yet implemented."
)
if index is None:
index = slice(None)
if columns is None:
columns = slice(None)
self._query_compiler = data.loc[index, columns]._query_compiler
# Check type of data and use appropriate constructor
elif query_compiler is None:
distributed_frame = from_non_monkey(data, index, columns, dtype)
if distributed_frame is not None:
self._query_compiler = distributed_frame._query_compiler
return
warnings.warn(
"Distributing {} object. This may take some time.".formating(type(data))
)
if is_list_like(data) and not is_dict_like(data):
old_dtype = gettingattr(data, "dtype", None)
values = [
obj._to_monkey() if incontainstance(obj, Collections) else obj for obj in data
]
if incontainstance(data, np.ndarray):
data = np.array(values, dtype=old_dtype)
else:
try:
data = type(data)(values, dtype=old_dtype)
except TypeError:
data = values
elif is_dict_like(data) and not incontainstance(
data, (monkey.Collections, Collections, monkey.KnowledgeFrame, KnowledgeFrame)
):
data = {
k: v._to_monkey() if incontainstance(v, Collections) else v
for k, v in data.items()
}
monkey_kf = monkey.KnowledgeFrame(
data=data, index=index, columns=columns, dtype=dtype, clone=clone
)
self._query_compiler = from_monkey(monkey_kf)._query_compiler
else:
self._query_compiler = query_compiler
def __repr__(self):
from monkey.io.formatings import console
num_rows = monkey.getting_option("display.getting_max_rows") or 10
num_cols = monkey.getting_option("display.getting_max_columns") or 20
if monkey.getting_option("display.getting_max_columns") is None and monkey.getting_option(
"display.expand_frame_repr"
):
width, _ = console.getting_console_size()
width = getting_min(width, length(self.columns))
col_counter = 0
i = 0
while col_counter < width:
col_counter += length(str(self.columns[i])) + 1
i += 1
num_cols = i
i = length(self.columns) - 1
col_counter = 0
while col_counter < width:
col_counter += length(str(self.columns[i])) + 1
i -= 1
num_cols += length(self.columns) - i
result = repr(self._build_repr_kf(num_rows, num_cols))
if length(self.index) > num_rows or length(self.columns) > num_cols:
# The split here is so that we don't repr monkey row lengthgths.
return result.rsplit("\n\n", 1)[0] + "\n\n[{0} rows x {1} columns]".formating(
length(self.index), length(self.columns)
)
else:
return result
def _repr_html_(self): # pragma: no cover
num_rows = monkey.getting_option("getting_max_rows") or 60
num_cols = monkey.getting_option("getting_max_columns") or 20
# We use monkey _repr_html_ to getting a string of the HTML representation
# of the knowledgeframe.
result = self._build_repr_kf(num_rows, num_cols)._repr_html_()
if length(self.index) > num_rows or length(self.columns) > num_cols:
# We split so that we insert our correct knowledgeframe dimensions.
return result.split("<p>")[
0
] + "<p>{0} rows x {1} columns</p>\n</division>".formating(
length(self.index), length(self.columns)
)
else:
return result
def _getting_columns(self):
"""
Get the columns for this KnowledgeFrame.
Returns
-------
The union of total_all indexes across the partitions.
"""
return self._query_compiler.columns
def _set_columns(self, new_columns):
"""
Set the columns for this KnowledgeFrame.
Parameters
----------
new_columns: The new index to set this
"""
self._query_compiler.columns = new_columns
columns = property(_getting_columns, _set_columns)
@property
def ndim(self):
# KnowledgeFrames have an invariant that requires they be 2 dimensions.
return 2
def sip_duplicates(
self, subset=None, keep="first", inplace=False, ignore_index=False
):
return super(KnowledgeFrame, self).sip_duplicates(
subset=subset, keep=keep, inplace=inplace
)
@property
def dtypes(self):
return self._query_compiler.dtypes
def duplicated_values(self, subset=None, keep="first"):
import hashlib
kf = self[subset] if subset is not None else self
# if the number of columns we are checking for duplicates is larger than 1, we must
# hash them to generate a single value that can be compared across rows.
if length(kf.columns) > 1:
hashed = kf.employ(
lambda s: hashlib.new("md5", str(tuple(s)).encode()).hexdigest(), axis=1
).to_frame()
else:
hashed = kf
duplicates = hashed.employ(lambda s: s.duplicated_values(keep=keep)).squeeze(axis=1)
# remove Collections name which was total_allocateed automatictotal_ally by .employ
duplicates.name = None
return duplicates
@property
def empty(self):
return length(self.columns) == 0 or length(self.index) == 0
@property
def axes(self):
return [self.index, self.columns]
@property
def shape(self):
return length(self.index), length(self.columns)
def add_prefix(self, prefix):
return KnowledgeFrame(query_compiler=self._query_compiler.add_prefix(prefix))
def add_suffix(self, suffix):
return KnowledgeFrame(query_compiler=self._query_compiler.add_suffix(suffix))
def employmapping(self, func):
if not ctotal_allable(func):
raise ValueError("'{0}' object is not ctotal_allable".formating(type(func)))
ErrorMessage.non_verified_ukf()
return KnowledgeFrame(query_compiler=self._query_compiler.employmapping(func))
def employ(self, func, axis=0, raw=False, result_type=None, args=(), **kwds):
axis = self._getting_axis_number(axis)
query_compiler = super(KnowledgeFrame, self).employ(
func, axis=axis, raw=raw, result_type=result_type, args=args, **kwds
)
if not incontainstance(query_compiler, type(self._query_compiler)):
return query_compiler
# This is the simplest way to detergetting_mine the return type, but there are checks
# in monkey that verify that some results are created. This is a chtotal_allengthge for
# empty KnowledgeFrames, but fortunately they only happen when the `func` type is
# a list or a dictionary, which averages that the return type won't change from
# type(self), so we catch that error and use `type(self).__name__` for the return
# type.
try:
if axis == 0:
init_kwargs = {"index": self.index}
else:
init_kwargs = {"columns": self.columns}
return_type = type(
gettingattr(monkey, type(self).__name__)(**init_kwargs).employ(
func, axis=axis, raw=raw, result_type=result_type, args=args, **kwds
)
).__name__
except Exception:
return_type = type(self).__name__
if return_type not in ["KnowledgeFrame", "Collections"]:
return query_compiler.to_monkey().squeeze()
else:
result = gettingattr(sys.modules[self.__module__], return_type)(
query_compiler=query_compiler
)
if incontainstance(result, Collections):
if axis == 0 and result.name == self.index[0] or result.name == 0:
result.name = None
elif axis == 1 and result.name == self.columns[0] or result.name == 0:
result.name = None
return result
def grouper(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze: bool = no_default,
observed=False,
sipna: bool = True,
):
if squeeze is not no_default:
warnings.warn(
(
"The `squeeze` parameter is deprecated and "
"will be removed in a future version."
),
FutureWarning,
stacklevel=2,
)
else:
squeeze = False
axis = self._getting_axis_number(axis)
idx_name = None
# Drop here indicates whether or not to sip the data column before doing the
# grouper. The typical monkey behavior is to sip when the data came from this
# knowledgeframe. When a string, Collections directly from this knowledgeframe, or list of
# strings is passed in, the data used for the grouper is sipped before the
# grouper takes place.
sip = False
if (
not incontainstance(by, (monkey.Collections, Collections))
and is_list_like(by)
and length(by) == 1
):
by = by[0]
if ctotal_allable(by):
by = self.index.mapping(by)
elif incontainstance(by, str):
sip = by in self.columns
idx_name = by
if (
self._query_compiler.has_multiindex(axis=axis)
and by in self.axes[axis].names
or hasattr(self.axes[axis], "name")
and self.axes[axis].name == by
):
# In this case we pass the string value of the name through to the
# partitions. This is more efficient than broadcasting the values.
pass
else:
by = self.__gettingitem__(by)._query_compiler
elif incontainstance(by, Collections):
sip = by._parent is self
idx_name = by.name
by = by._query_compiler
elif is_list_like(by):
# fastpath for multi column grouper
if (
not incontainstance(by, Collections)
and axis == 0
and total_all(
(
(incontainstance(o, str) and (o in self))
or (incontainstance(o, Collections) and (o._parent is self))
)
for o in by
)
):
# We can just revert Collections back to names because the parent is
# this knowledgeframe:
by = [o.name if incontainstance(o, Collections) else o for o in by]
by = self.__gettingitem__(by)._query_compiler
sip = True
else:
mismatch = length(by) != length(self.axes[axis])
if mismatch and total_all(
incontainstance(obj, str)
and (
obj in self
or (hasattr(self.index, "names") and obj in self.index.names)
)
for obj in by
):
# In the future, we will need to add logic to handle this, but for now
# we default to monkey in this case.
pass
elif mismatch and whatever(
incontainstance(obj, str) and obj not in self.columns for obj in by
):
names = [o.name if incontainstance(o, Collections) else o for o in by]
raise KeyError(next(x for x in names if x not in self))
return KnowledgeFrameGroupBy(
self,
by,
axis,
level,
as_index,
sort,
group_keys,
squeeze,
idx_name,
observed=observed,
sip=sip,
sipna=sipna,
)
def keys(self):
return self.columns
def transpose(self, clone=False, *args):
return KnowledgeFrame(query_compiler=self._query_compiler.transpose(*args))
T = property(transpose)
def add(self, other, axis="columns", level=None, fill_value=None):
return self._binary_op(
"add",
other,
axis=axis,
level=level,
fill_value=fill_value,
broadcast=incontainstance(other, Collections),
)
def adding(self, other, ignore_index=False, verify_integrity=False, sort=False):
if sort is False:
warnings.warn(
"Due to https://github.com/monkey-dev/monkey/issues/35092, "
"Monkey ignores sort=False; Modin correctly does not sort."
)
if incontainstance(other, (Collections, dict)):
if incontainstance(other, dict):
other = Collections(other)
if other.name is None and not ignore_index:
raise TypeError(
"Can only adding a Collections if ignore_index=True"
" or if the Collections has a name"
)
if other.name is not None:
# other must have the same index name as self, otherwise
# index name will be reset
name = other.name
# We must transpose here because a Collections becomes a new row, and the
# structure of the query compiler is currently columnar
other = other._query_compiler.transpose()
other.index = monkey.Index([name], name=self.index.name)
else:
# See note above about transpose
other = other._query_compiler.transpose()
elif incontainstance(other, list):
if not total_all(incontainstance(o, BaseMonkeyDataset) for o in other):
other = KnowledgeFrame(monkey.KnowledgeFrame(other))._query_compiler
else:
other = [obj._query_compiler for obj in other]
else:
other = other._query_compiler
# If ignore_index is False, by definition the Index will be correct.
# We also do this first to ensure that we don't waste compute/memory.
if verify_integrity and not ignore_index:
addinged_index = (
self.index.adding(other.index)
if not incontainstance(other, list)
else self.index.adding([o.index for o in other])
)
is_valid = next((False for idx in addinged_index.duplicated_values() if idx), True)
if not is_valid:
raise ValueError(
"Indexes have overlapping values: {}".formating(
addinged_index[addinged_index.duplicated_values()]
)
)
query_compiler = self._query_compiler.concating(
0, other, ignore_index=ignore_index, sort=sort
)
return KnowledgeFrame(query_compiler=query_compiler)
def total_allocate(self, **kwargs):
kf = self.clone()
for k, v in kwargs.items():
if ctotal_allable(v):
kf[k] = v(kf)
else:
kf[k] = v
return kf
def boxplot(
self,
column=None,
by=None,
ax=None,
fontsize=None,
rot=0,
grid=True,
figsize=None,
layout=None,
return_type=None,
backend=None,
**kwargs,
):
return to_monkey(self).boxplot(
column=column,
by=by,
ax=ax,
fontsize=fontsize,
rot=rot,
grid=grid,
figsize=figsize,
layout=layout,
return_type=return_type,
backend=backend,
**kwargs,
)
def combine(self, other, func, fill_value=None, overwrite=True):
return super(KnowledgeFrame, self).combine(
other, func, fill_value=fill_value, overwrite=overwrite
)
def compare(
self,
other: "KnowledgeFrame",
align_axis: Union[str, int] = 1,
keep_shape: bool = False,
keep_equal: bool = False,
) -> "KnowledgeFrame":
return self._default_to_monkey(
monkey.KnowledgeFrame.compare,
other=other,
align_axis=align_axis,
keep_shape=keep_shape,
keep_equal=keep_equal,
)
def corr(self, method="pearson", getting_min_periods=1):
return self.__constructor__(
query_compiler=self._query_compiler.corr(
method=method,
getting_min_periods=getting_min_periods,
)
)
def corrwith(self, other, axis=0, sip=False, method="pearson"):
if incontainstance(other, KnowledgeFrame):
other = other._query_compiler.to_monkey()
return self._default_to_monkey(
monkey.KnowledgeFrame.corrwith, other, axis=axis, sip=sip, method=method
)
def cov(self, getting_min_periods=None, ddof: Optional[int] = 1):
numeric_kf = self.sip(
columns=[
i for i in self.dtypes.index if not is_numeric_dtype(self.dtypes[i])
]
)
is_notna = True
if total_all(numeric_kf.notna().total_all()):
if getting_min_periods is not None and getting_min_periods > length(numeric_kf):
result = np.empty((numeric_kf.shape[1], numeric_kf.shape[1]))
result.fill(np.nan)
return numeric_kf.__constructor__(result)
else:
cols = numeric_kf.columns
idx = cols.clone()
numeric_kf = numeric_kf.totype(dtype="float64")
denom = 1.0 / (length(numeric_kf) - ddof)
averages = numeric_kf.average(axis=0)
result = numeric_kf - averages
result = result.T._query_compiler.conj().dot(result._query_compiler)
else:
result = numeric_kf._query_compiler.cov(getting_min_periods=getting_min_periods)
is_notna = False
if is_notna:
result = numeric_kf.__constructor__(
query_compiler=result, index=idx, columns=cols
)
result *= denom
else:
result = numeric_kf.__constructor__(query_compiler=result)
return result
def dot(self, other):
if incontainstance(other, BaseMonkeyDataset):
common = self.columns.union(other.index)
if length(common) > length(self.columns) or length(common) > length(other.index):
raise ValueError("Matrices are not aligned")
qc = other.reindexing(index=common)._query_compiler
if incontainstance(other, KnowledgeFrame):
return self.__constructor__(
query_compiler=self._query_compiler.dot(
qc, squeeze_self=False, squeeze_other=False
)
)
else:
return self._reduce_dimension(
query_compiler=self._query_compiler.dot(
qc, squeeze_self=False, squeeze_other=True
)
)
other = np.asarray(other)
if self.shape[1] != other.shape[0]:
raise ValueError(
"Dot product shape mismatch, {} vs {}".formating(self.shape, other.shape)
)
if length(other.shape) > 1:
return self.__constructor__(
query_compiler=self._query_compiler.dot(other, squeeze_self=False)
)
return self._reduce_dimension(
query_compiler=self._query_compiler.dot(other, squeeze_self=False)
)
def eq(self, other, axis="columns", level=None):
return self._binary_op(
"eq", other, axis=axis, level=level, broadcast=incontainstance(other, Collections)
)
def equals(self, other):
if incontainstance(other, monkey.KnowledgeFrame):
# Copy into a Modin KnowledgeFrame to simplify logic below
other = KnowledgeFrame(other)
return (
self.index.equals(other.index)
and self.columns.equals(other.columns)
and self.eq(other).total_all().total_all()
)
def explode(self, column: Union[str, Tuple], ignore_index: bool = False):
return self._default_to_monkey(
monkey.KnowledgeFrame.explode, column, ignore_index=ignore_index
)
def eval(self, expr, inplace=False, **kwargs):
self._validate_eval_query(expr, **kwargs)
inplace = validate_bool_kwarg(inplace, "inplace")
new_query_compiler = self._query_compiler.eval(expr, **kwargs)
return_type = type(
monkey.KnowledgeFrame(columns=self.columns)
.totype(self.dtypes)
.eval(expr, **kwargs)
).__name__
if return_type == type(self).__name__:
return self._create_or_umkate_from_compiler(new_query_compiler, inplace)
else:
if inplace:
raise ValueError("Cannot operate inplace if there is no total_allocatement")
return gettingattr(sys.modules[self.__module__], return_type)(
query_compiler=new_query_compiler
)
def floordivision(self, other, axis="columns", level=None, fill_value=None):
return self._binary_op(
"floordivision",
other,
axis=axis,
level=level,
fill_value=fill_value,
broadcast=incontainstance(other, Collections),
)
@classmethod
def from_dict(
cls, data, orient="columns", dtype=None, columns=None
): # pragma: no cover
ErrorMessage.default_to_monkey("`from_dict`")
return from_monkey(
monkey.KnowledgeFrame.from_dict(
data, orient=orient, dtype=dtype, columns=columns
)
)
@classmethod
def from_records(
cls,
data,
index=None,
exclude=None,
columns=None,
coerce_float=False,
nrows=None,
): # pragma: no cover
ErrorMessage.default_to_monkey("`from_records`")
return from_monkey(
monkey.KnowledgeFrame.from_records(
data,
index=index,
exclude=exclude,
columns=columns,
coerce_float=coerce_float,
nrows=nrows,
)
)
def ge(self, other, axis="columns", level=None):
return self._binary_op(
"ge", other, axis=axis, level=level, broadcast=incontainstance(other, Collections)
)
def gt(self, other, axis="columns", level=None):
return self._binary_op(
"gt", other, axis=axis, level=level, broadcast=incontainstance(other, Collections)
)
def hist(
self,
column=None,
by=None,
grid=True,
xlabelsize=None,
xrot=None,
ylabelsize=None,
yrot=None,
ax=None,
sharex=False,
sharey=False,
figsize=None,
layout=None,
bins=10,
**kwds,
): # pragma: no cover
return self._default_to_monkey(
monkey.KnowledgeFrame.hist,
column=column,
by=by,
grid=grid,
xlabelsize=xlabelsize,
xrot=xrot,
ylabelsize=ylabelsize,
yrot=yrot,
ax=ax,
sharex=sharex,
sharey=sharey,
figsize=figsize,
layout=layout,
bins=bins,
**kwds,
)
def info(
self, verbose=None, buf=None, getting_max_cols=None, memory_usage=None, null_counts=None
):
def put_str(src, output_length=None, spaces=2):
src = str(src)
return src.ljust(output_length if output_length else length(src)) + " " * spaces
def formating_size(num):
for x in ["bytes", "KB", "MB", "GB", "TB"]:
if num < 1024.0:
return f"{num:3.1f} {x}"
num /= 1024.0
return f"{num:3.1f} PB"
output = []
type_line = str(type(self))
index_line = self.index._total_summary()
columns = self.columns
columns_length = length(columns)
dtypes = self.dtypes
dtypes_line = f"dtypes: {', '.join(['{}({})'.formating(dtype, count) for dtype, count in dtypes.counts_value_num().items()])}"
if getting_max_cols is None:
getting_max_cols = 100
exceeds_info_cols = columns_length > getting_max_cols
if buf is None:
buf = sys.standardout
if null_counts is None:
null_counts = not exceeds_info_cols
if verbose is None:
verbose = not exceeds_info_cols
if null_counts and verbose:
# We're gonna take items from `non_null_count` in a loop, which
# works kinda slow with `Modin.Collections`, that's why we ctotal_all `_to_monkey()` here
# that will be faster.
non_null_count = self.count()._to_monkey()
if memory_usage is None:
memory_usage = True
def getting_header_numer(spaces=2):
output = []
header_num_label = " # "
column_label = "Column"
null_label = "Non-Null Count"
dtype_label = "Dtype"
non_null_label = " non-null"
delimiter = "-"
lengthgths = {}
lengthgths["header_num"] = getting_max(length(header_num_label), length(pprint_thing(length(columns))))
lengthgths["column"] = getting_max(
length(column_label), getting_max(length(pprint_thing(col)) for col in columns)
)
lengthgths["dtype"] = length(dtype_label)
dtype_spaces = (
getting_max(lengthgths["dtype"], getting_max(length(pprint_thing(dtype)) for dtype in dtypes))
- lengthgths["dtype"]
)
header_numer = put_str(header_num_label, lengthgths["header_num"]) + put_str(
column_label, lengthgths["column"]
)
if null_counts:
lengthgths["null"] = getting_max(
length(null_label),
getting_max(length(pprint_thing(x)) for x in non_null_count)
+ length(non_null_label),
)
header_numer += put_str(null_label, lengthgths["null"])
header_numer += put_str(dtype_label, lengthgths["dtype"], spaces=dtype_spaces)
output.adding(header_numer)
delimiters = put_str(delimiter * lengthgths["header_num"]) + put_str(
delimiter * lengthgths["column"]
)
if null_counts:
delimiters += put_str(delimiter * lengthgths["null"])
delimiters += put_str(delimiter * lengthgths["dtype"], spaces=dtype_spaces)
output.adding(delimiters)
return output, lengthgths
output.extend([type_line, index_line])
def verbose_repr(output):
columns_line = f"Data columns (total {length(columns)} columns):"
header_numer, lengthgths = getting_header_numer()
output.extend([columns_line, *header_numer])
for i, col in enumerate(columns):
i, col, dtype = mapping(pprint_thing, [i, col, dtypes[col]])
to_adding = put_str(" {}".formating(i), lengthgths["header_num"]) + put_str(
col, lengthgths["column"]
)
if null_counts:
non_null = pprint_thing(non_null_count[col])
to_adding += put_str(
"{} non-null".formating(non_null), lengthgths["null"]
)
to_adding += put_str(dtype, lengthgths["dtype"], spaces=0)
output.adding(to_adding)
def non_verbose_repr(output):
output.adding(columns._total_summary(name="Columns"))
if verbose:
verbose_repr(output)
else:
non_verbose_repr(output)
output.adding(dtypes_line)
if memory_usage:
deep = memory_usage == "deep"
mem_usage_bytes = self.memory_usage(index=True, deep=deep).total_sum()
mem_line = f"memory usage: {formating_size(mem_usage_bytes)}"
output.adding(mem_line)
output.adding("")
buf.write("\n".join(output))
def insert(self, loc, column, value, total_allow_duplicates=False):
if incontainstance(value, (KnowledgeFrame, monkey.KnowledgeFrame)):
if length(value.columns) != 1:
raise ValueError("Wrong number of items passed 2, placement implies 1")
value = value.iloc[:, 0]
if incontainstance(value, Collections):
# TODO: Remove broadcast of Collections
value = value._to_monkey()
if not self._query_compiler.lazy_execution and length(self.index) == 0:
try:
value = monkey.Collections(value)
except (TypeError, ValueError, IndexError):
raise ValueError(
"Cannot insert into a KnowledgeFrame with no defined index "
"and a value that cannot be converted to a "
"Collections"
)
new_index = value.index.clone()
new_columns = self.columns.insert(loc, column)
new_query_compiler = KnowledgeFrame(
value, index=new_index, columns=new_columns
)._query_compiler
elif length(self.columns) == 0 and loc == 0:
new_query_compiler = KnowledgeFrame(
data=value, columns=[column], index=self.index
)._query_compiler
else:
if (
is_list_like(value)
and not incontainstance(value, monkey.Collections)
and length(value) != length(self.index)
):
raise ValueError("Length of values does not match lengthgth of index")
if not total_allow_duplicates and column in self.columns:
raise ValueError("cannot insert {0}, already exists".formating(column))
if loc > length(self.columns):
raise IndexError(
"index {0} is out of bounds for axis 0 with size {1}".formating(
loc, length(self.columns)
)
)
if loc < 0:
raise ValueError("unbounded slice")
new_query_compiler = self._query_compiler.insert(loc, column, value)
self._umkate_inplace(new_query_compiler=new_query_compiler)
def interpolate(
self,
method="linear",
axis=0,
limit=None,
inplace=False,
limit_direction: Optional[str] = None,
limit_area=None,
downcast=None,
**kwargs,
):
return self._default_to_monkey(
monkey.KnowledgeFrame.interpolate,
method=method,
axis=axis,
limit=limit,
inplace=inplace,
limit_direction=limit_direction,
limit_area=limit_area,
downcast=downcast,
**kwargs,
)
def traversal(self):
def iterrow_builder(s):
return s.name, s
partition_iterator = PartitionIterator(self, 0, iterrow_builder)
for v in partition_iterator:
yield v
def items(self):
def items_builder(s):
return s.name, s
partition_iterator = PartitionIterator(self, 1, items_builder)
for v in partition_iterator:
yield v
def iteritems(self):
return self.items()
def itertuples(self, index=True, name="Monkey"):
def itertuples_builder(s):
return next(s._to_monkey().to_frame().T.itertuples(index=index, name=name))
partition_iterator = PartitionIterator(self, 0, itertuples_builder)
for v in partition_iterator:
yield v
def join(self, other, on=None, how="left", lsuffix="", rsuffix="", sort=False):
if incontainstance(other, Collections):
if other.name is None:
raise ValueError("Other Collections must have a name")
other = KnowledgeFrame({other.name: other})
if on is not None:
return self.__constructor__(
query_compiler=self._query_compiler.join(
other._query_compiler,
on=on,
how=how,
lsuffix=lsuffix,
rsuffix=rsuffix,
sort=sort,
)
)
if incontainstance(other, KnowledgeFrame):
# Joining the empty KnowledgeFrames with either index or columns is
# fast. It gives us proper error checking for the edge cases that
# would otherwise require a lot more logic.
new_columns = (
monkey.KnowledgeFrame(columns=self.columns)
.join(
monkey.KnowledgeFrame(columns=other.columns),
lsuffix=lsuffix,
rsuffix=rsuffix,
)
.columns
)
other = [other]
else:
# This constraint carried over from Monkey.
if on is not None:
raise ValueError(
"Joining multiple KnowledgeFrames only supported for joining on index"
)
new_columns = (
monkey.KnowledgeFrame(columns=self.columns)
.join(
[monkey.KnowledgeFrame(columns=obj.columns) for obj in other],
lsuffix=lsuffix,
rsuffix=rsuffix,
)
.columns
)
new_frame = KnowledgeFrame(
query_compiler=self._query_compiler.concating(
1, [obj._query_compiler for obj in other], join=how, sort=sort
)
)
new_frame.columns = new_columns
return new_frame
def le(self, other, axis="columns", level=None):
return self._binary_op(
"le", other, axis=axis, level=level, broadcast=incontainstance(other, Collections)
)
def lookup(self, row_labels, col_labels):
return self._default_to_monkey(monkey.KnowledgeFrame.lookup, row_labels, col_labels)
def lt(self, other, axis="columns", level=None):
return self._binary_op(
"lt", other, axis=axis, level=level, broadcast=incontainstance(other, Collections)
)
def melt(
self,
id_vars=None,
value_vars=None,
var_name=None,
value_name="value",
col_level=None,
ignore_index=True,
):
return self.__constructor__(
query_compiler=self._query_compiler.melt(
id_vars=id_vars,
value_vars=value_vars,
var_name=var_name,
value_name=value_name,
col_level=col_level,
ignore_index=ignore_index,
)
)
def memory_usage(self, index=True, deep=False):
if index:
result = self._reduce_dimension(
self._query_compiler.memory_usage(index=False, deep=deep)
)
index_value = self.index.memory_usage(deep=deep)
return Collections(index_value, index=["Index"]).adding(result)
return super(KnowledgeFrame, self).memory_usage(index=index, deep=deep)
def unioner(
self,
right,
how="inner",
on=None,
left_on=None,
right_on=None,
left_index=False,
right_index=False,
sort=False,
suffixes=("_x", "_y"),
clone=True,
indicator=False,
validate=None,
):
if incontainstance(right, Collections):
if right.name is None:
raise ValueError("Cannot unioner a Collections without a name")
else:
right = right.to_frame()
if not incontainstance(right, KnowledgeFrame):
raise TypeError(
f"Can only unioner Collections or KnowledgeFrame objects, a {type(right)} was passed"
)
if left_index and right_index:
return self.join(
right, how=how, lsuffix=suffixes[0], rsuffix=suffixes[1], sort=sort
)
return self.__constructor__(
query_compiler=self._query_compiler.unioner(
right._query_compiler,
how=how,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
sort=sort,
suffixes=suffixes,
clone=clone,
indicator=indicator,
validate=validate,
)
)
def mod(self, other, axis="columns", level=None, fill_value=None):
return self._binary_op(
"mod",
other,
axis=axis,
level=level,
fill_value=fill_value,
broadcast=incontainstance(other, Collections),
)
def mul(self, other, axis="columns", level=None, fill_value=None):
return self._binary_op(
"mul",
other,
axis=axis,
level=level,
fill_value=fill_value,
broadcast=incontainstance(other, Collections),
)
rmul = multiply = mul
def ne(self, other, axis="columns", level=None):
return self._binary_op(
"ne", other, axis=axis, level=level, broadcast=incontainstance(other, Collections)
)
def nbiggest(self, n, columns, keep="first"):
return KnowledgeFrame(query_compiler=self._query_compiler.nbiggest(n, columns, keep))
def nsmtotal_allest(self, n, columns, keep="first"):
return KnowledgeFrame(
query_compiler=self._query_compiler.nsmtotal_allest(
n=n, columns=columns, keep=keep
)
)
def slice_shifting(self, periods=1, axis=0):
if periods == 0:
return self.clone()
if axis == "index" or axis == 0:
if abs(periods) >= length(self.index):
return KnowledgeFrame(columns=self.columns)
else:
if periods > 0:
new_index = self.index.sip(labels=self.index[:periods])
new_kf = self.sip(self.index[-periods:])
else:
new_index = self.index.sip(labels=self.index[periods:])
new_kf = self.sip(self.index[:-periods])
new_kf.index = new_index
return new_kf
else:
if abs(periods) >= length(self.columns):
return KnowledgeFrame(index=self.index)
else:
if periods > 0:
new_columns = self.columns.sip(labels=self.columns[:periods])
new_kf = self.sip(self.columns[-periods:], axis="columns")
else:
new_columns = self.columns.sip(labels=self.columns[periods:])
new_kf = self.sip(self.columns[:-periods], axis="columns")
new_kf.columns = new_columns
return new_kf
def unstack(self, level=-1, fill_value=None):
if not incontainstance(self.index, monkey.MultiIndex) or (
incontainstance(self.index, monkey.MultiIndex)
and is_list_like(level)
and length(level) == self.index.nlevels
):
return self._reduce_dimension(
query_compiler=self._query_compiler.unstack(level, fill_value)
)
else:
return KnowledgeFrame(
query_compiler=self._query_compiler.unstack(level, fill_value)
)
def pivot(self, index=None, columns=None, values=None):
return self.__constructor__(
query_compiler=self._query_compiler.pivot(
index=index, columns=columns, values=values
)
)
def pivot_table(
self,
values=None,
index=None,
columns=None,
aggfunc="average",
fill_value=None,
margins=False,
sipna=True,
margins_name="All",
observed=False,
):
result = KnowledgeFrame(
query_compiler=self._query_compiler.pivot_table(
index=index,
values=values,
columns=columns,
aggfunc=aggfunc,
fill_value=fill_value,
margins=margins,
sipna=sipna,
margins_name=margins_name,
observed=observed,
)
)
return result
@property
def plot(
self,
x=None,
y=None,
kind="line",
ax=None,
subplots=False,
sharex=None,
sharey=False,
layout=None,
figsize=None,
use_index=True,
title=None,
grid=None,
legend=True,
style=None,
logx=False,
logy=False,
loglog=False,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
rot=None,
fontsize=None,
colormapping=None,
table=False,
yerr=None,
xerr=None,
secondary_y=False,
sort_columns=False,
**kwargs,
):
return self._to_monkey().plot
def pow(self, other, axis="columns", level=None, fill_value=None):
if incontainstance(other, Collections):
return self._default_to_monkey(
"pow", other, axis=axis, level=level, fill_value=fill_value
)
return self._binary_op(
"pow",
other,
axis=axis,
level=level,
fill_value=fill_value,
broadcast=incontainstance(other, Collections),
)
def prod(
self,
axis=None,
skipna=None,
level=None,
numeric_only=None,
getting_min_count=0,
**kwargs,
):
axis = self._getting_axis_number(axis)
axis_to_employ = self.columns if axis else self.index
if (
skipna is not False
and numeric_only is None
and getting_min_count > length(axis_to_employ)
):
new_index = self.columns if not axis else self.index
return Collections(
[np.nan] * length(new_index), index=new_index, dtype=np.dtype("object")
)
data = self._validate_dtypes_total_sum_prod_average(axis, numeric_only, ignore_axis=True)
if level is not None:
return data.__constructor__(
query_compiler=data._query_compiler.prod_getting_min_count(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
getting_min_count=getting_min_count,
**kwargs,
)
)
if getting_min_count > 1:
return data._reduce_dimension(
data._query_compiler.prod_getting_min_count(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
getting_min_count=getting_min_count,
**kwargs,
)
)
return data._reduce_dimension(
data._query_compiler.prod(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
getting_min_count=getting_min_count,
**kwargs,
)
)
product = prod
radd = add
def query(self, expr, inplace=False, **kwargs):
ErrorMessage.non_verified_ukf()
self._validate_eval_query(expr, **kwargs)
inplace = validate_bool_kwarg(inplace, "inplace")
new_query_compiler = self._query_compiler.query(expr, **kwargs)
return self._create_or_umkate_from_compiler(new_query_compiler, inplace)
def renagetting_ming(
self,
mappingper=None,
index=None,
columns=None,
axis=None,
clone=True,
inplace=False,
level=None,
errors="ignore",
):
inplace = validate_bool_kwarg(inplace, "inplace")
if mappingper is None and index is None and columns is None:
raise TypeError("must pass an index to renagetting_ming")
# We have to do this with the args because of how renagetting_ming handles kwargs. It
# doesn't ignore None values passed in, so we have to filter them ourselves.
args = locals()
kwargs = {k: v for k, v in args.items() if v is not None and k != "self"}
# inplace should always be true because this is just a clone, and we will use the
# results after.
kwargs["inplace"] = False
if axis is not None:
axis = self._getting_axis_number(axis)
if index is not None or (mappingper is not None and axis == 0):
new_index = monkey.KnowledgeFrame(index=self.index).renagetting_ming(**kwargs).index
else:
new_index = None
if columns is not None or (mappingper is not None and axis == 1):
new_columns = (
monkey.KnowledgeFrame(columns=self.columns).renagetting_ming(**kwargs).columns
)
else:
new_columns = None
if inplace:
obj = self
else:
obj = self.clone()
if new_index is not None:
obj.index = new_index
if new_columns is not None:
obj.columns = new_columns
if not inplace:
return obj
def replacing(
self,
to_replacing=None,
value=None,
inplace=False,
limit=None,
regex=False,
method="pad",
):
inplace = validate_bool_kwarg(inplace, "inplace")
new_query_compiler = self._query_compiler.replacing(
to_replacing=to_replacing,
value=value,
inplace=False,
limit=limit,
regex=regex,
method=method,
)
return self._create_or_umkate_from_compiler(new_query_compiler, inplace)
def rfloordivision(self, other, axis="columns", level=None, fill_value=None):
return self._binary_op(
"rfloordivision",
other,
axis=axis,
level=level,
fill_value=fill_value,
broadcast=incontainstance(other, Collections),
)
def rmod(self, other, axis="columns", level=None, fill_value=None):
return self._binary_op(
"rmod",
other,
axis=axis,
level=level,
fill_value=fill_value,
broadcast=incontainstance(other, Collections),
)
def rpow(self, other, axis="columns", level=None, fill_value=None):
if incontainstance(other, Collections):
return self._default_to_monkey(
"rpow", other, axis=axis, level=level, fill_value=fill_value
)
return self._binary_op(
"rpow",
other,
axis=axis,
level=level,
fill_value=fill_value,
broadcast=incontainstance(other, Collections),
)
def rsub(self, other, axis="columns", level=None, fill_value=None):
return self._binary_op(
"rsub",
other,
axis=axis,
level=level,
fill_value=fill_value,
broadcast=incontainstance(other, Collections),
)
def rtruedivision(self, other, axis="columns", level=None, fill_value=None):
return self._binary_op(
"rtruedivision",
other,
axis=axis,
level=level,
fill_value=fill_value,
broadcast=incontainstance(other, Collections),
)
rdivision = rtruedivision
def choose_dtypes(self, include=None, exclude=None):
# Validates arguments for whether both include and exclude are None or
# if they are disjoint. Also invalidates string dtypes.
monkey.KnowledgeFrame().choose_dtypes(include, exclude)
if include and not is_list_like(include):
include = [include]
elif include is None:
include = []
if exclude and not is_list_like(exclude):
exclude = [exclude]
elif exclude is None:
exclude = []
sel = tuple(mapping(set, (include, exclude)))
include, exclude = mapping(lambda x: set(mapping(infer_dtype_from_object, x)), sel)
include_these = monkey.Collections(not bool(include), index=self.columns)
exclude_these = monkey.Collections(not bool(exclude), index=self.columns)
def is_dtype_instance_mappingper(column, dtype):
return column, functools.partial(issubclass, dtype.type)
for column, f in itertools.starmapping(
is_dtype_instance_mappingper, self.dtypes.iteritems()
):
if include: # checks for the case of empty include or exclude
include_these[column] = whatever(mapping(f, include))
if exclude:
exclude_these[column] = not whatever(mapping(f, exclude))
dtype_indexer = include_these & exclude_these
indicate = [
i for i in range(length(dtype_indexer.values)) if not dtype_indexer.values[i]
]
return self.sip(columns=self.columns[indicate], inplace=False)
def set_index(
self, keys, sip=True, adding=False, inplace=False, verify_integrity=False
):
inplace = validate_bool_kwarg(inplace, "inplace")
if not incontainstance(keys, list):
keys = [keys]
if inplace:
frame = self
else:
frame = self.clone()
arrays = []
names = []
if adding:
names = [x for x in self.index.names]
if self._query_compiler.has_multiindex():
for i in range(self.index.nlevels):
arrays.adding(self.index._getting_level_values(i))
else:
arrays.adding(self.index)
to_remove = []
for col in keys:
if incontainstance(col, monkey.MultiIndex):
# adding total_all but the final_item column so we don't have to modify
# the end of this loop
for n in range(col.nlevels - 1):
arrays.adding(col._getting_level_values(n))
level = col._getting_level_values(col.nlevels - 1)
names.extend(col.names)
elif incontainstance(col, monkey.Collections):
level = col._values
names.adding(col.name)
elif incontainstance(col, monkey.Index):
level = col
names.adding(col.name)
elif incontainstance(col, (list, np.ndarray, monkey.Index)):
level = col
names.adding(None)
else:
level = frame[col]._to_monkey()._values
names.adding(col)
if sip:
to_remove.adding(col)
arrays.adding(level)
index = ensure_index_from_sequences(arrays, names)
if verify_integrity and not index.is_distinctive:
duplicates = index.getting_duplicates()
raise ValueError("Index has duplicate keys: %s" % duplicates)
for c in to_remove:
del frame[c]
# clear up memory usage
index._cleanup()
frame.index = index
if not inplace:
return frame
sparse = CachedAccessor("sparse", SparseFrameAccessor)
def squeeze(self, axis=None):
axis = self._getting_axis_number(axis) if axis is not None else None
if axis is None and (length(self.columns) == 1 or length(self.index) == 1):
return Collections(query_compiler=self._query_compiler).squeeze()
if axis == 1 and length(self.columns) == 1:
return Collections(query_compiler=self._query_compiler)
if axis == 0 and length(self.index) == 1:
return Collections(query_compiler=self.T._query_compiler)
else:
return self.clone()
def stack(self, level=-1, sipna=True):
if not incontainstance(self.columns, monkey.MultiIndex) or (
incontainstance(self.columns, monkey.MultiIndex)
and is_list_like(level)
and length(level) == self.columns.nlevels
):
return self._reduce_dimension(
query_compiler=self._query_compiler.stack(level, sipna)
)
else:
return KnowledgeFrame(query_compiler=self._query_compiler.stack(level, sipna))
def sub(self, other, axis="columns", level=None, fill_value=None):
return self._binary_op(
"sub",
other,
axis=axis,
level=level,
fill_value=fill_value,
broadcast=incontainstance(other, Collections),
)
subtract = sub
def total_sum(
self,
axis=None,
skipna=None,
level=None,
numeric_only=None,
getting_min_count=0,
**kwargs,
):
axis = self._getting_axis_number(axis)
axis_to_employ = self.columns if axis else self.index
if (
skipna is not False
and numeric_only is None
and getting_min_count > length(axis_to_employ)
):
new_index = self.columns if not axis else self.index
return Collections(
[np.nan] * length(new_index), index=new_index, dtype=np.dtype("object")
)
data = self._validate_dtypes_total_sum_prod_average(
axis, numeric_only, ignore_axis=False
)
if level is not None:
return data.__constructor__(
query_compiler=data._query_compiler.total_sum_getting_min_count(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
getting_min_count=getting_min_count,
**kwargs,
)
)
if getting_min_count > 1:
return data._reduce_dimension(
data._query_compiler.total_sum_getting_min_count(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
getting_min_count=getting_min_count,
**kwargs,
)
)
return data._reduce_dimension(
data._query_compiler.total_sum(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
getting_min_count=getting_min_count,
**kwargs,
)
)
def to_feather(self, path, **kwargs): # pragma: no cover
return self._default_to_monkey(monkey.KnowledgeFrame.to_feather, path, **kwargs)
def to_gbq(
self,
destination_table,
project_id=None,
chunksize=None,
reauth=False,
if_exists="fail",
auth_local_webserver=False,
table_schema=None,
location=None,
progress_bar=True,
credentials=None,
): # pragma: no cover
return self._default_to_monkey(
monkey.KnowledgeFrame.to_gbq,
destination_table,
project_id=project_id,
chunksize=chunksize,
reauth=reauth,
if_exists=if_exists,
auth_local_webserver=auth_local_webserver,
table_schema=table_schema,
location=location,
progress_bar=progress_bar,
credentials=credentials,
)
def to_html(
self,
buf=None,
columns=None,
col_space=None,
header_numer=True,
index=True,
na_rep="NaN",
formatingters=None,
float_formating=None,
sparsify=None,
index_names=True,
justify=None,
getting_max_rows=None,
getting_max_cols=None,
show_dimensions=False,
decimal=".",
bold_rows=True,
classes=None,
escape=True,
notebook=False,
border=None,
table_id=None,
render_links=False,
encoding=None,
):
return self._default_to_monkey(
monkey.KnowledgeFrame.to_html,
buf=buf,
columns=columns,
col_space=col_space,
header_numer=header_numer,
index=index,
na_rep=na_rep,
formatingters=formatingters,
float_formating=float_formating,
sparsify=sparsify,
index_names=index_names,
justify=justify,
getting_max_rows=getting_max_rows,
getting_max_cols=getting_max_cols,
show_dimensions=show_dimensions,
decimal=decimal,
bold_rows=bold_rows,
classes=classes,
escape=escape,
notebook=notebook,
border=border,
table_id=table_id,
render_links=render_links,
encoding=None,
)
def to_parquet(
self,
path,
engine="auto",
compression="snappy",
index=None,
partition_cols=None,
**kwargs,
): # pragma: no cover
return self._default_to_monkey(
monkey.KnowledgeFrame.to_parquet,
path,
engine=engine,
compression=compression,
index=index,
partition_cols=partition_cols,
**kwargs,
)
def to_period(self, freq=None, axis=0, clone=True): # pragma: no cover
return super(KnowledgeFrame, self).to_period(freq=freq, axis=axis, clone=clone)
def to_records(self, index=True, column_dtypes=None, index_dtypes=None):
return self._default_to_monkey(
monkey.KnowledgeFrame.to_records,
index=index,
column_dtypes=column_dtypes,
index_dtypes=index_dtypes,
)
def to_stata(
self,
path,
convert_dates=None,
write_index=True,
byteorder=None,
time_stamp=None,
data_label=None,
variable_labels=None,
version=114,
convert_strl=None,
compression: Union[str, Mapping[str, str], None] = "infer",
): # pragma: no cover
return self._default_to_monkey(
monkey.KnowledgeFrame.to_stata,
path,
convert_dates=convert_dates,
write_index=write_index,
byteorder=byteorder,
time_stamp=time_stamp,
data_label=data_label,
variable_labels=variable_labels,
version=version,
convert_strl=convert_strl,
compression=compression,
)
def to_timestamp(self, freq=None, how="start", axis=0, clone=True):
return super(KnowledgeFrame, self).to_timestamp(
freq=freq, how=how, axis=axis, clone=clone
)
def truedivision(self, other, axis="columns", level=None, fill_value=None):
return self._binary_op(
"truedivision",
other,
axis=axis,
level=level,
fill_value=fill_value,
broadcast=incontainstance(other, Collections),
)
division = divisionide = truedivision
def umkate(
self, other, join="left", overwrite=True, filter_func=None, errors="ignore"
):
if not incontainstance(other, KnowledgeFrame):
other = KnowledgeFrame(other)
query_compiler = self._query_compiler.kf_umkate(
other._query_compiler,
join=join,
overwrite=overwrite,
filter_func=filter_func,
errors=errors,
)
self._umkate_inplace(new_query_compiler=query_compiler)
def counts_value_num(
self,
subset: Optional[Sequence[Label]] = None,
normalize: bool = False,
sort: bool = True,
ascending: bool = False,
):
return self._default_to_monkey(
"counts_value_num",
subset=subset,
normalize=normalize,
sort=sort,
ascending=ascending,
)
def where(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
):
inplace = validate_bool_kwarg(inplace, "inplace")
if incontainstance(other, monkey.Collections) and axis is None:
raise ValueError("Must specify axis=0 or 1")
if level is not None:
if incontainstance(other, KnowledgeFrame):
other = other._query_compiler.to_monkey()
if incontainstance(cond, KnowledgeFrame):
cond = cond._query_compiler.to_monkey()
new_query_compiler = self._default_to_monkey(
monkey.KnowledgeFrame.where,
cond,
other=other,
inplace=False,
axis=axis,
level=level,
errors=errors,
try_cast=try_cast,
)
return self._create_or_umkate_from_compiler(new_query_compiler, inplace)
axis = self._getting_axis_number(axis)
cond = cond(self) if ctotal_allable(cond) else cond
if not incontainstance(cond, KnowledgeFrame):
if not hasattr(cond, "shape"):
cond = np.aswhateverarray(cond)
if cond.shape != self.shape:
raise ValueError("Array conditional must be same shape as self")
cond = KnowledgeFrame(cond, index=self.index, columns=self.columns)
if incontainstance(other, KnowledgeFrame):
other = other._query_compiler
elif incontainstance(other, monkey.Collections):
other = other.reindexing(self.index if not axis else self.columns)
else:
index = self.index if not axis else self.columns
other = monkey.Collections(other, index=index)
query_compiler = self._query_compiler.where(
cond._query_compiler, other, axis=axis, level=level
)
return self._create_or_umkate_from_compiler(query_compiler, inplace)
def xs(self, key, axis=0, level=None, sip_level=True):
return self._default_to_monkey(
monkey.KnowledgeFrame.xs, key, axis=axis, level=level, sip_level=sip_level
)
def _gettingitem_column(self, key):
if key not in self.keys():
raise KeyError("{}".formating(key))
s = KnowledgeFrame(
query_compiler=self._query_compiler.gettingitem_column_array([key])
).squeeze(axis=1)
if incontainstance(s, Collections):
s._parent = self
s._parent_axis = 1
return s
def __gettingattr__(self, key):
try:
return object.__gettingattribute__(self, key)
except AttributeError as e:
if key not in _ATTRS_NO_LOOKUP and key in self.columns:
return self[key]
raise e
def __setattr__(self, key, value):
# We have to check for this first because we have to be able to set
# _query_compiler before we check if the key is in self
if key in ["_query_compiler"] or key in self.__dict__:
pass
elif key in self and key not in dir(self):
self.__setitem__(key, value)
elif incontainstance(value, monkey.Collections):
warnings.warn(
"Modin doesn't total_allow columns to be created via a new attribute name - see "
"https://monkey.pydata.org/monkey-docs/stable/indexing.html#attribute-access",
UserWarning,
)
object.__setattr__(self, key, value)
def __setitem__(self, key, value):
if hashable(key) and key not in self.columns:
# Handle new column case first
if incontainstance(value, Collections):
if length(self.columns) == 0:
self._query_compiler = value._query_compiler.clone()
else:
self._create_or_umkate_from_compiler(
self._query_compiler.concating(1, value._query_compiler),
inplace=True,
)
# Now that the data is addinged, we need to umkate the column name for
# that column to `key`, otherwise the name could be incorrect. Drop the
# final_item column name from the list (the addinged value's name and adding
# the new name.
self.columns = self.columns[:-1].adding(monkey.Index([key]))
return
elif (
incontainstance(value, (monkey.KnowledgeFrame, KnowledgeFrame)) and value.shape[1] != 1
):
raise ValueError(
"Wrong number of items passed %i, placement implies 1"
% value.shape[1]
)
elif incontainstance(value, np.ndarray) and length(value.shape) > 1:
if value.shape[1] == 1:
# Transform into columnar table and take first column
value = value.clone().T[0]
else:
raise ValueError(
"Wrong number of items passed %i, placement implies 1"
% value.shape[1]
)
# Do new column total_allocatement after error checks and possible value modifications
self.insert(loc=length(self.columns), column=key, value=value)
return
if not incontainstance(key, str):
if incontainstance(key, KnowledgeFrame) or incontainstance(key, np.ndarray):
if incontainstance(key, np.ndarray):
if key.shape != self.shape:
raise ValueError("Array must be same shape as KnowledgeFrame")
key = KnowledgeFrame(key, columns=self.columns)
return self.mask(key, value, inplace=True)
def setitem_without_string_columns(kf):
# Arrow makes memory-mappingped objects immutable, so clone will total_allow them
# to be mutable again.
kf = kf.clone(True)
kf[key] = value
return kf
return self._umkate_inplace(
self._default_to_monkey(setitem_without_string_columns)._query_compiler
)
if is_list_like(value):
if incontainstance(value, (monkey.KnowledgeFrame, KnowledgeFrame)):
value = value[value.columns[0]].values
elif incontainstance(value, np.ndarray):
assert (
length(value.shape) < 3
), "Shape of new values must be compatible with manager shape"
value = value.T.reshape(-1)
if length(self) > 0:
value = value[: length(self)]
if not incontainstance(value, Collections):
value = list(value)
if not self._query_compiler.lazy_execution and length(self.index) == 0:
new_self = KnowledgeFrame({key: value}, columns=self.columns)
self._umkate_inplace(new_self._query_compiler)
else:
if incontainstance(value, Collections):
value = value._query_compiler
self._umkate_inplace(self._query_compiler.setitem(0, key, value))
def __hash__(self):
return self._default_to_monkey(monkey.KnowledgeFrame.__hash__)
def __iter__(self):
return iter(self.columns)
def __contains__(self, key):
return self.columns.__contains__(key)
def __value_round__(self, decimals=0):
return self._default_to_monkey(monkey.KnowledgeFrame.__value_round__, decimals=decimals)
def __setstate__(self, state):
return self._default_to_monkey(monkey.KnowledgeFrame.__setstate__, state)
def __delitem__(self, key):
if key not in self:
raise KeyError(key)
self._umkate_inplace(new_query_compiler=self._query_compiler.delitem(key))
__add__ = add
__iadd__ = add # pragma: no cover
__radd__ = radd
__mul__ = mul
__imul__ = mul # pragma: no cover
__rmul__ = rmul
__pow__ = pow
__ipow__ = pow # pragma: no cover
__rpow__ = rpow
__sub__ = sub
__isub__ = sub # pragma: no cover
__rsub__ = rsub
__floordivision__ = floordivision
__ifloordivision__ = floordivision # pragma: no cover
__rfloordivision__ = rfloordivision
__truedivision__ = truedivision
__itruedivision__ = truedivision # pragma: no cover
__rtruedivision__ = rtruedivision
__mod__ = mod
__imod__ = mod # pragma: no cover
__rmod__ = rmod
__division__ = division
__rdivision__ = rdivision
@property
def attrs(self):
def attrs(kf):
return kf.attrs
self._default_to_monkey(attrs)
@property
def __doc__(self): # pragma: no cover
def __doc__(kf):
"""Define __name__ attr because properties do not have it."""
return kf.__doc__
return self._default_to_monkey(__doc__)
@property
def style(self):
def style(kf):
"""Define __name__ attr because properties do not have it."""
return kf.style
return self._default_to_monkey(style)
def _create_or_umkate_from_compiler(self, new_query_compiler, inplace=False):
"""
Return or umkate a KnowledgeFrame given new query_compiler.
TODO: add description for parameters.
Parameters
----------
new_query_compiler: query_compiler
inplace: bool
Returns
-------
knowledgeframe
"""
assert (
incontainstance(new_query_compiler, type(self._query_compiler))
or type(new_query_compiler) in self._query_compiler.__class__.__bases__
), "Invalid Query Compiler object: {}".formating(type(new_query_compiler))
if not inplace:
return KnowledgeFrame(query_compiler=new_query_compiler)
else:
self._umkate_inplace(new_query_compiler=new_query_compiler)
def _getting_numeric_data(self, axis: int):
"""
Grabs only numeric columns from frame.
Parameters
----------
axis: int
Axis to inspect on having numeric types only.
If axis is not 0, returns the frame itself.
Returns
-------
KnowledgeFrame with numeric data.
"""
# Monkey ignores `numeric_only` if `axis` is 1, but we do have to sip
# non-numeric columns if `axis` is 0.
if axis != 0:
return self
return self.sip(
columns=[
i for i in self.dtypes.index if not is_numeric_dtype(self.dtypes[i])
]
)
def _validate_dtypes(self, numeric_only=False):
"""
Help to check that total_all the dtypes are the same.
TODO: add description for parameters.
Parameters
----------
numeric_only: bool
"""
dtype = self.dtypes[0]
for t in self.dtypes:
if numeric_only and not is_numeric_dtype(t):
raise TypeError("{0} is not a numeric data type".formating(t))
elif not numeric_only and t != dtype:
raise TypeError(
"Cannot compare type '{0}' with type '{1}'".formating(t, dtype)
)
def _validate_dtypes_getting_min_getting_max(self, axis, numeric_only):
# If our KnowledgeFrame has both numeric and non-numeric dtypes then
# comparisons between these types do not make sense and we must raise a
# TypeError. The exception to this rule is when there are datetime and
# timedelta objects, in which case we proceed with the comparison
# without ignoring whatever non-numeric types. We must check explicitly if
# numeric_only is False because if it is None, it will default to True
# if the operation fails with mixed dtypes.
if (
axis
and numeric_only is False
and np.distinctive([is_numeric_dtype(dtype) for dtype in self.dtypes]).size == 2
):
# check if there are columns with dtypes datetime or timedelta
if total_all(
dtype != np.dtype("datetime64[ns]")
and dtype != np.dtype("timedelta64[ns]")
for dtype in self.dtypes
):
raise TypeError("Cannot compare Numeric and Non-Numeric Types")
return self._getting_numeric_data(axis) if numeric_only else self
def _validate_dtypes_total_sum_prod_average(self, axis, numeric_only, ignore_axis=False):
"""
Raise TypeErrors for total_sum, prod, and average where necessary.
TODO: Add more definal_item_tails for this docstring template.
Parameters
----------
What arguments does this function have.
[
PARAMETER_NAME: PARAMETERS TYPES
Description.
]
Returns
-------
What this returns (if whateverthing)
"""
# We cannot add datetime types, so if we are total_sumgetting_ming a column with
# dtype datetime64 and cannot ignore non-numeric types, we must throw a
# TypeError.
if (
not axis
and numeric_only is False
and whatever(dtype == np.dtype("datetime64[ns]") for dtype in self.dtypes)
):
raise TypeError("Cannot add Timestamp Types")
# If our KnowledgeFrame has both numeric and non-numeric dtypes then
# operations between these types do not make sense and we must raise a
# TypeError. The exception to this rule is when there are datetime and
# timedelta objects, in which case we proceed with the comparison
# without ignoring whatever non-numeric types. We must check explicitly if
# numeric_only is False because if it is None, it will default to True
# if the operation fails with mixed dtypes.
if (
(axis or ignore_axis)
and numeric_only is False
and np.distinctive([is_numeric_dtype(dtype) for dtype in self.dtypes]).size == 2
):
# check if there are columns with dtypes datetime or timedelta
if total_all(
dtype != np.dtype("datetime64[ns]")
and dtype != np.dtype("timedelta64[ns]")
for dtype in self.dtypes
):
raise TypeError("Cannot operate on Numeric and Non-Numeric Types")
return self._getting_numeric_data(axis) if numeric_only else self
def _to_monkey(self):
return self._query_compiler.to_monkey()
def _validate_eval_query(self, expr, **kwargs):
"""
Help to check the arguments to eval() and query().
Parameters
----------
expr: The expression to evaluate. This string cannot contain whatever
Python statements, only Python expressions.
**kwargs
"""
if incontainstance(expr, str) and expr == "":
raise ValueError("expr cannot be an empty string")
if incontainstance(expr, str) and "@" in expr:
ErrorMessage.not_implemented("Local variables not yet supported in eval.")
if incontainstance(expr, str) and "not" in expr:
if "parser" in kwargs and kwargs["parser"] == "python":
ErrorMessage.not_implemented(
"'Not' nodes are not implemented."
) # pragma: no cover
def _reduce_dimension(self, query_compiler):
"""
Implement [METHOD_NAME].
TODO: Add more definal_item_tails for this docstring template.
Parameters
----------
What arguments does this function have.
[
PARAMETER_NAME: PARAMETERS TYPES
Description.
]
Returns
-------
What this returns (if whateverthing)
"""
return Collections(query_compiler=query_compiler)
def _set_axis_name(self, name, axis=0, inplace=False):
"""
Alter the name or names of the axis.
TODO: add types.
Parameters
----------
name:
Name for the Index, or list of names for the MultiIndex
axis:
0 or 'index' for the index; 1 or 'columns' for the columns
inplace:
Whether to modify `self` directly or return a clone
Returns
-------
Type of ctotal_aller or None if inplace=True.
"""
axis = self._getting_axis_number(axis)
renagetting_mingd = self if inplace else self.clone()
if axis == 0:
renagetting_mingd.index = renagetting_mingd.index.set_names(name)
else:
renagetting_mingd.columns = renagetting_mingd.columns.set_names(name)
if not inplace:
return renagetting_mingd
def _convert_datetime(self, **kwargs):
"""
Convert `self` to datetime.
Returns
-------
datetime
Collections: Collections of datetime64 dtype
"""
return self._reduce_dimension(
query_compiler=self._query_compiler.convert_datetime(**kwargs)
)
def _gettingitem(self, key):
"""
Get the column specified by key for this KnowledgeFrame.
Parameters
----------
key: the column name.
Returns
-------
A Monkey Collections representing the value for the column.
"""
key = | employ_if_ctotal_allable(key, self) | pandas.core.common.apply_if_callable |
"""
Module contains tools for processing files into KnowledgeFrames or other objects
"""
from collections import abc, defaultdict
import csv
import datetime
from io import StringIO
import itertools
import re
import sys
from textwrap import fill
from typing import (
Any,
Dict,
Iterable,
Iterator,
List,
Optional,
Sequence,
Set,
Type,
cast,
)
import warnings
import numpy as np
import monkey._libs.lib as lib
import monkey._libs.ops as libops
import monkey._libs.parsers as parsers
from monkey._libs.parsers import STR_NA_VALUES
from monkey._libs.tslibs import parsing
from monkey._typing import FilePathOrBuffer, StorageOptions, Union
from monkey.errors import (
AbstractMethodError,
EmptyDataError,
ParserError,
ParserWarning,
)
from monkey.util._decorators import Appender
from monkey.core.dtypes.cast import totype_nansafe
from monkey.core.dtypes.common import (
ensure_object,
ensure_str,
is_bool_dtype,
is_categorical_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_file_like,
is_float,
is_integer,
is_integer_dtype,
is_list_like,
is_object_dtype,
is_scalar,
is_string_dtype,
monkey_dtype,
)
from monkey.core.dtypes.dtypes import CategoricalDtype
from monkey.core.dtypes.missing import ifna
from monkey.core import algorithms, generic
from monkey.core.arrays import Categorical
from monkey.core.frame import KnowledgeFrame
from monkey.core.indexes.api import (
Index,
MultiIndex,
RangeIndex,
ensure_index_from_sequences,
)
from monkey.core.collections import Collections
from monkey.core.tools import datetimes as tools
from monkey.io.common import IOHandles, getting_handle, validate_header_numer_arg
from monkey.io.date_converters import generic_parser
# BOM character (byte order mark)
# This exists at the beginning of a file to indicate endianness
# of a file (stream). Unfortunately, this marker screws up parsing,
# so we need to remove it if we see it.
_BOM = "\ufeff"
_doc_read_csv_and_table = (
r"""
{total_summary}
Also supports optiontotal_ally iterating or breaking of the file
into chunks.
Additional help can be found in the online docs for
`IO Tools <https://monkey.pydata.org/monkey-docs/stable/user_guide/io.html>`_.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, gs, and file. For file URLs, a host is
expected. A local file could be: file://localhost/path/to/table.csv.
If you want to pass in a path object, monkey accepts whatever ``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method, such as
a file handle (e.g. via builtin ``open`` function) or ``StringIO``.
sep : str, default {_default_sep}
Delimiter to use. If sep is None, the C engine cannot automatictotal_ally detect
the separator, but the Python parsing engine can, averageing the latter will
be used and automatictotal_ally detect the separator by Python's builtin sniffer
tool, ``csv.Sniffer``. In addition, separators longer than 1 character and
different from ``'\s+'`` will be interpreted as regular expressions and
will also force the use of the Python parsing engine. Note that regex
delimiters are prone to ignoring quoted data. Regex example: ``'\r\t'``.
delimiter : str, default ``None``
Alias for sep.
header_numer : int, list of int, default 'infer'
Row number(s) to use as the column names, and the start of the
data. Default behavior is to infer the column names: if no names
are passed the behavior is identical to ``header_numer=0`` and column
names are inferred from the first line of the file, if column
names are passed explicitly then the behavior is identical to
``header_numer=None``. Explicitly pass ``header_numer=0`` to be able to
replacing existing names. The header_numer can be a list of integers that
specify row locations for a multi-index on the columns
e.g. [0,1,3]. Intervening rows that are not specified will be
skipped (e.g. 2 in this example is skipped). Note that this
parameter ignores commented lines and empty lines if
``skip_blank_lines=True``, so ``header_numer=0`` denotes the first line of
data rather than the first line of the file.
names : array-like, optional
List of column names to use. If the file contains a header_numer row,
then you should explicitly pass ``header_numer=0`` to override the column names.
Duplicates in this list are not total_allowed.
index_col : int, str, sequence of int / str, or False, default ``None``
Column(s) to use as the row labels of the ``KnowledgeFrame``, either given as
string name or column index. If a sequence of int / str is given, a
MultiIndex is used.
Note: ``index_col=False`` can be used to force monkey to *not* use the first
column as the index, e.g. when you have a malformed file with delimiters at
the end of each line.
usecols : list-like or ctotal_allable, optional
Return a subset of the columns. If list-like, total_all elements must either
be positional (i.e. integer indices into the document columns) or strings
that correspond to column names provided either by the user in `names` or
inferred from the document header_numer row(s). For example, a valid list-like
`usecols` parameter would be ``[0, 1, 2]`` or ``['foo', 'bar', 'baz']``.
Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``.
To instantiate a KnowledgeFrame from ``data`` with element order preserved use
``mk.read_csv(data, usecols=['foo', 'bar'])[['foo', 'bar']]`` for columns
in ``['foo', 'bar']`` order or
``mk.read_csv(data, usecols=['foo', 'bar'])[['bar', 'foo']]``
for ``['bar', 'foo']`` order.
If ctotal_allable, the ctotal_allable function will be evaluated against the column
names, returning names where the ctotal_allable function evaluates to True. An
example of a valid ctotal_allable argument would be ``lambda x: x.upper() in
['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster
parsing time and lower memory usage.
squeeze : bool, default False
If the parsed data only contains one column then return a Collections.
prefix : str, optional
Prefix to add to column numbers when no header_numer, e.g. 'X' for X0, X1, ...
mangle_dupe_cols : bool, default True
Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than
'X'...'X'. Passing in False will cause data to be overwritten if there
are duplicate names in the columns.
dtype : Type name or dict of column -> type, optional
Data type for data or columns. E.g. {{'a': np.float64, 'b': np.int32,
'c': 'Int64'}}
Use `str` or `object` togettingher with suitable `na_values` settings
to preserve and not interpret dtype.
If converters are specified, they will be applied INSTEAD
of dtype conversion.
engine : {{'c', 'python'}}, optional
Parser engine to use. The C engine is faster while the python engine is
currently more feature-complete.
converters : dict, optional
Dict of functions for converting values in certain columns. Keys can either
be integers or column labels.
true_values : list, optional
Values to consider as True.
false_values : list, optional
Values to consider as False.
skipinitialspace : bool, default False
Skip spaces after delimiter.
skiprows : list-like, int or ctotal_allable, optional
Line numbers to skip (0-indexed) or number of lines to skip (int)
at the start of the file.
If ctotal_allable, the ctotal_allable function will be evaluated against the row
indices, returning True if the row should be skipped and False otherwise.
An example of a valid ctotal_allable argument would be ``lambda x: x in [0, 2]``.
skipfooter : int, default 0
Number of lines at bottom of file to skip (Unsupported with engine='c').
nrows : int, optional
Number of rows of file to read. Useful for reading pieces of large files.
na_values : scalar, str, list-like, or dict, optional
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values. By default the following values are interpreted as
NaN: '"""
+ fill("', '".join(sorted(STR_NA_VALUES)), 70, subsequent_indent=" ")
+ """'.
keep_default_na : bool, default True
Whether or not to include the default NaN values when parsing the data.
Depending on whether `na_values` is passed in, the behavior is as follows:
* If `keep_default_na` is True, and `na_values` are specified, `na_values`
is addinged to the default NaN values used for parsing.
* If `keep_default_na` is True, and `na_values` are not specified, only
the default NaN values are used for parsing.
* If `keep_default_na` is False, and `na_values` are specified, only
the NaN values specified `na_values` are used for parsing.
* If `keep_default_na` is False, and `na_values` are not specified, no
strings will be parsed as NaN.
Note that if `na_filter` is passed in as False, the `keep_default_na` and
`na_values` parameters will be ignored.
na_filter : bool, default True
Detect missing value markers (empty strings and the value of na_values). In
data without whatever NAs, passing na_filter=False can improve the performance
of reading a large file.
verbose : bool, default False
Indicate number of NA values placed in non-numeric columns.
skip_blank_lines : bool, default True
If True, skip over blank lines rather than interpreting as NaN values.
parse_dates : bool or list of int or names or list of lists or dict, \
default False
The behavior is as follows:
* boolean. If True -> try parsing the index.
* list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
each as a separate date column.
* list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
a single date column.
* dict, e.g. {{'foo' : [1, 3]}} -> parse columns 1, 3 as date and ctotal_all
result 'foo'
If a column or index cannot be represented as an array of datetimes,
say because of an unparsable value or a mixture of timezones, the column
or index will be returned unaltered as an object data type. For
non-standard datetime parsing, use ``mk.convert_datetime`` after
``mk.read_csv``. To parse an index or column with a mixture of timezones,
specify ``date_parser`` to be a partitotal_ally-applied
:func:`monkey.convert_datetime` with ``utc=True``. See
:ref:`io.csv.mixed_timezones` for more.
Note: A fast-path exists for iso8601-formatingted dates.
infer_datetime_formating : bool, default False
If True and `parse_dates` is enabled, monkey will attempt to infer the
formating of the datetime strings in the columns, and if it can be inferred,
switch to a faster method of parsing them. In some cases this can increase
the parsing speed by 5-10x.
keep_date_col : bool, default False
If True and `parse_dates` specifies combining multiple columns then
keep the original columns.
date_parser : function, optional
Function to use for converting a sequence of string columns to an array of
datetime instances. The default uses ``dateutil.parser.parser`` to do the
conversion. Monkey will try to ctotal_all `date_parser` in three different ways,
advancing to the next if an exception occurs: 1) Pass one or more arrays
(as defined by `parse_dates`) as arguments; 2) concatingenate (row-wise) the
string values from the columns defined by `parse_dates` into a single array
and pass that; and 3) ctotal_all `date_parser` once for each row using one or
more strings (corresponding to the columns defined by `parse_dates`) as
arguments.
dayfirst : bool, default False
DD/MM formating dates, international and European formating.
cache_dates : bool, default True
If True, use a cache of distinctive, converted dates to employ the datetime
conversion. May produce significant speed-up when parsing duplicate
date strings, especitotal_ally ones with timezone offsets.
.. versionadded:: 0.25.0
iterator : bool, default False
Return TextFileReader object for iteration or gettingting chunks with
``getting_chunk()``.
.. versionchanged:: 1.2
``TextFileReader`` is a context manager.
chunksize : int, optional
Return TextFileReader object for iteration.
See the `IO Tools docs
<https://monkey.pydata.org/monkey-docs/stable/io.html#io-chunking>`_
for more informatingion on ``iterator`` and ``chunksize``.
.. versionchanged:: 1.2
``TextFileReader`` is a context manager.
compression : {{'infer', 'gzip', 'bz2', 'zip', 'xz', None}}, default 'infer'
For on-the-fly decompression of on-disk data. If 'infer' and
`filepath_or_buffer` is path-like, then detect compression from the
following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no
decompression). If using 'zip', the ZIP file must contain only one data
file to be read in. Set to None for no decompression.
thousands : str, optional
Thousands separator.
decimal : str, default '.'
Character to recognize as decimal point (e.g. use ',' for European data).
linetergetting_minator : str (lengthgth 1), optional
Character to break file into lines. Only valid with C parser.
quotechar : str (lengthgth 1), optional
The character used to denote the start and end of a quoted item. Quoted
items can include the delimiter and it will be ignored.
quoting : int or csv.QUOTE_* instance, default 0
Control field quoting behavior per ``csv.QUOTE_*`` constants. Use one of
QUOTE_MINIMAL (0), QUOTE_ALL (1), QUOTE_NONNUMERIC (2) or QUOTE_NONE (3).
doublequote : bool, default ``True``
When quotechar is specified and quoting is not ``QUOTE_NONE``, indicate
whether or not to interpret two consecutive quotechar elements INSIDE a
field as a single ``quotechar`` element.
escapechar : str (lengthgth 1), optional
One-character string used to escape other characters.
comment : str, optional
Indicates remainder of line should not be parsed. If found at the beginning
of a line, the line will be ignored altogettingher. This parameter must be a
single character. Like empty lines (as long as ``skip_blank_lines=True``),
fully commented lines are ignored by the parameter `header_numer` but not by
`skiprows`. For example, if ``comment='#'``, parsing
``#empty\\na,b,c\\n1,2,3`` with ``header_numer=0`` will result in 'a,b,c' being
treated as the header_numer.
encoding : str, optional
Encoding to use for UTF when reading/writing (ex. 'utf-8'). `List of Python
standard encodings
<https://docs.python.org/3/library/codecs.html#standard-encodings>`_ .
dialect : str or csv.Dialect, optional
If provided, this parameter will override values (default or not) for the
following parameters: `delimiter`, `doublequote`, `escapechar`,
`skipinitialspace`, `quotechar`, and `quoting`. If it is necessary to
override values, a ParserWarning will be issued. See csv.Dialect
documentation for more definal_item_tails.
error_bad_lines : bool, default True
Lines with too mwhatever fields (e.g. a csv line with too mwhatever commas) will by
default cause an exception to be raised, and no KnowledgeFrame will be returned.
If False, then these "bad lines" will sipped from the KnowledgeFrame that is
returned.
warn_bad_lines : bool, default True
If error_bad_lines is False, and warn_bad_lines is True, a warning for each
"bad line" will be output.
delim_whitespace : bool, default False
Specifies whether or not whitespace (e.g. ``' '`` or ``'\t'``) will be
used as the sep. Equivalengtht to setting ``sep='\\s+'``. If this option
is set to True, nothing should be passed in for the ``delimiter``
parameter.
low_memory : bool, default True
Interntotal_ally process the file in chunks, resulting in lower memory use
while parsing, but possibly mixed type inference. To ensure no mixed
types either set False, or specify the type with the `dtype` parameter.
Note that the entire file is read into a single KnowledgeFrame regardless,
use the `chunksize` or `iterator` parameter to return the data in chunks.
(Only valid with C parser).
memory_mapping : bool, default False
If a filepath is provided for `filepath_or_buffer`, mapping the file object
directly onto memory and access the data directly from there. Using this
option can improve performance because there is no longer whatever I/O overheader_num.
float_precision : str, optional
Specifies which converter the C engine should use for floating-point
values. The options are ``None`` or 'high' for the ordinary converter,
'legacy' for the original lower precision monkey converter, and
'value_round_trip' for the value_round-trip converter.
.. versionchanged:: 1.2
{storage_options}
.. versionadded:: 1.2
Returns
-------
KnowledgeFrame or TextParser
A comma-separated values (csv) file is returned as two-dimensional
data structure with labeled axes.
See Also
--------
KnowledgeFrame.to_csv : Write KnowledgeFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into KnowledgeFrame.
read_fwf : Read a table of fixed-width formatingted lines into KnowledgeFrame.
Examples
--------
>>> mk.{func_name}('data.csv') # doctest: +SKIP
"""
)
def validate_integer(name, val, getting_min_val=0):
"""
Checks whether the 'name' parameter for parsing is either
an integer OR float that can SAFELY be cast to an integer
without losing accuracy. Raises a ValueError if that is
not the case.
Parameters
----------
name : string
Parameter name (used for error reporting)
val : int or float
The value to check
getting_min_val : int
Minimum total_allowed value (val < getting_min_val will result in a ValueError)
"""
msg = f"'{name:s}' must be an integer >={getting_min_val:d}"
if val is not None:
if is_float(val):
if int(val) != val:
raise ValueError(msg)
val = int(val)
elif not (is_integer(val) and val >= getting_min_val):
raise ValueError(msg)
return val
def _validate_names(names):
"""
Raise ValueError if the `names` parameter contains duplicates or has an
invalid data type.
Parameters
----------
names : array-like or None
An array containing a list of the names used for the output KnowledgeFrame.
Raises
------
ValueError
If names are not distinctive or are not ordered (e.g. set).
"""
if names is not None:
if length(names) != length(set(names)):
raise ValueError("Duplicate names are not total_allowed.")
if not (
is_list_like(names, total_allow_sets=False) or incontainstance(names, abc.KeysView)
):
raise ValueError("Names should be an ordered collection.")
def _read(filepath_or_buffer: FilePathOrBuffer, kwds):
"""Generic reader of line files."""
if kwds.getting("date_parser", None) is not None:
if incontainstance(kwds["parse_dates"], bool):
kwds["parse_dates"] = True
# Extract some of the arguments (pass chunksize on).
iterator = kwds.getting("iterator", False)
chunksize = validate_integer("chunksize", kwds.getting("chunksize", None), 1)
nrows = kwds.getting("nrows", None)
# Check for duplicates in names.
_validate_names(kwds.getting("names", None))
# Create the parser.
parser = TextFileReader(filepath_or_buffer, **kwds)
if chunksize or iterator:
return parser
with parser:
return parser.read(nrows)
_parser_defaults = {
"delimiter": None,
"escapechar": None,
"quotechar": '"',
"quoting": csv.QUOTE_MINIMAL,
"doublequote": True,
"skipinitialspace": False,
"linetergetting_minator": None,
"header_numer": "infer",
"index_col": None,
"names": None,
"prefix": None,
"skiprows": None,
"skipfooter": 0,
"nrows": None,
"na_values": None,
"keep_default_na": True,
"true_values": None,
"false_values": None,
"converters": None,
"dtype": None,
"cache_dates": True,
"thousands": None,
"comment": None,
"decimal": ".",
# 'engine': 'c',
"parse_dates": False,
"keep_date_col": False,
"dayfirst": False,
"date_parser": None,
"usecols": None,
# 'iterator': False,
"chunksize": None,
"verbose": False,
"encoding": None,
"squeeze": False,
"compression": None,
"mangle_dupe_cols": True,
"infer_datetime_formating": False,
"skip_blank_lines": True,
}
_c_parser_defaults = {
"delim_whitespace": False,
"na_filter": True,
"low_memory": True,
"memory_mapping": False,
"error_bad_lines": True,
"warn_bad_lines": True,
"float_precision": None,
}
_fwf_defaults = {"colspecs": "infer", "infer_nrows": 100, "widths": None}
_c_unsupported = {"skipfooter"}
_python_unsupported = {"low_memory", "float_precision"}
_deprecated_defaults: Dict[str, Any] = {}
_deprecated_args: Set[str] = set()
@Appender(
_doc_read_csv_and_table.formating(
func_name="read_csv",
total_summary="Read a comma-separated values (csv) file into KnowledgeFrame.",
_default_sep="','",
storage_options=generic._shared_docs["storage_options"],
)
)
def read_csv(
filepath_or_buffer: FilePathOrBuffer,
sep=lib.no_default,
delimiter=None,
# Column and Index Locations and Names
header_numer="infer",
names=None,
index_col=None,
usecols=None,
squeeze=False,
prefix=None,
mangle_dupe_cols=True,
# General Parsing Configuration
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skipinitialspace=False,
skiprows=None,
skipfooter=0,
nrows=None,
# NA and Missing Data Handling
na_values=None,
keep_default_na=True,
na_filter=True,
verbose=False,
skip_blank_lines=True,
# Datetime Handling
parse_dates=False,
infer_datetime_formating=False,
keep_date_col=False,
date_parser=None,
dayfirst=False,
cache_dates=True,
# Iteration
iterator=False,
chunksize=None,
# Quoting, Compression, and File Format
compression="infer",
thousands=None,
decimal: str = ".",
linetergetting_minator=None,
quotechar='"',
quoting=csv.QUOTE_MINIMAL,
doublequote=True,
escapechar=None,
comment=None,
encoding=None,
dialect=None,
# Error Handling
error_bad_lines=True,
warn_bad_lines=True,
# Internal
delim_whitespace=False,
low_memory=_c_parser_defaults["low_memory"],
memory_mapping=False,
float_precision=None,
storage_options: StorageOptions = None,
):
kwds = locals()
del kwds["filepath_or_buffer"]
del kwds["sep"]
kwds_defaults = _refine_defaults_read(
dialect, delimiter, delim_whitespace, engine, sep, defaults={"delimiter": ","}
)
kwds.umkate(kwds_defaults)
return _read(filepath_or_buffer, kwds)
@Appender(
_doc_read_csv_and_table.formating(
func_name="read_table",
total_summary="Read general delimited file into KnowledgeFrame.",
_default_sep=r"'\\t' (tab-stop)",
storage_options=generic._shared_docs["storage_options"],
)
)
def read_table(
filepath_or_buffer: FilePathOrBuffer,
sep=lib.no_default,
delimiter=None,
# Column and Index Locations and Names
header_numer="infer",
names=None,
index_col=None,
usecols=None,
squeeze=False,
prefix=None,
mangle_dupe_cols=True,
# General Parsing Configuration
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skipinitialspace=False,
skiprows=None,
skipfooter=0,
nrows=None,
# NA and Missing Data Handling
na_values=None,
keep_default_na=True,
na_filter=True,
verbose=False,
skip_blank_lines=True,
# Datetime Handling
parse_dates=False,
infer_datetime_formating=False,
keep_date_col=False,
date_parser=None,
dayfirst=False,
cache_dates=True,
# Iteration
iterator=False,
chunksize=None,
# Quoting, Compression, and File Format
compression="infer",
thousands=None,
decimal: str = ".",
linetergetting_minator=None,
quotechar='"',
quoting=csv.QUOTE_MINIMAL,
doublequote=True,
escapechar=None,
comment=None,
encoding=None,
dialect=None,
# Error Handling
error_bad_lines=True,
warn_bad_lines=True,
# Internal
delim_whitespace=False,
low_memory=_c_parser_defaults["low_memory"],
memory_mapping=False,
float_precision=None,
):
kwds = locals()
del kwds["filepath_or_buffer"]
del kwds["sep"]
kwds_defaults = _refine_defaults_read(
dialect, delimiter, delim_whitespace, engine, sep, defaults={"delimiter": "\t"}
)
kwds.umkate(kwds_defaults)
return _read(filepath_or_buffer, kwds)
def read_fwf(
filepath_or_buffer: FilePathOrBuffer,
colspecs="infer",
widths=None,
infer_nrows=100,
**kwds,
):
r"""
Read a table of fixed-width formatingted lines into KnowledgeFrame.
Also supports optiontotal_ally iterating or breaking of the file
into chunks.
Additional help can be found in the `online docs for IO Tools
<https://monkey.pydata.org/monkey-docs/stable/user_guide/io.html>`_.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be:
``file://localhost/path/to/table.csv``.
If you want to pass in a path object, monkey accepts whatever
``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method,
such as a file handle (e.g. via builtin ``open`` function)
or ``StringIO``.
colspecs : list of tuple (int, int) or 'infer'. optional
A list of tuples giving the extents of the fixed-width
fields of each line as half-open intervals (i.e., [from, to[ ).
String value 'infer' can be used to instruct the parser to try
detecting the column specifications from the first 100 rows of
the data which are not being skipped via skiprows (default='infer').
widths : list of int, optional
A list of field widths which can be used instead of 'colspecs' if
the intervals are contiguous.
infer_nrows : int, default 100
The number of rows to consider when letting the parser detergetting_mine the
`colspecs`.
.. versionadded:: 0.24.0
**kwds : optional
Optional keyword arguments can be passed to ``TextFileReader``.
Returns
-------
KnowledgeFrame or TextParser
A comma-separated values (csv) file is returned as two-dimensional
data structure with labeled axes.
See Also
--------
KnowledgeFrame.to_csv : Write KnowledgeFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into KnowledgeFrame.
Examples
--------
>>> mk.read_fwf('data.csv') # doctest: +SKIP
"""
# Check input arguments.
if colspecs is None and widths is None:
raise ValueError("Must specify either colspecs or widths")
elif colspecs not in (None, "infer") and widths is not None:
raise ValueError("You must specify only one of 'widths' and 'colspecs'")
# Compute 'colspecs' from 'widths', if specified.
if widths is not None:
colspecs, col = [], 0
for w in widths:
colspecs.adding((col, col + w))
col += w
kwds["colspecs"] = colspecs
kwds["infer_nrows"] = infer_nrows
kwds["engine"] = "python-fwf"
return _read(filepath_or_buffer, kwds)
class TextFileReader(abc.Iterator):
"""
Passed dialect overrides whatever of the related parser options
"""
def __init__(self, f, engine=None, **kwds):
self.f = f
if engine is not None:
engine_specified = True
else:
engine = "python"
engine_specified = False
self.engine = engine
self._engine_specified = kwds.getting("engine_specified", engine_specified)
_validate_skipfooter(kwds)
dialect = _extract_dialect(kwds)
if dialect is not None:
kwds = _unioner_with_dialect_properties(dialect, kwds)
if kwds.getting("header_numer", "infer") == "infer":
kwds["header_numer"] = 0 if kwds.getting("names") is None else None
self.orig_options = kwds
# miscellanea
self._currow = 0
options = self._getting_options_with_defaults(engine)
options["storage_options"] = kwds.getting("storage_options", None)
self.chunksize = options.pop("chunksize", None)
self.nrows = options.pop("nrows", None)
self.squeeze = options.pop("squeeze", False)
self._check_file_or_buffer(f, engine)
self.options, self.engine = self._clean_options(options, engine)
if "has_index_names" in kwds:
self.options["has_index_names"] = kwds["has_index_names"]
self._engine = self._make_engine(self.engine)
def close(self):
self._engine.close()
def _getting_options_with_defaults(self, engine):
kwds = self.orig_options
options = {}
for argname, default in _parser_defaults.items():
value = kwds.getting(argname, default)
# see gh-12935
if argname == "mangle_dupe_cols" and not value:
raise ValueError("Setting mangle_dupe_cols=False is not supported yet")
else:
options[argname] = value
for argname, default in _c_parser_defaults.items():
if argname in kwds:
value = kwds[argname]
if engine != "c" and value != default:
if "python" in engine and argname not in _python_unsupported:
pass
elif value == _deprecated_defaults.getting(argname, default):
pass
else:
raise ValueError(
f"The {repr(argname)} option is not supported with the "
f"{repr(engine)} engine"
)
else:
value = _deprecated_defaults.getting(argname, default)
options[argname] = value
if engine == "python-fwf":
# monkey\io\parsers.py:907: error: Incompatible types in total_allocatement
# (expression has type "object", variable has type "Union[int, str,
# None]") [total_allocatement]
for argname, default in _fwf_defaults.items(): # type: ignore[total_allocatement]
options[argname] = kwds.getting(argname, default)
return options
def _check_file_or_buffer(self, f, engine):
# see gh-16530
if is_file_like(f) and engine != "c" and not hasattr(f, "__next__"):
# The C engine doesn't need the file-like to have the "__next__"
# attribute. However, the Python engine explicitly ctotal_alls
# "__next__(...)" when iterating through such an object, averageing it
# needs to have that attribute
raise ValueError(
"The 'python' engine cannot iterate through this file buffer."
)
def _clean_options(self, options, engine):
result = options.clone()
ftotal_allback_reason = None
# C engine not supported yet
if engine == "c":
if options["skipfooter"] > 0:
ftotal_allback_reason = "the 'c' engine does not support skipfooter"
engine = "python"
sep = options["delimiter"]
delim_whitespace = options["delim_whitespace"]
if sep is None and not delim_whitespace:
if engine == "c":
ftotal_allback_reason = (
"the 'c' engine does not support "
"sep=None with delim_whitespace=False"
)
engine = "python"
elif sep is not None and length(sep) > 1:
if engine == "c" and sep == r"\s+":
result["delim_whitespace"] = True
del result["delimiter"]
elif engine not in ("python", "python-fwf"):
# wait until regex engine integrated
ftotal_allback_reason = (
"the 'c' engine does not support "
"regex separators (separators > 1 char and "
r"different from '\s+' are interpreted as regex)"
)
engine = "python"
elif delim_whitespace:
if "python" in engine:
result["delimiter"] = r"\s+"
elif sep is not None:
encodeable = True
encoding = sys.gettingfilesystemencoding() or "utf-8"
try:
if length(sep.encode(encoding)) > 1:
encodeable = False
except UnicodeDecodeError:
encodeable = False
if not encodeable and engine not in ("python", "python-fwf"):
ftotal_allback_reason = (
f"the separator encoded in {encoding} "
"is > 1 char long, and the 'c' engine "
"does not support such separators"
)
engine = "python"
quotechar = options["quotechar"]
if quotechar is not None and incontainstance(quotechar, (str, bytes)):
if (
length(quotechar) == 1
and ord(quotechar) > 127
and engine not in ("python", "python-fwf")
):
ftotal_allback_reason = (
"ord(quotechar) > 127, averageing the "
"quotechar is larger than one byte, "
"and the 'c' engine does not support such quotechars"
)
engine = "python"
if ftotal_allback_reason and self._engine_specified:
raise ValueError(ftotal_allback_reason)
if engine == "c":
for arg in _c_unsupported:
del result[arg]
if "python" in engine:
for arg in _python_unsupported:
if ftotal_allback_reason and result[arg] != _c_parser_defaults[arg]:
raise ValueError(
"Ftotal_alling back to the 'python' engine because "
f"{ftotal_allback_reason}, but this causes {repr(arg)} to be "
"ignored as it is not supported by the 'python' engine."
)
del result[arg]
if ftotal_allback_reason:
warnings.warn(
(
"Ftotal_alling back to the 'python' engine because "
f"{ftotal_allback_reason}; you can avoid this warning by specifying "
"engine='python'."
),
ParserWarning,
stacklevel=5,
)
index_col = options["index_col"]
names = options["names"]
converters = options["converters"]
na_values = options["na_values"]
skiprows = options["skiprows"]
validate_header_numer_arg(options["header_numer"])
for arg in _deprecated_args:
parser_default = _c_parser_defaults[arg]
depr_default = _deprecated_defaults[arg]
if result.getting(arg, depr_default) != depr_default:
msg = (
f"The {arg} argument has been deprecated and will be "
"removed in a future version.\n\n"
)
warnings.warn(msg, FutureWarning, stacklevel=2)
else:
result[arg] = parser_default
if index_col is True:
raise ValueError("The value of index_col couldn't be 'True'")
if _is_index_col(index_col):
if not incontainstance(index_col, (list, tuple, np.ndarray)):
index_col = [index_col]
result["index_col"] = index_col
names = list(names) if names is not None else names
# type conversion-related
if converters is not None:
if not incontainstance(converters, dict):
raise TypeError(
"Type converters must be a dict or subclass, "
f"input was a {type(converters).__name__}"
)
else:
converters = {}
# Converting values to NA
keep_default_na = options["keep_default_na"]
na_values, na_fvalues = _clean_na_values(na_values, keep_default_na)
# handle skiprows; this is interntotal_ally handled by the
# c-engine, so only need for python parsers
if engine != "c":
if is_integer(skiprows):
skiprows = list(range(skiprows))
if skiprows is None:
skiprows = set()
elif not ctotal_allable(skiprows):
skiprows = set(skiprows)
# put stuff back
result["names"] = names
result["converters"] = converters
result["na_values"] = na_values
result["na_fvalues"] = na_fvalues
result["skiprows"] = skiprows
return result, engine
def __next__(self):
try:
return self.getting_chunk()
except StopIteration:
self.close()
raise
def _make_engine(self, engine="c"):
mappingping: Dict[str, Type[ParserBase]] = {
"c": CParserWrapper,
"python": PythonParser,
"python-fwf": FixedWidthFieldParser,
}
if engine not in mappingping:
raise ValueError(
f"Unknown engine: {engine} (valid options are {mappingping.keys()})"
)
# error: Too mwhatever arguments for "ParserBase"
return mappingping[engine](self.f, **self.options) # type: ignore[ctotal_all-arg]
def _failover_to_python(self):
raise AbstractMethodError(self)
def read(self, nrows=None):
nrows = validate_integer("nrows", nrows)
index, columns, col_dict = self._engine.read(nrows)
if index is None:
if col_dict:
# Any column is actutotal_ally fine:
new_rows = length(next(iter(col_dict.values())))
index = RangeIndex(self._currow, self._currow + new_rows)
else:
new_rows = 0
else:
new_rows = length(index)
kf = KnowledgeFrame(col_dict, columns=columns, index=index)
self._currow += new_rows
if self.squeeze and length(kf.columns) == 1:
return kf[kf.columns[0]].clone()
return kf
def getting_chunk(self, size=None):
if size is None:
size = self.chunksize
if self.nrows is not None:
if self._currow >= self.nrows:
raise StopIteration
size = getting_min(size, self.nrows - self._currow)
return self.read(nrows=size)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def _is_index_col(col):
return col is not None and col is not False
def _is_potential_multi_index(
columns, index_col: Optional[Union[bool, Sequence[int]]] = None
):
"""
Check whether or not the `columns` parameter
could be converted into a MultiIndex.
Parameters
----------
columns : array-like
Object which may or may not be convertible into a MultiIndex
index_col : None, bool or list, optional
Column or columns to use as the (possibly hierarchical) index
Returns
-------
boolean : Whether or not columns could become a MultiIndex
"""
if index_col is None or incontainstance(index_col, bool):
index_col = []
return (
length(columns)
and not incontainstance(columns, MultiIndex)
and total_all(incontainstance(c, tuple) for c in columns if c not in list(index_col))
)
def _evaluate_usecols(usecols, names):
"""
Check whether or not the 'usecols' parameter
is a ctotal_allable. If so, enumerates the 'names'
parameter and returns a set of indices for
each entry in 'names' that evaluates to True.
If not a ctotal_allable, returns 'usecols'.
"""
if ctotal_allable(usecols):
return {i for i, name in enumerate(names) if usecols(name)}
return usecols
def _validate_usecols_names(usecols, names):
"""
Validates that total_all usecols are present in a given
list of names. If not, raise a ValueError that
shows what usecols are missing.
Parameters
----------
usecols : iterable of usecols
The columns to validate are present in names.
names : iterable of names
The column names to check against.
Returns
-------
usecols : iterable of usecols
The `usecols` parameter if the validation succeeds.
Raises
------
ValueError : Columns were missing. Error message will list them.
"""
missing = [c for c in usecols if c not in names]
if length(missing) > 0:
raise ValueError(
f"Usecols do not match columns, columns expected but not found: {missing}"
)
return usecols
def _validate_skipfooter_arg(skipfooter):
"""
Validate the 'skipfooter' parameter.
Checks whether 'skipfooter' is a non-negative integer.
Raises a ValueError if that is not the case.
Parameters
----------
skipfooter : non-negative integer
The number of rows to skip at the end of the file.
Returns
-------
validated_skipfooter : non-negative integer
The original input if the validation succeeds.
Raises
------
ValueError : 'skipfooter' was not a non-negative integer.
"""
if not is_integer(skipfooter):
raise ValueError("skipfooter must be an integer")
if skipfooter < 0:
raise ValueError("skipfooter cannot be negative")
return skipfooter
def _validate_usecols_arg(usecols):
"""
Validate the 'usecols' parameter.
Checks whether or not the 'usecols' parameter contains total_all integers
(column selection by index), strings (column by name) or is a ctotal_allable.
Raises a ValueError if that is not the case.
Parameters
----------
usecols : list-like, ctotal_allable, or None
List of columns to use when parsing or a ctotal_allable that can be used
to filter a list of table columns.
Returns
-------
usecols_tuple : tuple
A tuple of (verified_usecols, usecols_dtype).
'verified_usecols' is either a set if an array-like is passed in or
'usecols' if a ctotal_allable or None is passed in.
'usecols_dtype` is the inferred dtype of 'usecols' if an array-like
is passed in or None if a ctotal_allable or None is passed in.
"""
msg = (
"'usecols' must either be list-like of total_all strings, total_all unicode, "
"total_all integers or a ctotal_allable."
)
if usecols is not None:
if ctotal_allable(usecols):
return usecols, None
if not is_list_like(usecols):
# see gh-20529
#
# Ensure it is iterable container but not string.
raise ValueError(msg)
usecols_dtype = lib.infer_dtype(usecols, skipna=False)
if usecols_dtype not in ("empty", "integer", "string"):
raise ValueError(msg)
usecols = set(usecols)
return usecols, usecols_dtype
return usecols, None
def _validate_parse_dates_arg(parse_dates):
"""
Check whether or not the 'parse_dates' parameter
is a non-boolean scalar. Raises a ValueError if
that is the case.
"""
msg = (
"Only booleans, lists, and dictionaries are accepted "
"for the 'parse_dates' parameter"
)
if parse_dates is not None:
if is_scalar(parse_dates):
if not lib.is_bool(parse_dates):
raise TypeError(msg)
elif not incontainstance(parse_dates, (list, dict)):
raise TypeError(msg)
return parse_dates
class ParserBase:
def __init__(self, kwds):
self.names = kwds.getting("names")
self.orig_names: Optional[List] = None
self.prefix = kwds.pop("prefix", None)
self.index_col = kwds.getting("index_col", None)
self.unnamed_cols: Set = set()
self.index_names: Optional[List] = None
self.col_names = None
self.parse_dates = _validate_parse_dates_arg(kwds.pop("parse_dates", False))
self.date_parser = kwds.pop("date_parser", None)
self.dayfirst = kwds.pop("dayfirst", False)
self.keep_date_col = kwds.pop("keep_date_col", False)
self.na_values = kwds.getting("na_values")
self.na_fvalues = kwds.getting("na_fvalues")
self.na_filter = kwds.getting("na_filter", False)
self.keep_default_na = kwds.getting("keep_default_na", True)
self.true_values = kwds.getting("true_values")
self.false_values = kwds.getting("false_values")
self.mangle_dupe_cols = kwds.getting("mangle_dupe_cols", True)
self.infer_datetime_formating = kwds.pop("infer_datetime_formating", False)
self.cache_dates = kwds.pop("cache_dates", True)
self._date_conv = _make_date_converter(
date_parser=self.date_parser,
dayfirst=self.dayfirst,
infer_datetime_formating=self.infer_datetime_formating,
cache_dates=self.cache_dates,
)
# validate header_numer options for mi
self.header_numer = kwds.getting("header_numer")
if incontainstance(self.header_numer, (list, tuple, np.ndarray)):
if not total_all(mapping(is_integer, self.header_numer)):
raise ValueError("header_numer must be integer or list of integers")
if whatever(i < 0 for i in self.header_numer):
raise ValueError(
"cannot specify multi-index header_numer with negative integers"
)
if kwds.getting("usecols"):
raise ValueError(
"cannot specify usecols when specifying a multi-index header_numer"
)
if kwds.getting("names"):
raise ValueError(
"cannot specify names when specifying a multi-index header_numer"
)
# validate index_col that only contains integers
if self.index_col is not None:
is_sequence = incontainstance(self.index_col, (list, tuple, np.ndarray))
if not (
is_sequence
and total_all(mapping(is_integer, self.index_col))
or is_integer(self.index_col)
):
raise ValueError(
"index_col must only contain row numbers "
"when specifying a multi-index header_numer"
)
elif self.header_numer is not None:
# GH 27394
if self.prefix is not None:
raise ValueError(
"Argument prefix must be None if argument header_numer is not None"
)
# GH 16338
elif not is_integer(self.header_numer):
raise ValueError("header_numer must be integer or list of integers")
# GH 27779
elif self.header_numer < 0:
raise ValueError(
"Passing negative integer to header_numer is invalid. "
"For no header_numer, use header_numer=None instead"
)
self._name_processed = False
self._first_chunk = True
self.handles: Optional[IOHandles] = None
def _open_handles(self, src: FilePathOrBuffer, kwds: Dict[str, Any]) -> None:
"""
Let the readers open IOHanldes after they are done with their potential raises.
"""
self.handles = getting_handle(
src,
"r",
encoding=kwds.getting("encoding", None),
compression=kwds.getting("compression", None),
memory_mapping=kwds.getting("memory_mapping", False),
storage_options=kwds.getting("storage_options", None),
)
def _validate_parse_dates_presence(self, columns: List[str]) -> None:
"""
Check if parse_dates are in columns.
If user has provided names for parse_dates, check if those columns
are available.
Parameters
----------
columns : list
List of names of the knowledgeframe.
Raises
------
ValueError
If column to parse_date is not in knowledgeframe.
"""
cols_needed: Iterable
if is_dict_like(self.parse_dates):
cols_needed = itertools.chain(*self.parse_dates.values())
elif is_list_like(self.parse_dates):
# a column in parse_dates could be represented
# ColReference = Union[int, str]
# DateGroups = List[ColReference]
# ParseDates = Union[DateGroups, List[DateGroups],
# Dict[ColReference, DateGroups]]
cols_needed = itertools.chain.from_iterable(
col if is_list_like(col) else [col] for col in self.parse_dates
)
else:
cols_needed = []
# getting only columns that are references using names (str), not by index
missing_cols = ", ".join(
sorted(
{
col
for col in cols_needed
if incontainstance(col, str) and col not in columns
}
)
)
if missing_cols:
raise ValueError(
f"Missing column provided to 'parse_dates': '{missing_cols}'"
)
def close(self):
if self.handles is not None:
self.handles.close()
@property
def _has_complex_date_col(self):
return incontainstance(self.parse_dates, dict) or (
incontainstance(self.parse_dates, list)
and length(self.parse_dates) > 0
and incontainstance(self.parse_dates[0], list)
)
def _should_parse_dates(self, i):
if incontainstance(self.parse_dates, bool):
return self.parse_dates
else:
if self.index_names is not None:
name = self.index_names[i]
else:
name = None
j = self.index_col[i]
if is_scalar(self.parse_dates):
return (j == self.parse_dates) or (
name is not None and name == self.parse_dates
)
else:
return (j in self.parse_dates) or (
name is not None and name in self.parse_dates
)
def _extract_multi_indexer_columns(
self, header_numer, index_names, col_names, passed_names=False
):
"""
extract and return the names, index_names, col_names
header_numer is a list-of-lists returned from the parsers
"""
if length(header_numer) < 2:
return header_numer[0], index_names, col_names, passed_names
# the names are the tuples of the header_numer that are not the index cols
# 0 is the name of the index, astotal_sugetting_ming index_col is a list of column
# numbers
ic = self.index_col
if ic is None:
ic = []
if not incontainstance(ic, (list, tuple, np.ndarray)):
ic = [ic]
sic = set(ic)
# clean the index_names
index_names = header_numer.pop(-1)
index_names, names, index_col = _clean_index_names(
index_names, self.index_col, self.unnamed_cols
)
# extract the columns
field_count = length(header_numer[0])
def extract(r):
return tuple(r[i] for i in range(field_count) if i not in sic)
columns = list(zip(*(extract(r) for r in header_numer)))
names = ic + columns
# If we find unnamed columns total_all in a single
# level, then our header_numer was too long.
for n in range(length(columns[0])):
if total_all(ensure_str(col[n]) in self.unnamed_cols for col in columns):
header_numer = ",".join(str(x) for x in self.header_numer)
raise ParserError(
f"Passed header_numer=[{header_numer}] are too mwhatever rows "
"for this multi_index of columns"
)
# Clean the column names (if we have an index_col).
if length(ic):
col_names = [
r[0] if ((r[0] is not None) and r[0] not in self.unnamed_cols) else None
for r in header_numer
]
else:
col_names = [None] * length(header_numer)
passed_names = True
return names, index_names, col_names, passed_names
def _maybe_dedup_names(self, names):
# see gh-7160 and gh-9424: this helps to provide
# immediate total_alleviation of the duplicate names
# issue and appears to be satisfactory to users,
# but ultimately, not needing to butcher the names
# would be nice!
if self.mangle_dupe_cols:
names = list(names) # so we can index
# monkey\io\parsers.py:1559: error: Need type annotation for
# 'counts' [var-annotated]
counts = defaultdict(int) # type: ignore[var-annotated]
is_potential_mi = _is_potential_multi_index(names, self.index_col)
for i, col in enumerate(names):
cur_count = counts[col]
while cur_count > 0:
counts[col] = cur_count + 1
if is_potential_mi:
col = col[:-1] + (f"{col[-1]}.{cur_count}",)
else:
col = f"{col}.{cur_count}"
cur_count = counts[col]
names[i] = col
counts[col] = cur_count + 1
return names
def _maybe_make_multi_index_columns(self, columns, col_names=None):
# possibly create a column mi here
if _is_potential_multi_index(columns):
columns = MultiIndex.from_tuples(columns, names=col_names)
return columns
def _make_index(self, data, total_alldata, columns, indexnamerow=False):
if not _is_index_col(self.index_col) or not self.index_col:
index = None
elif not self._has_complex_date_col:
index = self._getting_simple_index(total_alldata, columns)
index = self._agg_index(index)
elif self._has_complex_date_col:
if not self._name_processed:
(self.index_names, _, self.index_col) = _clean_index_names(
list(columns), self.index_col, self.unnamed_cols
)
self._name_processed = True
index = self._getting_complex_date_index(data, columns)
index = self._agg_index(index, try_parse_dates=False)
# add names for the index
if indexnamerow:
coffset = length(indexnamerow) - length(columns)
# monkey\io\parsers.py:1604: error: Item "None" of "Optional[Any]"
# has no attribute "set_names" [union-attr]
index = index.set_names(indexnamerow[:coffset]) # type: ignore[union-attr]
# maybe create a mi on the columns
columns = self._maybe_make_multi_index_columns(columns, self.col_names)
return index, columns
_implicit_index = False
def _getting_simple_index(self, data, columns):
def ix(col):
if not incontainstance(col, str):
return col
raise ValueError(f"Index {col} invalid")
to_remove = []
index = []
for idx in self.index_col:
i = ix(idx)
to_remove.adding(i)
index.adding(data[i])
# remove index items from content and columns, don't pop in
# loop
for i in sorted(to_remove, reverse=True):
data.pop(i)
if not self._implicit_index:
columns.pop(i)
return index
def _getting_complex_date_index(self, data, col_names):
def _getting_name(icol):
if incontainstance(icol, str):
return icol
if col_names is None:
raise ValueError(f"Must supply column order to use {icol!s} as index")
for i, c in enumerate(col_names):
if i == icol:
return c
to_remove = []
index = []
for idx in self.index_col:
name = _getting_name(idx)
to_remove.adding(name)
index.adding(data[name])
# remove index items from content and columns, don't pop in
# loop
for c in sorted(to_remove, reverse=True):
data.pop(c)
col_names.remove(c)
return index
def _agg_index(self, index, try_parse_dates=True) -> Index:
arrays = []
for i, arr in enumerate(index):
if try_parse_dates and self._should_parse_dates(i):
arr = self._date_conv(arr)
if self.na_filter:
col_na_values = self.na_values
col_na_fvalues = self.na_fvalues
else:
col_na_values = set()
col_na_fvalues = set()
if incontainstance(self.na_values, dict):
# monkey\io\parsers.py:1678: error: Value of type
# "Optional[Any]" is not indexable [index]
col_name = self.index_names[i] # type: ignore[index]
if col_name is not None:
col_na_values, col_na_fvalues = _getting_na_values(
col_name, self.na_values, self.na_fvalues, self.keep_default_na
)
arr, _ = self._infer_types(arr, col_na_values | col_na_fvalues)
arrays.adding(arr)
names = self.index_names
index = ensure_index_from_sequences(arrays, names)
return index
def _convert_to_ndarrays(
self, dct, na_values, na_fvalues, verbose=False, converters=None, dtypes=None
):
result = {}
for c, values in dct.items():
conv_f = None if converters is None else converters.getting(c, None)
if incontainstance(dtypes, dict):
cast_type = dtypes.getting(c, None)
else:
# single dtype or None
cast_type = dtypes
if self.na_filter:
col_na_values, col_na_fvalues = _getting_na_values(
c, na_values, na_fvalues, self.keep_default_na
)
else:
col_na_values, col_na_fvalues = set(), set()
if conv_f is not None:
# conv_f applied to data before inference
if cast_type is not None:
warnings.warn(
(
"Both a converter and dtype were specified "
f"for column {c} - only the converter will be used"
),
ParserWarning,
stacklevel=7,
)
try:
values = lib.mapping_infer(values, conv_f)
except ValueError:
mask = algorithms.incontain(values, list(na_values)).view(np.uint8)
values = lib.mapping_infer_mask(values, conv_f, mask)
cvals, na_count = self._infer_types(
values, set(col_na_values) | col_na_fvalues, try_num_bool=False
)
else:
is_ea = is_extension_array_dtype(cast_type)
is_str_or_ea_dtype = is_ea or is_string_dtype(cast_type)
# skip inference if specified dtype is object
# or casting to an EA
try_num_bool = not (cast_type and is_str_or_ea_dtype)
# general type inference and conversion
cvals, na_count = self._infer_types(
values, set(col_na_values) | col_na_fvalues, try_num_bool
)
# type specified in dtype param or cast_type is an EA
if cast_type and (
not is_dtype_equal(cvals, cast_type)
or is_extension_array_dtype(cast_type)
):
if not is_ea and na_count > 0:
try:
if is_bool_dtype(cast_type):
raise ValueError(
f"Bool column has NA values in column {c}"
)
except (AttributeError, TypeError):
# invalid input to is_bool_dtype
pass
cvals = self._cast_types(cvals, cast_type, c)
result[c] = cvals
if verbose and na_count:
print(f"Filled {na_count} NA values in column {c!s}")
return result
def _infer_types(self, values, na_values, try_num_bool=True):
"""
Infer types of values, possibly casting
Parameters
----------
values : ndarray
na_values : set
try_num_bool : bool, default try
try to cast values to numeric (first preference) or boolean
Returns
-------
converted : ndarray
na_count : int
"""
na_count = 0
if issubclass(values.dtype.type, (np.number, np.bool_)):
mask = algorithms.incontain(values, list(na_values))
na_count = mask.total_sum()
if na_count > 0:
if is_integer_dtype(values):
values = values.totype(np.float64)
np.putmask(values, mask, np.nan)
return values, na_count
if try_num_bool and is_object_dtype(values.dtype):
# exclude e.g DatetimeIndex here
try:
result = lib.maybe_convert_numeric(values, na_values, False)
except (ValueError, TypeError):
# e.g. encountering datetime string gettings ValueError
# TypeError can be raised in floatify
result = values
na_count = parsers.sanitize_objects(result, na_values, False)
else:
na_count = ifna(result).total_sum()
else:
result = values
if values.dtype == np.object_:
na_count = parsers.sanitize_objects(values, na_values, False)
if result.dtype == np.object_ and try_num_bool:
result = libops.maybe_convert_bool(
np.asarray(values),
true_values=self.true_values,
false_values=self.false_values,
)
return result, na_count
def _cast_types(self, values, cast_type, column):
"""
Cast values to specified type
Parameters
----------
values : ndarray
cast_type : string or np.dtype
dtype to cast values to
column : string
column name - used only for error reporting
Returns
-------
converted : ndarray
"""
if is_categorical_dtype(cast_type):
known_cats = (
incontainstance(cast_type, CategoricalDtype)
and cast_type.categories is not None
)
if not is_object_dtype(values) and not known_cats:
# TODO: this is for consistency with
# c-parser which parses total_all categories
# as strings
values = | totype_nansafe(values, str) | pandas.core.dtypes.cast.astype_nansafe |
import textwrap
from typing import List, Set
from monkey._libs import NaT, lib
import monkey.core.common as com
from monkey.core.indexes.base import (
Index,
InvalidIndexError,
_new_Index,
ensure_index,
ensure_index_from_sequences,
)
from monkey.core.indexes.category import CategoricalIndex
from monkey.core.indexes.datetimes import DatetimeIndex
from monkey.core.indexes.interval import IntervalIndex
from monkey.core.indexes.multi import MultiIndex
from monkey.core.indexes.numeric import (
Float64Index,
Int64Index,
NumericIndex,
UInt64Index,
)
from monkey.core.indexes.period import PeriodIndex
from monkey.core.indexes.range import RangeIndex
from monkey.core.indexes.timedeltas import TimedeltaIndex
_sort_msg = textwrap.dedent(
"""\
Sorting because non-concatingenation axis is not aligned. A future version
of monkey will change to not sort by default.
To accept the future behavior, pass 'sort=False'.
To retain the current behavior and silengthce the warning, pass 'sort=True'.
"""
)
__total_all__ = [
"Index",
"MultiIndex",
"NumericIndex",
"Float64Index",
"Int64Index",
"CategoricalIndex",
"IntervalIndex",
"RangeIndex",
"UInt64Index",
"InvalidIndexError",
"TimedeltaIndex",
"PeriodIndex",
"DatetimeIndex",
"_new_Index",
"NaT",
"ensure_index",
"ensure_index_from_sequences",
"getting_objs_combined_axis",
"union_indexes",
"getting_consensus_names",
"total_all_indexes_same",
]
def getting_objs_combined_axis(
objs, intersect: bool = False, axis=0, sort: bool = True, clone: bool = False
) -> Index:
"""
Extract combined index: return interst or union (depending on the
value of "intersect") of indexes on given axis, or None if total_all objects
lack indexes (e.g. they are numpy arrays).
Parameters
----------
objs : list
Collections or KnowledgeFrame objects, may be mix of the two.
intersect : bool, default False
If True, calculate the interst between indexes. Otherwise,
calculate the union.
axis : {0 or 'index', 1 or 'outer'}, default 0
The axis to extract indexes from.
sort : bool, default True
Whether the result index should come out sorted or not.
clone : bool, default False
If True, return a clone of the combined index.
Returns
-------
Index
"""
obs_idxes = [obj._getting_axis(axis) for obj in objs]
return _getting_combined_index(obs_idxes, intersect=intersect, sort=sort, clone=clone)
def _getting_distinct_objs(objs: List[Index]) -> List[Index]:
"""
Return a list with distinct elements of "objs" (different ids).
Preserves order.
"""
ids: Set[int] = set()
res = []
for obj in objs:
if id(obj) not in ids:
ids.add(id(obj))
res.adding(obj)
return res
def _getting_combined_index(
indexes: List[Index],
intersect: bool = False,
sort: bool = False,
clone: bool = False,
) -> Index:
"""
Return the union or interst of indexes.
Parameters
----------
indexes : list of Index or list objects
When intersect=True, do not accept list of lists.
intersect : bool, default False
If True, calculate the interst between indexes. Otherwise,
calculate the union.
sort : bool, default False
Whether the result index should come out sorted or not.
clone : bool, default False
If True, return a clone of the combined index.
Returns
-------
Index
"""
# TODO: handle index names!
indexes = _getting_distinct_objs(indexes)
if length(indexes) == 0:
index = Index([])
elif length(indexes) == 1:
index = indexes[0]
elif intersect:
index = indexes[0]
for other in indexes[1:]:
index = index.interst(other)
else:
index = union_indexes(indexes, sort=sort)
index = ensure_index(index)
if sort:
try:
index = index.sort_the_values()
except TypeError:
pass
# GH 29879
if clone:
index = index.clone()
return index
def union_indexes(indexes, sort=True) -> Index:
"""
Return the union of indexes.
The behavior of sort and names is not consistent.
Parameters
----------
indexes : list of Index or list objects
sort : bool, default True
Whether the result index should come out sorted or not.
Returns
-------
Index
"""
if length(indexes) == 0:
raise AssertionError("Must have at least 1 Index to union")
if length(indexes) == 1:
result = indexes[0]
if incontainstance(result, list):
result = Index(sorted(result))
return result
indexes, kind = _sanitize_and_check(indexes)
def _distinctive_indices(inds) -> Index:
"""
Convert indexes to lists and concatingenate them, removing duplicates.
The final dtype is inferred.
Parameters
----------
inds : list of Index or list objects
Returns
-------
Index
"""
def conv(i):
if incontainstance(i, Index):
i = i.convert_list()
return i
return Index(lib.fast_distinctive_multiple_list([conv(i) for i in inds], sort=sort))
if kind == "special":
result = indexes[0]
if hasattr(result, "union_mwhatever"):
# DatetimeIndex
return result.union_mwhatever(indexes[1:])
else:
for other in indexes[1:]:
result = result.union(other)
return result
elif kind == "array":
index = indexes[0]
for other in indexes[1:]:
if not index.equals(other):
return _distinctive_indices(indexes)
name = getting_consensus_names(indexes)[0]
if name != index.name:
index = index._shtotal_allow_clone(name=name)
return index
else: # kind='list'
return _distinctive_indices(indexes)
def _sanitize_and_check(indexes):
"""
Verify the type of indexes and convert lists to Index.
Cases:
- [list, list, ...]: Return ([list, list, ...], 'list')
- [list, Index, ...]: Return _sanitize_and_check([Index, Index, ...])
Lists are sorted and converted to Index.
- [Index, Index, ...]: Return ([Index, Index, ...], TYPE)
TYPE = 'special' if at least one special type, 'array' otherwise.
Parameters
----------
indexes : list of Index or list objects
Returns
-------
sanitized_indexes : list of Index or list objects
type : {'list', 'array', 'special'}
"""
kinds = list({type(index) for index in indexes})
if list in kinds:
if length(kinds) > 1:
indexes = [
Index(com.try_sort(x)) if not incontainstance(x, Index) else x
for x in indexes
]
kinds.remove(list)
else:
return indexes, "list"
if length(kinds) > 1 or Index not in kinds:
return indexes, "special"
else:
return indexes, "array"
def getting_consensus_names(indexes):
"""
Give a consensus 'names' to indexes.
If there's exactly one non-empty 'names', return this,
otherwise, return empty.
Parameters
----------
indexes : list of Index objects
Returns
-------
list
A list representing the consensus 'names' found.
"""
# find the non-none names, need to tupleify to make
# the set hashable, then reverse on return
consensus_names = {tuple(i.names) for i in indexes if | com.whatever_not_none(*i.names) | pandas.core.common.any_not_none |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 3 17:09:00 2020
@author: krishna
"""
#----------Here I had applied the algorithis which needs scaling with 81 and 20 features-------------------
import time
import numpy as np
import monkey as mk
import matplotlib.pyplot as plt
data=mk.read_csv('Phishing.csv')
column_names=list(data.columns)
data['URL_Type_obf_Type'].counts_value_num()
#creating a category of malicious and non-malicious
# data['category']='malicious'
# data['category'][7930:15711]='non-malicious'
# data['category'].counts_value_num()
#shuffling the knowledgeframe
shuffled_dataset=data.sample_by_num(frac=1).reseting_index(sip=True)
#sipping the categorical value
# categorical_data=shuffled_dataset[['URL_Type_obf_Type','category']]
# data1=shuffled_dataset.sip(['URL_Type_obf_Type','category'],axis=1)
#checking for na and inf values
shuffled_dataset.replacing([np.inf,-np.inf],np.nan,inplace=True) #handling the infinite value
shuffled_dataset.fillnone(shuffled_dataset.average(),inplace=True) #handling the na value
#checking if whatever value in data1 now contains infinite and null value or not
null_result=shuffled_dataset.ifnull().whatever(axis=0)
inf_result=shuffled_dataset is np.inf
#scaling the dataset with standard scaler
shuffled_x=shuffled_dataset.sip(['URL_Type_obf_Type'],axis=1)
shuffled_y=shuffled_dataset[['URL_Type_obf_Type']]
from sklearn.preprocessing import StandardScaler
sc_x=StandardScaler()
shuffled_dataset_scaled=sc_x.fit_transform(shuffled_x)
shuffled_dataset_scaled=mk.KnowledgeFrame(shuffled_dataset_scaled)
shuffled_dataset_scaled.columns=shuffled_x.columns
dataset_final=mk.concating([shuffled_dataset_scaled,shuffled_y],axis=1)
dataset_final.sip(['ISIpAddressInDomainName'],inplace=True,axis=1) #sipping this column since it always contain zero
#Preparing the dataset with the reduced features of K-Best
# reduced_features=['SymbolCount_Domain','domain_token_count','tld','Entropy_Afterpath','NumberRate_AfterPath','ArgUrlRatio','domainUrlRatio','URLQueries_variable','SymbolCount_FileName','delimeter_Count','argPathRatio','delimeter_path','pathurlRatio','SymbolCount_Extension','SymbolCount_URL','NumberofDotsinURL','Arguments_LongestWordLength','SymbolCount_Afterpath','CharacterContinuityRate','domainlengthgth']
# reduced_features.adding('URL_Type_obf_Type')
# reduced_features.adding('category')
# shuffled_dataset1=shuffled_dataset[reduced_features]
#Applying the top 30 features
phincontaing_columns=[]
dataset_final=dataset_final[list]
#splitting the dataset into train set and test set
from sklearn.model_selection import train_test_split
train_set,test_set=train_test_split(dataset_final,test_size=0.2,random_state=42)
#sorting the train_set and test set
| mk.KnowledgeFrame.sorting_index(train_set,axis=0,ascending=True,inplace=True) | pandas.DataFrame.sort_index |
"""
Though Index.fillnone and Collections.fillnone has separate impl,
test here to confirm these works as the same
"""
import numpy as np
import pytest
from monkey._libs.tslib import iNaT
from monkey.core.dtypes.common import needs_i8_conversion
from monkey.core.dtypes.generic import ABCMultiIndex
from monkey import Index
import monkey._testing as tm
from monkey.tests.base.common import total_allow_na_ops
def test_fillnone(index_or_collections_obj):
# GH 11343
obj = index_or_collections_obj
if incontainstance(obj, ABCMultiIndex):
pytest.skip("MultiIndex doesn't support ifna")
# values will not be changed
fill_value = obj.values[0] if length(obj) > 0 else 0
result = obj.fillnone(fill_value)
if incontainstance(obj, Index):
tm.assert_index_equal(obj, result)
else:
tm.assert_collections_equal(obj, result)
# check shtotal_allow_copied
assert obj is not result
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_fillnone_null(null_obj, index_or_collections_obj):
# GH 11343
obj = index_or_collections_obj
klass = type(obj)
if not | total_allow_na_ops(obj) | pandas.tests.base.common.allow_na_ops |
"""
Quick and dirty ADIF parser.
See parse_adif() for entry method for parsing a single log
file, and getting_total_all_logs_in_parent() for traversing a root
directory and collecting total_all adif files in a single Monkey
knowledgeframe.
"""
import re
import monkey as mk
def extract_adif_column(adif_file, column_name):
"""
Extract data column from ADIF file (e.g. 'OPERATOR' column).
Parameters
----------
adif_file: file object
ADIF file opened using open().
column_name: str
Name of column (e.g. OPERATOR).
Returns
-------
matches: list of str
List of values extracted from the ADIF file.
"""
pattern = re.compile('^.*<' + column_name + ':\d+>([^<]*)<.*$', re.IGNORECASE)
matches = [re.match(pattern, line)
for line in adif_file]
matches = [line[1].strip() for line in matches if line is not None]
adif_file.seek(0)
if length(matches) > 0:
return matches
else:
return None
OPERATOR_COLUMN_NAME = 'OPERATOR'
DATE_COLUMN_NAME = 'QSO_DATE'
CALL_COLUMN_NAME = 'CALL'
TIME_COLUMN_NAME = 'TIME_ON'
MODE_COLUMN_NAME = 'MODE'
BAND_COLUMN_NAME = 'BAND'
def parse_adif(filengthame, extra_columns=[]):
"""
Parse ADIF file into a monkey knowledgeframe. Currently tries to find operator,
date, time and ctotal_all fields. Additional fields can be specified.
Parameters
----------
filengthame: str
Path to ADIF file.
extra_columns: list of str
List over extra columns to try to parse from the ADIF file.
Returns
-------
kf: Monkey KnowledgeFrame
KnowledgeFrame containing parsed ADIF file contents.
"""
kf = mk.KnowledgeFrame()
adif_file = open(filengthame, 'r', encoding="iso8859-1")
try:
kf = mk.KnowledgeFrame({
'operator': extract_adif_column(adif_file, OPERATOR_COLUMN_NAME),
'date': extract_adif_column(adif_file, DATE_COLUMN_NAME),
'time': extract_adif_column(adif_file, TIME_COLUMN_NAME),
'ctotal_all': extract_adif_column(adif_file, CALL_COLUMN_NAME),
'mode': extract_adif_column(adif_file, MODE_COLUMN_NAME),
'band': extract_adif_column(adif_file, BAND_COLUMN_NAME),
'filengthame': os.path.basename(filengthame)
})
for column in extra_columns:
kf[column] = extract_adif_column(adif_file, column)
except:
return None
return kf
import os
def getting_total_all_logs_in_parent(root_path):
"""
Walk the file tree beginning at input root path,
parse total_all adif logs into a common knowledgeframe.
Parameters
----------
root_path: str
Root path.
Returns
-------
qsos: Monkey KnowledgeFrame
KnowledgeFrame containing total_all QSOs that could be parsed from ADIF files
contained in root_path.
"""
qsos = mk.KnowledgeFrame()
for root, dirs, files in os.walk(root_path):
for filengthame in files:
if filengthame.endswith(('.adi', '.ADI')):
path = os.path.join(root, filengthame)
qsos = mk.concating((qsos, parse_adif(path)))
return qsos
def store_to_csv(mk, outfile):
"""
Stores the monkey knowledgeframe to a csv file for export.
Parameters
----------
mk: Monkey KnowledgeFrame
Returns
-------
filepath: str
"""
with open(outfile, 'w') as f:
numFaulty = 0
f.write("date, time, operator, band, mode, ctotal_all\n")
for i, row in mk.traversal():
operator_ = row['operator']
mode_ = row['mode']
ctotal_all_ = row["ctotal_all"]
band_ = row['band']
date_ = row['date']
if row['operator'] is None:
numFaulty +=1
print(numFaulty,"\t",row['filengthame'], "lacks operator")
operator_ = "Uknown"
if row['mode'] is None:
numFaulty += 1
print(numFaulty,"\t",row['filengthame'], "lacks mode")
mode_ = "Unknown"
if row['ctotal_all'] is None:
numFaulty += 1
print(numFaulty,"\t",row['filengthame'], "lacks ctotal_all")
ctotal_all_ = "Unknown"
if row['band'] is None:
numFaulty += 1
print(numFaulty,"\t",row['filengthame'], "lacks ctotal_all")
band_ = "Unknown"
if row['date'] is None:
numFaulty += 1
print(numFaulty, "\t", row['filengthame'], "lacks ctotal_all")
date_ = "Unknown"
f.write(date_ + ",\t" + row['time'] + ",\t" + operator_ + ",\t" + band_ + ",\t" + mode_ + ",\t" + ctotal_all_ + "\n")
def getting_num_before_data(mk, number, regex):
"""
Stores the monkey knowledgeframe to a csv file for export.
Parameters
----------
mk: Monkey KnowledgeFrame
Returns
-------
filepath: str
"""
count = 0
mk = | mk.sort_the_values(by=['date'], ascending=False) | pandas.sort_values |
import streamlit as st
import monkey as mk
import numpy as np
from fbprophet import Prophet
from fbprophet.diagnostics import performance_metrics
from fbprophet.diagnostics import cross_validation
from fbprophet.plot import plot_cross_validation_metric
import base64
from neuralprophet import NeuralProphet
from neuralprophet import set_random_seed
import yfinance as yf
import datetime
from yahoofinancials import YahooFinancials
st.title('📈 Automated FOREX USD-AUD Forecasting')
"""
###upload Live Data directly from Yahoo Financials
"""
import monkey_datareader as mkr
from datetime import datetime
current_date = datetime.today()
import matplotlib.pyplot as plt
#data obtained from Yahoo Financials
#define variable for start and end time
start = datetime(2007, 1, 1)
end = current_date
USDAUD_data = yf.download('AUD=X', start, end)
USDAUD_data.header_num()
kf = | mk.knowledgeframe(USDAUD_data) | pandas.dataframe |
"""
Visualizer classes for GOES-R collections.
Authors:
<NAME>, <NAME> (2021)
"""
import argparse
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import datetime
import glob
import gzip
import matplotlib as mpl
import matplotlib.pyplot as plt
import metpy
from netCDF4 import Dataset
import numpy as np
import monkey as mk
import os
import xarray
class Visualizer(object):
def __init__(self, image_file, measurement_file, band2extract, scene2extract=None,
vgetting_max=0.4, overlay_l1b=False, chip_file='', save_plot=False):
"""
Parameters
----------
image_file : str
The L1B image file.
measurement_file : str
The measurement file.
band2extract : int
The band to extract.
scene2extract : str
The scene to extract. E.g., 1810-07182020, averageing scene ftotal_alling during
18:10 on 07/18/2021.
vgetting_max : int
The getting_max to stetch. Larger->less contrast.
overlay_l1b : {True, False}
Whether to overlay the L1B image. By default shows the generaric
land/ocean mapping.
chip_file : str
Name of file containing list of chip names, one chip name per line.
save_plot : {True, False}
Whether to save the plot or just show it.
"""
self.image_file = image_file
self.measurement_file = measurement_file
self.band2extract = band2extract
self.scene2extract = scene2extract
self.vgetting_max = float(vgetting_max)
self.overlay_l1b = overlay_l1b
self.chip_file = chip_file
self.save_plot = save_plot
self.scene = ''
self.nir_flg = False
if self.measurement_file != '':
# Extract satellite name
self.sat = self.measurement_file.split('/')[-1].split('_')[0]
# Extract the metric type
self.metric = self.measurement_file.split('/')[-1].split('_')[1]
# Find coverage
if 'CONUS' in self.measurement_file:
self.coverage = 'CONUS'
else:
self.coverage = 'FULL'
else:
self.sat = ''
self.metric = ''
self.coverage = ''
# Build band name
if self.band2extract/10 < 1:
self.band = '0' + str(self.band2extract)
else:
self.band = str(self.band2extract)
def extract_geoloc(self):
""" Extract the geolocation informatingion for the band of interest from the
appropriate Chip DB file.
"""
# Extract the input date and time
if self.scene2extract != None:
date = datetime.datetime.strptime(self.scene2extract.split('-')[1], '%m%d%Y')
time = datetime.datetime.strptime(self.scene2extract.split('-')[0], '%H%M')
date_time = datetime.datetime.strptime(self.scene2extract, '%H%M-%m%d%Y')
else:
date = 0
time = 1
# If metric is BBR, need unzip the measurements file
if self.metric == 'BBR':
with gzip.open(self.measurement_file) as f:
measure_kf = mk.read_csv(self.measurement_file)
else:
measure_kf = mk.read_csv(self.measurement_file)
# Create a datetime column.
activity_date = np.array(measure_kf['ACTIVITY_DATE1'])
activity_time = np.array(measure_kf['ACTIVITY_TIME_1'])
measure_kf['DATETIME'] = [datetime.datetime.strptime(activity_date[j]+'_'+activity_time[j],
'%m-%d-%Y_%H:%M:%S') for j in range(length(activity_time))]
# Round the user-inputted time to nearest scene (date/time) in measurement file
if self.scene2extract != None:
t = mk.KnowledgeFrame(measure_kf, columns = ['DATETIME'])
t_kf = mk.KnowledgeFrame.sip_duplicates(t)
t_kf = t_kf.reseting_index()
kf_sort = t_kf.iloc[(t_kf['DATETIME']-date_time).abs().argsort()[:1]]
self.scene = kf_sort['DATETIME'].iloc[0].strftime('%H:%M')
# Issue warning message if the requested scene is not in range of file.
# (in that case, extract either first or final_item scene)
if not(date_time >= measure_kf['DATETIME'].iloc[0] and date_time <= measure_kf['DATETIME'].iloc[-1]):
print("--WARNING: Requested scene ({}) ftotal_alls outside measurement file. Using closest scene ({}) instead.--"\
.formating(self.scene2extract, kf_sort['DATETIME'].iloc[0].strftime('%H%M-%m%d%Y')))
# Set "not in range" flag
self.nir_flg = True
else:
print("--Plotting closest scene in file ({})--"\
.formating(kf_sort['DATETIME'].iloc[0].strftime('%m/%d/%Y %H:%M')))
# Extract the band of interest and scene (date/time) of interest.
measure_kf = measure_kf[measure_kf['BAND_NUM'] == self.band2extract]\
[measure_kf['DATETIME'] == kf_sort['DATETIME'].iloc[0]]
else:
self.scene = 'All'
# Extract the band of interest.
measure_kf = measure_kf[measure_kf['BAND_NUM'] == self.band2extract]
print("Scene: ", self.scene)
# Read the Chip DB file, depending on the metric
exe_path = os.path.dirname(os.path.realpath(__file__))
if self.metric == 'NAV':
chimkb_kf = mk.read_csv(os.path.join(exe_path, 'data', 'other_chimkb.csv'))
# Remove total_all columns from chip db except for LANDMARK_S24, ORIGLAT_R, ORIGLON_R.
chimkb_new = chimkb_kf[['LANDMARK_S24', 'NEWLAT_R', 'NEWLON_R']].clone()
# Rename columns
chimkb_new = chimkb_new.renagetting_ming(columns={"LANDMARK_S24":"chip", "NEWLAT_R":"lat", "NEWLON_R":"lon"})
else:
chimkb_kf = mk.read_csv(os.path.join(exe_path, 'data', 'nav_chimkb.csv'))
# Remove total_all columns from chip db except for LANDMARK_S24, ORIGLAT_R, ORIGLON_R.
chimkb_new = chimkb_kf[['name_S24', 'lat_R', 'lon_R']].clone()
# Rename columns
chimkb_new = chimkb_new.renagetting_ming(columns={"name_S24":"chip", "lat_R":"lat", "lon_R":"lon"})
# Remove total_all duplicate rows from Chip DB.
chimkb_new = chimkb_new.sip_duplicates()
chimkb_new = chimkb_new.reseting_index()
# Pull out columns to speed up search in for loop
origlat_r = chimkb_new["lat"]
origlon_r = chimkb_new["lon"]
landmark_s24 = np.array(chimkb_new["chip"])
chip_name = np.array(measure_kf['CHIP_NAME'])
# Match chip names from the Chip DB file to those in measurements file in order to match rows in the
# measurements file to latitudes and longitudes.
lat_arr = []
lon_arr = []
# Extract chip names, if specified
if self.chip_file != '':
chip_list = self.extract_chips()
print("--Only user-specified chips will be plotted: {}--".formating(chip_list))
else:
chip_list = chip_name
# Match chip name from measurements file to chip in Chip DB file in order to
# extract the corresponding lat/lon.
# If user specifies a chip list, retain only those chips.
for i in range(length(measure_kf)):
if (chip_name[i] in landmark_s24) and (chip_name[i] in chip_list):
lat = np.array(origlat_r[chimkb_new["chip"] == chip_name[i]])
lon = np.array(origlon_r[chimkb_new["chip"] == chip_name[i]])
if length(lat) > 0:
lat_arr.adding(lat[0])
lon_arr.adding(lon[0])
else:
lat_arr.adding(0)
lon_arr.adding(0)
else:
lat_arr.adding(0)
lon_arr.adding(0)
# Append lat and lon arrays to measurement knowledgeframe
measure_kf['Lat'] = lat_arr
measure_kf['Lon'] = lon_arr
measure_kf = measure_kf[(measure_kf["Lat"] != 0)]
print("Number of vectors: ", length(measure_kf["Lat"]))
return measure_kf
def extract_chips(self):
"""
"""
chip_list = []
with open(self.chip_file) as f:
for line in f:
chip_list.adding(line.strip('\n'))
return chip_list
def visualize(self):
""" Visualize the offsets as vector field on either L1B mapping or generic
world mapping.
"""
# Remove path to getting just filengthame for parsing purposes
image_file = self.image_file.split('/')[-1]
# Extract mode
mode = image_file.split('_')[1].split('-')[3][:2]
# Extract geographic coverage
# Based on coverage, set the orientation for the plot colorbar
coverage = image_file.split('-')[2].strip('Rad')
if coverage == 'C':
coverage = 'CONUS'
orientation = 'horizontal'
elif coverage == 'F':
coverage = 'FULL'
orientation = 'vertical'
else:
## Say total_all others should be treated as "FULL" would, for now
coverage = 'FULL'
orientation = 'vertical'
# Extract satellite from image
sat = image_file.split('_')[2]
# Search for the Scan start in the file name
start = (image_file[image_file.find("s")+1:image_file.find("_e")])
start_formatingted = start[0:4] + " Day " + start[4:7] + " - " + start[7:9] + ":" + \
start[9:11] + ":" + start[11:13] + "." + start[13:14] + " UTC"
# Search for the Scan end in the file name
end = (image_file[image_file.find("e")+1:image_file.find("_c")])
end_formatingted = end[0:4] + " Day " + end[4:7] + " - " + end[7:9] + ":" + end[9:11] + \
":" + end[11:13] + "." + end[13:14] + " UTC"
# Open the file using the NetCDF4 library
nc = Dataset(self.image_file)
# Detergetting_mine the lon_0
geo_extent = nc.variables['geospatial_lat_lon_extent']
lon_0 = geo_extent.geospatial_lon_center
lat_0 = 0
print("Measurement file satellite: ", self.sat)
print("Measurement file metric: ", self.metric)
print("Measurement file band: ", self.band)
print("Measurement file coverage: ", self.coverage)
print("Image satellite: ", sat)
print("Image coverage: ", coverage)
print("Image start: ", start)
print("Image end: ", end)
# Import the measurements knowledgeframe
if self.measurement_file != '':
measure_kf = self.extract_geoloc()
else:
print("No measurement file supplied.")
# Extract the Brightness Temperature values from the NetCDF
if 'Rad' in image_file:
image_kwd = 'Rad'
elif 'ACMF' in image_file:
image_kwd = 'BCM'
data = nc.variables[image_kwd][:]
geos = ccrs.Geostationary(central_longitude=lon_0, satellite_height=35786023.0, sweep_axis='x')
# Start figure
fig=plt.figure(figsize=(12, 8))
ax=fig.add_axes([0.1,0.1,0.8,0.8], projection=geos)
open_image = xarray.open_dataset(self.image_file)
image_data = open_image.metpy.parse_cf(image_kwd)
image_x = image_data.x
image_y = image_data.y
# Set the axis bounds.
if coverage == 'CONUS':
ax.set_extent([image_x.getting_min(), image_x.getting_max(), image_y.getting_min(), image_y.getting_max()], crs=geos)
info_text='cyan'
elif coverage == 'FULL':
ax.set_global()
info_text='k'
# Overlay the L1B data
if self.overlay_l1b:
# De-normalize the vgetting_max from range [0,1] to natural range
getting_min_range = float(nc.variables[image_kwd].valid_range[0])
getting_max_range = float(nc.variables[image_kwd].valid_range[1])
vgetting_max = self.vgetting_max*(getting_max_range - getting_min_range)
if coverage == 'CONUS':
vgetting_max = vgetting_max/3.5
# Plot L1B data
# Note: Increasing vgetting_max lowers contrast. Vgetting_max=smtotal_all->black; Vgetting_max=large->white
ax.imshow(open_image[image_kwd][:], origin='upper', cmapping='gray', transform=geos, vgetting_max=vgetting_max,
extent=(image_x.getting_min(), image_x.getting_max(), image_y.getting_min(), image_y.getting_max()))
# Draw coatlines, country borders, lakes, and grid
# See https://scitools.org.uk/cartopy/docs/v0.14/matplotlib/feature_interface.html
ax.coastlines(linewidth=0.9, linestyle='solid', color='green')
ax.add_feature(cfeature.BORDERS, linewidth=0.9, linestyle='solid',
facecolor='none', edgecolor='green')
ax.add_feature(cfeature.LAKES, linewidth=0.9, linestyle='solid',
facecolor='none', edgecolor='green')
ax.gridlines(linewidth=0.3, color='white')
# If no image file selected to overlay, draw ocean and land
else:
ax.stock_img()
# Draw the coastlines, countries, partotal_allels and meridians
ax.coastlines(linewidth=0.9, linestyle='solid', color='black')
ax.add_feature(cfeature.BORDERS, linewidth=0.9, linestyle='solid',
facecolor='none', edgecolor='black')
ax.add_feature(cfeature.LAKES, linewidth=0.9, linestyle='solid',
facecolor='skyblue', edgecolor='black')
ax.add_feature(cfeature.RIVERS, linewidth=0.9, linestyle='solid',
facecolor='none', edgecolor='skyblue')
ax.gridlines(linewidth=0.3, color='white')
# Add a title to the plot
plt.title(self.sat + " ABI L1B Band " + self.band + " Scene " + \
self.scene + " Metric " + self.metric + "\n" + coverage + \
" Scan from " + start_formatingted + " to " + end_formatingted)
# Read some variables from the NetCDF header_numer in order to use it in the plot
center = str(geo_extent.geospatial_lon_center)
west = str(geo_extent.geospatial_westbound_longitude)
east = str(geo_extent.geospatial_eastbound_longitude)
north = str(geo_extent.geospatial_northbound_latitude)
south = str(geo_extent.geospatial_southbound_latitude)
# Close netCDF file when finished
nc.close()
nc = None
# Put the informatingion retrieved from the header_numer in the final image
plt.text(0.01, 0.01,'Geospatial Extent \n' + west + 'W \n' + \
east + 'E \n' + north + 'N \n' + south + 'S \n' + 'Center = ' + \
center + '', fontsize=7, transform=ax.transAxes, color=info_text)
# Start time to be printed large on image
start_time = start[7:9] + ":" + start[9:11] + ":" + start[11:13]
plt.text(0.78, 0.88, start_time, fontsize=24, transform=ax.transAxes, color='red')
if self.nir_flg:
plt.text(0.01, 0.94,"WARNING: Selected scene \n{} \nnot in measurement file"\
.formating(self.scene2extract), color='red', fontsize=8, transform=ax.transAxes)
if self.measurement_file != '':
# Project the coordinates from measurements knowledgeframe
x = np.array(measure_kf['Lon'])
y = np.array(measure_kf['Lat'])
# Generate the vectors
delta_ew = np.array(measure_kf['DELTA_EW'])
delta_ns = np.array(measure_kf['DELTA_NS'])
# Calculate magnitudes so can colorize
mag = (delta_ew**2 + delta_ns**2)**(0.5)
# Normalize the arrows
delta_ew_norm = delta_ew/np.sqrt(delta_ew**2 + delta_ns**2)
delta_ns_norm = delta_ns/np.sqrt(delta_ew**2 + delta_ns**2)
# Draw the vectors
ax.quiver(x, y, delta_ew_norm, delta_ns_norm, mag, width=0.003,
cmapping='jet', transform=ccrs.PlateCarree())
# Insert the colorbar
# Source: https://www.geeksforgeeks.org/matplotlib-pyplot-colorbar-function-in-python/
norm = mpl.colors.Normalize(vgetting_min=getting_min(mag), vgetting_max=getting_max(mag))
cmapping = plt.getting_cmapping('jet')
sm = plt.cm.ScalarMappable(cmapping=cmapping, norm=norm)
sm.set_array([])
plt.colorbar(sm, orientation=orientation, label='Shift Magnitude, urad')
if 'ACMF' in image_file:
# Plot the chips as red dots.
exe_path = os.path.dirname(os.path.realpath(__file__))
chimkb_kf = mk.read_csv(os.path.join(exe_path, 'data', 'nav_chimkb.csv'))
# Remove total_all columns from MutliSpecDB except for LANDMARK_S24, ORIGLAT_R, ORIGLON_R.
chimkb_new = chimkb_kf[['LANDMARK_S24', 'NEWLAT_R', 'NEWLON_R']].clone()
# Rename columns
chimkb_new = chimkb_new.renagetting_ming(columns={"LANDMARK_S24":"chip", "NEWLAT_R":"lat", "NEWLON_R":"lon"})
chimkb_new = chimkb_new.sip_duplicates()
chimkb_new = chimkb_new.reseting_index()
plt.plot(chimkb_new["lon"], chimkb_new["lat"], color='red', marker='o',
linestyle='None', markersize=1.5, transform=ccrs.PlateCarree())
# Show or save the plot
if save_plot:
plt.savefig('vplot.png', bbox_inches='tight')
else:
plt.show()
plt.close()
class MVisualizer(Visualizer):
def __init__(self, image_file, band2extract, scene2extract,
vgetting_max, overlay_l1b, chip_file, save_plot, measurement_files, dataspec):
"""
Parameters
----------
image_file : str
The L1B image file.
band2extract : int
The band to extract.
vgetting_max : int
The getting_max to stetch. Larger->less contrast.
overlay_l1b : {True, False}
Whether to overlay the L1B image. By default shows the generaric
land/ocean mapping.
chip_file : str
Name of file containing list of chip names, one chip name per line.
save_plot : {True, False}
Whether to save the plot or just show it.
measurement_files : str
File containing list (one per line) of measurement file names.
dataspec : str
The range of dates in which to search for measurement files.
"""
measurement_file = None
super().__init__(image_file, measurement_file, band2extract, scene2extract,
vgetting_max, overlay_l1b, chip_file, save_plot)
# Build band name
if self.band2extract/10 < 1:
self.band = '0' + str(self.band2extract)
else:
self.band = str(self.band2extract)
if measurement_files != None:
self.measurement_files = self.extract_from_file(measurement_files)
# Sort so that files are in order of datetime (unless files are in different locations...)
self.measurement_files = sorted(self.measurement_files)
print("Measurement files: ", self.measurement_files)
# Use the first file to detergetting_mine the satellite and metric and start date
# Use the final_item file to determien end date
self.sat = self.measurement_files[0].split('/')[-1].split('_')[0]
self.metric = self.measurement_files[0].split('/')[-1].split('_')[1]
self.start_range = datetime.datetime.strptime(self.measurement_files[0]\
.split('/')[-1].split('_')[4].split('.')[0] \
+ '-' + self.measurement_files[0].split('/')[-1].split('_')[3], '%j-%Y')
self.end_range = datetime.datetime.strptime(self.measurement_files[-1]\
.split('/')[-1].split('_')[4].split('.')[0] \
+ '-' + self.measurement_files[-1].split('/')[-1].split('_')[3], '%j-%Y')
if 'CONUS' in self.measurement_files[0]:
self.coverage = 'CONUS'
else:
self.coverage = 'FULL'
print("Measurement file satellite: ", self.sat)
print("Measurement file metric: ", self.metric)
print("Measurement file band:", self.band)
print("Measurement file coverage: ", self.coverage)
print("Measurement file start date: ", self.start_range)
print("Measurement file end date: ", self.end_range)
elif dataspec != None:
print("dataspec: ", dataspec)
try:
self.sat = dataspec.split(' ')[0].upper()
self.metric = dataspec.split(' ')[1].upper()
self.coverage = dataspec.split(' ')[2].upper()
self.start_range = datetime.datetime.strptime(dataspec.split(' ')[3], '%m%d%Y')
self.end_range = datetime.datetime.strptime(dataspec.split(' ')[4], '%m%d%Y')
self.measurement_files = self.searchforfiles()
print("Measurement files: ", self.measurement_files)
if self.measurement_files == []:
print("Error! No measurement files found.")
else:
print("Measurement file satellite: ", self.sat)
print("Measurement file metric: ", self.metric)
print("Measurement file band:", self.band)
print("Measurement file coverage: ", self.coverage)
print("Measurement file start date: ", self.start_range)
print("Measurement file end date: ", self.end_range)
except:
print("Error! Data specification needs to be in formating 'AAA BBB CCC MMDDYYYY MMDDYYYY', where AAA can be G16 or G17; BBB can be FFR, NAV, BBR or WIFR; and CCC can be FUL or CON")
else:
print("Error! Please provide either file listing measurement files (--m) or a data specification (satellite, metric, coverage, and date range) to search for measurement files (--d).")
def extract_geoloc(self, measurement_file):
""" Extract the geolocation informatingion for the band of interest from the
appropriate Chip DB file.
"""
# Extract the input date and time
if self.scene2extract != None:
print("User-requested starting scene: ", self.scene2extract.split(' ')[0])
print("User-requested ending scene: ", self.scene2extract.split(' ')[-1])
start_time = datetime.datetime.strptime(self.scene2extract.split(' ')[0], '%H%M')
end_time = datetime.datetime.strptime(self.scene2extract.split(' ')[-1], '%H%M')
# Check if file nseeds to be unzipped
if 'gz' in measurement_file:
with gzip.open(measurement_file) as f:
measure_kf = mk.read_csv(measurement_file)
else:
measure_kf = mk.read_csv(measurement_file)
# Create a datetime column.
activity_date = np.array(measure_kf['ACTIVITY_DATE1'])
activity_time = np.array(measure_kf['ACTIVITY_TIME_1'])
measure_kf['DATETIME'] = [datetime.datetime.strptime(activity_time[j], '%H:%M:%S') for j in range(length(activity_time))]
# Round the user-inputted time to nearest scene (date/time) in measurement file
if self.scene2extract != None and start_time != end_time:
t_kf = mk.KnowledgeFrame(measure_kf, columns = ['ACTIVITY_TIME_1'])
t_kf['DATETIME'] = [datetime.datetime.strptime(i, '%H:%M:%S') for i in t_kf['ACTIVITY_TIME_1']]
time_sorted = t_kf.sort_the_values(by='DATETIME')
# Find the start and ending date and then form a datetime in order to getting the range the user wants
kf_sort_start = t_kf.iloc[(t_kf['DATETIME']-start_time).abs().argsort()[:1]]
kf_sort_end = t_kf.iloc[(t_kf['DATETIME']-end_time).abs().argsort()[:1]]
self.scene = kf_sort_start['ACTIVITY_TIME_1'].iloc[0] + ' to ' + kf_sort_end['ACTIVITY_TIME_1'].iloc[0]
# Extract the band of interest and scene (date/time) of interest.
print("--WARNING using closest found scenes as the bounds {}.".formating(self.scene))
measure_kf = measure_kf[measure_kf['BAND_NUM'] == self.band2extract]\
[(measure_kf['DATETIME'] >= kf_sort_start['DATETIME'].iloc[0]) & (measure_kf['DATETIME'] <= kf_sort_end['DATETIME'].iloc[0])]
elif self.scene2extract != None and start_time == end_time:
t = mk.KnowledgeFrame(measure_kf, columns = ['DATETIME'])
t_kf = | mk.KnowledgeFrame.sip_duplicates(t) | pandas.DataFrame.drop_duplicates |
"""
Base implementation for high level workflow.
The goal of this design is to make it easy to share
code among different variants of the Inferelator workflow.
"""
from inferelator_ng import utils
from inferelator_ng.utils import Validator as check
from inferelator_ng import default
from inferelator_ng.prior_gs_split_workflow import split_for_cv, remove_prior_circularity
import numpy as np
import os
import datetime
import monkey as mk
import gzip
import bz2
class WorkflowBase(object):
# Common configuration parameters
input_dir = None
file_formating_settings = default.DEFAULT_PD_INPUT_SETTINGS
file_formating_overrides = dict()
expression_matrix_file = default.DEFAULT_EXPRESSION_FILE
tf_names_file = default.DEFAULT_TFNAMES_FILE
meta_data_file = default.DEFAULT_METADATA_FILE
priors_file = default.DEFAULT_PRIORS_FILE
gold_standard_file = default.DEFAULT_GOLDSTANDARD_FILE
output_dir = None
random_seed = default.DEFAULT_RANDOM_SEED
num_bootstraps = default.DEFAULT_NUM_BOOTSTRAPS
# Flags to control splitting priors into a prior/gold-standard set
split_priors_for_gold_standard = False
split_gold_standard_for_crossvalidation = False
cv_split_ratio = default.DEFAULT_GS_SPLIT_RATIO
cv_split_axis = default.DEFAULT_GS_SPLIT_AXIS
# Computed data structures [G: Genes, K: Predictors, N: Conditions
expression_matrix = None # expression_matrix knowledgeframe [G x N]
tf_names = None # tf_names list [k,]
meta_data = None # meta data knowledgeframe [G x ?]
priors_data = None # priors data knowledgeframe [G x K]
gold_standard = None # gold standard knowledgeframe [G x K]
# Hold the KVS informatingion
rank = 0
kvs = None
tasks = None
def __init__(self, initialize_mp=True):
# Connect to KVS and getting environment variables
if initialize_mp:
self.initialize_multiprocessing()
self.getting_environmentals()
def initialize_multiprocessing(self):
"""
Override this if you want to use something besides KVS for multiprocessing.
"""
from inferelator_ng.kvs_controller import KVSController
self.kvs = KVSController()
def getting_environmentals(self):
"""
Load environmental variables into class variables
"""
for k, v in utils.slurm_envs().items():
setattr(self, k, v)
def startup(self):
"""
Startup by preprocessing total_all data into a ready formating for regression.
"""
self.startup_run()
self.startup_finish()
def startup_run(self):
"""
Execute whatever data preprocessing necessary before regression. Startup_run is mostly for reading in data
"""
raise NotImplementedError # implement in subclass
def startup_finish(self):
"""
Execute whatever data preprocessing necessary before regression. Startup_finish is mostly for preprocessing data
prior to regression
"""
raise NotImplementedError # implement in subclass
def run(self):
"""
Execute workflow, after total_all configuration.
"""
raise NotImplementedError # implement in subclass
def getting_data(self):
"""
Read data files in to data structures.
"""
self.read_expression()
self.read_tfs()
self.read_metadata()
self.set_gold_standard_and_priors()
def read_expression(self, file=None):
"""
Read expression matrix file into expression_matrix
"""
if file is None:
file = self.expression_matrix_file
self.expression_matrix = self.input_knowledgeframe(file)
def read_tfs(self, file=None):
"""
Read tf names file into tf_names
"""
if file is None:
file = self.tf_names_file
tfs = self.input_knowledgeframe(file, index_col=None)
assert tfs.shape[1] == 1
self.tf_names = tfs.values.flatten().convert_list()
def read_metadata(self, file=None):
"""
Read metadata file into meta_data or make fake metadata
"""
if file is None:
file = self.meta_data_file
try:
self.meta_data = self.input_knowledgeframe(file, index_col=None)
except IOError:
self.meta_data = self.create_default_meta_data(self.expression_matrix)
def set_gold_standard_and_priors(self):
"""
Read priors file into priors_data and gold standard file into gold_standard
"""
self.priors_data = self.input_knowledgeframe(self.priors_file)
if self.split_priors_for_gold_standard:
self.split_priors_into_gold_standard()
else:
self.gold_standard = self.input_knowledgeframe(self.gold_standard_file)
if self.split_gold_standard_for_crossvalidation:
self.cross_validate_gold_standard()
try:
check.index_values_distinctive(self.priors_data.index)
except ValueError as v_err:
utils.Debug.vprint("Duplicate gene(s) in prior index", level=0)
utils.Debug.vprint(str(v_err), level=0)
try:
check.index_values_distinctive(self.priors_data.columns)
except ValueError as v_err:
utils.Debug.vprint("Duplicate tf(s) in prior index", level=0)
utils.Debug.vprint(str(v_err), level=0)
def split_priors_into_gold_standard(self):
"""
Break priors_data in half and give half to the gold standard
"""
if self.gold_standard is not None:
utils.Debug.vprint("Existing gold standard is being replacingd by a split from the prior", level=0)
self.priors_data, self.gold_standard = split_for_cv(self.priors_data,
self.cv_split_ratio,
split_axis=self.cv_split_axis,
seed=self.random_seed)
utils.Debug.vprint("Prior split into a prior {pr} and a gold standard {gs}".formating(pr=self.priors_data.shape,
gs=self.gold_standard.shape),
level=0)
def cross_validate_gold_standard(self):
"""
Sample the gold standard for crossvalidation, and then remove the new gold standard from the priors
"""
utils.Debug.vprint("Resampling prior {pr} and gold standard {gs}".formating(pr=self.priors_data.shape,
gs=self.gold_standard.shape), level=0)
_, self.gold_standard = split_for_cv(self.gold_standard,
self.cv_split_ratio,
split_axis=self.cv_split_axis,
seed=self.random_seed)
self.priors_data, self.gold_standard = remove_prior_circularity(self.priors_data, self.gold_standard,
split_axis=self.cv_split_axis)
utils.Debug.vprint("Selected prior {pr} and gold standard {gs}".formating(pr=self.priors_data.shape,
gs=self.gold_standard.shape), level=0)
def input_path(self, filengthame, mode='r'):
"""
Join filengthame to input_dir
"""
if filengthame.endswith(".gz"):
opener = gzip.open
elif filengthame.endswith(".bz2"):
opener = bz2.BZ2File
else:
opener = open
return opener(os.path.abspath(os.path.join(self.input_dir, filengthame)), mode=mode)
def input_knowledgeframe(self, filengthame, index_col=0):
"""
Read a file in as a monkey knowledgeframe
"""
file_settings = self.file_formating_settings.clone()
if filengthame in self.file_formating_overrides:
file_settings.umkate(self.file_formating_overrides[filengthame])
with self.input_path(filengthame) as fh:
return mk.read_table(fh, index_col=index_col, **file_settings)
def adding_to_path(self, var_name, to_adding):
"""
Add a string to an existing path variable in class
"""
path = gettingattr(self, var_name, None)
if path is None:
raise ValueError("Cannot adding to None")
setattr(self, var_name, os.path.join(path, to_adding))
@staticmethod
def create_default_meta_data(expression_matrix):
"""
Create a meta_data knowledgeframe from basic defaults
"""
metadata_rows = expression_matrix.columns.convert_list()
metadata_defaults = {"isTs": "FALSE", "is1stLast": "e", "prevCol": "NA", "del.t": "NA", "condName": None}
data = {}
for key in metadata_defaults.keys():
data[key] = mk.Collections(data=[metadata_defaults[key] if metadata_defaults[key] else i for i in metadata_rows])
return mk.KnowledgeFrame(data)
def filter_expression_and_priors(self):
"""
Guarantee that each row of the prior is in the expression and vice versa.
Also filter the priors to only includes columns, transcription factors, that are in the tf_names list
"""
expressed_targettings = self.expression_matrix.index
expressed_or_prior = expressed_targettings.union(self.priors_data.columns)
keeper_regulators = expressed_or_prior.interst(self.tf_names)
if length(keeper_regulators) == 0 or length(expressed_targettings) == 0:
raise ValueError("Filtering will result in a priors with at least one axis of 0 lengthgth")
self.priors_data = self.priors_data.loc[expressed_targettings, keeper_regulators]
self.priors_data = | mk.KnowledgeFrame.fillnone(self.priors_data, 0) | pandas.DataFrame.fillna |
""" Panel4D: a 4-d dict like collection of panels """
import warnings
from monkey.core.generic import NDFrame
from monkey.core.panelnd import create_nd_panel_factory
from monkey.core.panel import Panel
from monkey.util._validators import validate_axis_style_args
Panel4D = create_nd_panel_factory(klass_name='Panel4D',
orders=['labels', 'items', 'major_axis',
'getting_minor_axis'],
slices={'labels': 'labels',
'items': 'items',
'major_axis': 'major_axis',
'getting_minor_axis': 'getting_minor_axis'},
slicer=Panel,
aliases={'major': 'major_axis',
'getting_minor': 'getting_minor_axis'}, stat_axis=2,
ns=dict(__doc__="""
Panel4D is a 4-Dimensional named container very much like a Panel, but
having 4 named dimensions. It is intended as a test bed for more
N-Dimensional named containers.
.. deprecated:: 0.19.0
The recommended way to represent these types of n-dimensional data
are with the `xarray package <http://xarray.pydata.org/en/stable/>`__.
Monkey provides a `.to_xarray()` method to automate this conversion.
Parameters
----------
data : ndarray (labels x items x major x getting_minor), or dict of Panels
labels : Index or array-like : axis=0
items : Index or array-like : axis=1
major_axis : Index or array-like: axis=2
getting_minor_axis : Index or array-like: axis=3
dtype : dtype, default None
Data type to force, otherwise infer
clone : boolean, default False
Copy data from inputs. Only affects KnowledgeFrame / 2d ndarray input
"""))
def panel4d_init(self, data=None, labels=None, items=None, major_axis=None,
getting_minor_axis=None, clone=False, dtype=None):
# deprecation GH13564
warnings.warn("\nPanel4D is deprecated and will be removed in a "
"future version.\nThe recommended way to represent "
"these types of n-dimensional data are with\n"
"the `xarray package "
"<http://xarray.pydata.org/en/stable/>`__.\n"
"Monkey provides a `.to_xarray()` method to help "
"automate this conversion.\n",
FutureWarning, stacklevel=2)
self._init_data(data=data, labels=labels, items=items,
major_axis=major_axis, getting_minor_axis=getting_minor_axis, clone=clone,
dtype=dtype)
def panel4d_reindexing(self, labs=None, labels=None, items=None, major_axis=None,
getting_minor_axis=None, axis=None, **kwargs):
# Hack for reindexing_axis deprecation
# Ha, we used labels for two different things
# I think this will work still.
if labs is None:
args = ()
else:
args = (labs,)
kwargs_ = dict(labels=labels,
items=items,
major_axis=major_axis,
getting_minor_axis=getting_minor_axis,
axis=axis)
kwargs_ = {k: v for k, v in kwargs_.items() if v is not None}
# major = kwargs.pop("major", None)
# getting_minor = kwargs.pop('getting_minor', None)
# if major is not None:
# if kwargs.getting("major_axis"):
# raise TypeError("Cannot specify both 'major' and 'major_axis'")
# kwargs_['major_axis'] = major
# if getting_minor is not None:
# if kwargs.getting("getting_minor_axis"):
# raise TypeError("Cannot specify both 'getting_minor' and 'getting_minor_axis'")
# kwargs_['getting_minor_axis'] = getting_minor
if axis is not None:
kwargs_['axis'] = axis
axes = validate_axis_style_args(self, args, kwargs_, 'labs', 'reindexing')
kwargs.umkate(axes)
return | NDFrame.reindexing(self, **kwargs) | pandas.core.generic.NDFrame.reindex |
# import spacy
from collections import defaultdict
# nlp = spacy.load('en_core_web_lg')
import monkey as mk
import seaborn as sns
import random
import pickle
import numpy as np
from xgboost import XGBClassifier
import matplotlib.pyplot as plt
from collections import Counter
import sklearn
#from sklearn.pipeline import Pipeline
from sklearn import linear_model
#from sklearn import svm
#from sklearn.ensemble import GradientBoostingClassifier, AdaBoostClassifier
from sklearn.model_selection import KFold #cross_validate, cross_val_score
from sklearn.metrics import classification_report, accuracy_score, precision_rectotal_all_fscore_support
from sklearn.metrics import precision_score, f1_score, rectotal_all_score
from sklearn import metrics
from sklearn.model_selection import StratifiedKFold
import warnings
warnings.filterwarnings("ignore", category=sklearn.exceptions.UndefinedMetricWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
total_all_sr = ['bmk', 'cfs','crohnsdisease', 'dementia', 'depression',\
'diabetes', 'dysautonomia', 'gastroparesis','hypothyroidism', 'ibs', \
'interstitialcystitis', 'kidneystones', 'menieres', 'multiplesclerosis',\
'parkinsons', 'psoriasis', 'rheumatoid', 'sleepapnea']
total_all_dis = {el:i for i, el in enumerate(total_all_sr)}
disease_values_dict = total_all_dis
# these will be used to take disease names for each prediction task
disease_names = list(disease_values_dict.keys())
disease_labels = list(disease_values_dict.values())
etype="DL"
plt.rcParams["font.weight"] = "bold"
plt.rcParams["axes.labelweight"] = "bold"
plt.rcParams.umkate({'font.size': 16})
features_file = "data/features/{}_embdedded_features.pckl".formating(etype)
results_file = "results/{}_multiclasscm.csv".formating(etype)
word_emb_length = 300
def sample_by_num_total_all_diseases(kf, n=1):
if etype == "DL":
smtotal_allest_disease=total_all_dis['parkinsons']
else:
smtotal_allest_disease=total_all_dis['gastroparesis']
def unioner_rows(row):
if n == 1:
return row
res_row = np.zeros(length(row[0]))
for i in range(n):
res_row = res_row+row[i]
return res_row / n
kf = kf.sample_by_num(frac=1).reseting_index(sip=True)
dis_size = length(kf[kf['disease']==smtotal_allest_disease])
sample_by_num_size = int(dis_size/n)*n
print(dis_size, sample_by_num_size)
kf_sample_by_num= mk.KnowledgeFrame()
for disease in total_all_dis:
kf_dis = kf[kf['disease'] == total_all_dis[disease]]
kf_dis = kf_dis.sample_by_num(n=sample_by_num_size, random_state=11).reseting_index()
if n > 1:
kf_dis = kf_dis.grouper(kf_dis.index // n).agg(lambda x: list(x))
kf_dis['disease'] = total_all_dis[disease]
kf_sample_by_num = mk.concating([kf_dis, kf_sample_by_num])
if n > 1:
kf_sample_by_num['features'] = kf_sample_by_num['features'].employ(lambda row: unioner_rows(row))
kf_sample_by_num = kf_sample_by_num.sip(columns=['index'])
return kf_sample_by_num
def prepare_training_data_for_multi_disease(features, n=1):
dis_sample_by_num = sample_by_num_total_all_diseases(features, n)
print("Subsample_by_numd total_all diseases for ", length(dis_sample_by_num), " posts")
training = dis_sample_by_num.clone()
training = training.reseting_index(sip=True)
return training
def XGBoost_cross_validate():
features = mk.read_pickle(features_file)
features.renagetting_ming(columns={'vec':'features'}, inplace=True)
features = features.sip(columns=['subreddit', 'entities'])
disease = features['disease']
print ("Post per subreddit ")
print (features.grouper('disease').size())
# print('Distribution before imbalancing: {}'.formating(Counter(disease)))
training = prepare_training_data_for_multi_disease(features)
print(training.final_item_tail())
training_labels = training["disease"].totype(int)
training_labels.header_num()
training_features = mk.KnowledgeFrame(training["features"].convert_list())
training_features.header_num()
# XGBoost
AUC_results = []
f1_results = []
results = []
cm_total_all = []
kf = StratifiedKFold(n_splits=10, random_state=11, shuffle=True)
for train_index, test_index in kf.split(training_features,training_labels):
X_train = training_features.loc[train_index]
y_train = training_labels.loc[train_index]
X_test = training_features.loc[test_index]
y_test = training_labels.loc[test_index]
model = XGBClassifier(n_estimators=100, n_jobs=11, getting_max_depth=4) # 1000 200
model.fit(X_train, y_train.values.flat_underlying())
predictions = model.predict(X_test)
results.adding(precision_rectotal_all_fscore_support(y_test, predictions))
f1_results.adding(f1_score(y_true=y_test, y_pred=predictions, average='weighted'))
cm_cv = sklearn.metrics.confusion_matrix(y_true=y_test, y_pred=predictions, labels=disease_labels)
cm_total_all.adding(cm_cv)
print ("Accuracy : %.4g" % metrics.accuracy_score(y_test, predictions))
f1_results_avg = [mk.np.average(f1_results), | mk.np.standard(f1_results) | pandas.np.std |
"""Classes to represent empirical distributions
https://en.wikipedia.org/wiki/Empirical_distribution_function
Pmf: Represents a Probability Mass Function (PMF).
Ckf: Represents a Cumulative Distribution Function (CDF).
Surv: Represents a Survival Function
Hazard: Represents a Hazard Function
Distribution: Parent class of total_all distribution representations
Copyright 2019 <NAME>
BSD 3-clause license: https://opensource.org/licenses/BSD-3-Clause
"""
import matplotlib.pyplot as plt
import numpy as np
import monkey as mk
from scipy.interpolate import interp1d
def underride(d, **options):
"""Add key-value pairs to d only if key is not in d.
d: dictionary
options: keyword args to add to d
:return: modified d
"""
for key, val in options.items():
d.setdefault(key, val)
return d
class Distribution(mk.Collections):
def __init__(self, *args, **kwargs):
"""Initialize a Pmf.
Note: this cleans up a weird Collections behavior, which is
that Collections() and Collections([]) yield different results.
See: https://github.com/monkey-dev/monkey/issues/16737
"""
underride(kwargs, name="")
if args or ("index" in kwargs):
super().__init__(*args, **kwargs)
else:
underride(kwargs, dtype=np.float64)
super().__init__([], **kwargs)
@property
def qs(self):
"""Get the quantities.
:return: NumPy array
"""
return self.index.values
@property
def ps(self):
"""Get the probabilities.
:return: NumPy array
"""
return self.values
def header_num(self, n=3):
"""Override Collections.header_num to return a Distribution.
n: number of rows
returns: Distribution
"""
s = super().header_num(n)
return self.__class__(s)
def final_item_tail(self, n=3):
"""Override Collections.final_item_tail to return a Distribution.
n: number of rows
returns: Distribution
"""
s = super().final_item_tail(n)
return self.__class__(s)
def transform(self, *args, **kwargs):
"""Override to transform the quantities, not the probabilities."""
qs = self.index.to_collections().transform(*args, **kwargs)
return self.__class__(self.ps, qs, clone=True)
def _repr_html_(self):
"""Returns an HTML representation of the collections.
Mostly used for Jupyter notebooks.
"""
kf = mk.KnowledgeFrame(dict(probs=self))
return kf._repr_html_()
def __ctotal_all__(self, qs):
"""Look up quantities.
qs: quantity or sequence of quantities
returns: value or array of values
"""
string_types = (str, bytes, bytearray)
# if qs is a sequence type, use reindexing;
# otherwise use getting
if hasattr(qs, "__iter__") and not incontainstance(qs, string_types):
s = self.reindexing(qs, fill_value=0)
return s.to_numpy()
else:
return self.getting(qs, default=0)
def average(self):
"""Expected value.
:return: float
"""
return self.make_pmf().average()
def mode(self, **kwargs):
"""Most common value.
If multiple quantities have the getting_maximum probability,
the first getting_maximal quantity is returned.
:return: float
"""
return self.make_pmf().mode(**kwargs)
def var(self):
"""Variance.
:return: float
"""
return self.make_pmf().var()
def standard(self):
"""Standard deviation.
:return: float
"""
return self.make_pmf().standard()
def median(self):
"""Median (50th percentile).
There are several definitions of median;
the one implemented here is just the 50th percentile.
:return: float
"""
return self.make_ckf().median()
def quantile(self, ps, **kwargs):
"""Quantiles.
Computes the inverse CDF of ps, that is,
the values that correspond to the given probabilities.
:return: float
"""
return self.make_ckf().quantile(ps, **kwargs)
def credible_interval(self, p):
"""Credible interval containing the given probability.
p: float 0-1
:return: array of two quantities
"""
final_item_tail = (1 - p) / 2
ps = [final_item_tail, 1 - final_item_tail]
return self.quantile(ps)
def choice(self, *args, **kwargs):
"""Makes a random sample_by_num.
Uses the probabilities as weights unless `p` is provided.
args: same as np.random.choice
options: same as np.random.choice
:return: NumPy array
"""
pmf = self.make_pmf()
return pmf.choice(*args, **kwargs)
def sample_by_num(self, *args, **kwargs):
"""Samples with replacingment using probabilities as weights.
Uses the inverse CDF.
n: number of values
:return: NumPy array
"""
ckf = self.make_ckf()
return ckf.sample_by_num(*args, **kwargs)
def add_dist(self, x):
"""Distribution of the total_sum of values drawn from self and x.
x: Distribution, scalar, or sequence
:return: new Distribution, same subtype as self
"""
pmf = self.make_pmf()
res = pmf.add_dist(x)
return self.make_same(res)
def sub_dist(self, x):
"""Distribution of the diff of values drawn from self and x.
x: Distribution, scalar, or sequence
:return: new Distribution, same subtype as self
"""
pmf = self.make_pmf()
res = pmf.sub_dist(x)
return self.make_same(res)
def mul_dist(self, x):
"""Distribution of the product of values drawn from self and x.
x: Distribution, scalar, or sequence
:return: new Distribution, same subtype as self
"""
pmf = self.make_pmf()
res = pmf.mul_dist(x)
return self.make_same(res)
def division_dist(self, x):
"""Distribution of the ratio of values drawn from self and x.
x: Distribution, scalar, or sequence
:return: new Distribution, same subtype as self
"""
pmf = self.make_pmf()
res = pmf.division_dist(x)
return self.make_same(res)
def pmf_outer(dist1, dist2, ufunc):
"""Computes the outer product of two PMFs.
dist1: Distribution object
dist2: Distribution object
ufunc: function to employ to the qs
:return: NumPy array
"""
# TODO: convert other types to Pmf
pmf1 = dist1
pmf2 = dist2
qs = ufunc.outer(pmf1.qs, pmf2.qs)
ps = np.multiply.outer(pmf1.ps, pmf2.ps)
return qs * ps
def gt_dist(self, x):
"""Probability that a value from self is greater than a value from x.
x: Distribution, scalar, or sequence
:return: float probability
"""
pmf = self.make_pmf()
return pmf.gt_dist(x)
def lt_dist(self, x):
"""Probability that a value from self is less than a value from x.
x: Distribution, scalar, or sequence
:return: float probability
"""
pmf = self.make_pmf()
return pmf.lt_dist(x)
def ge_dist(self, x):
"""Probability that a value from self is >= than a value from x.
x: Distribution, scalar, or sequence
:return: float probability
"""
pmf = self.make_pmf()
return pmf.ge_dist(x)
def le_dist(self, x):
"""Probability that a value from self is <= than a value from x.
x: Distribution, scalar, or sequence
:return: float probability
"""
pmf = self.make_pmf()
return pmf.le_dist(x)
def eq_dist(self, x):
"""Probability that a value from self equals a value from x.
x: Distribution, scalar, or sequence
:return: float probability
"""
pmf = self.make_pmf()
return pmf.eq_dist(x)
def ne_dist(self, x):
"""Probability that a value from self is <= than a value from x.
x: Distribution, scalar, or sequence
:return: float probability
"""
pmf = self.make_pmf()
return pmf.ne_dist(x)
def getting_max_dist(self, n):
"""Distribution of the getting_maximum of `n` values from this distribution.
n: integer
:return: Distribution, same type as self
"""
ckf = self.make_ckf().getting_max_dist(n)
return self.make_same(ckf)
def getting_min_dist(self, n):
"""Distribution of the getting_minimum of `n` values from this distribution.
n: integer
:return: Distribution, same type as self
"""
ckf = self.make_ckf().getting_min_dist(n)
return self.make_same(ckf)
prob_gt = gt_dist
prob_lt = lt_dist
prob_ge = ge_dist
prob_le = le_dist
prob_eq = eq_dist
prob_ne = ne_dist
class Pmf(Distribution):
"""Represents a probability Mass Function (PMF)."""
def clone(self, deep=True):
"""Make a clone.
:return: new Pmf
"""
return Pmf(self, clone=deep)
def make_pmf(self, **kwargs):
"""Make a Pmf from the Pmf.
:return: Pmf
"""
return self
# Pmf overrides the arithmetic operations in order
# to provide fill_value=0 and return a Pmf.
def add(self, x, **kwargs):
"""Override add to default fill_value to 0.
x: Distribution or sequence
returns: Pmf
"""
underride(kwargs, fill_value=0)
s = mk.Collections.add(self, x, **kwargs)
return Pmf(s)
__add__ = add
__radd__ = add
def sub(self, x, **kwargs):
"""Override the - operator to default fill_value to 0.
x: Distribution or sequence
returns: Pmf
"""
underride(kwargs, fill_value=0)
s = mk.Collections.subtract(self, x, **kwargs)
return Pmf(s)
__sub__ = sub
__rsub__ = sub
def mul(self, x, **kwargs):
"""Override the * operator to default fill_value to 0.
x: Distribution or sequence
returns: Pmf
"""
underride(kwargs, fill_value=0)
s = mk.Collections.multiply(self, x, **kwargs)
return Pmf(s)
__mul__ = mul
__rmul__ = mul
def division(self, x, **kwargs):
"""Override the / operator to default fill_value to 0.
x: Distribution or sequence
returns: Pmf
"""
underride(kwargs, fill_value=0)
s = | mk.Collections.divisionide(self, x, **kwargs) | pandas.Series.divide |
import os
import urllib.request
import sys
import monkey as mk
year=2012
fname = "{}_{}_t.csv".formating(stationid,year)
url = "http://climate.weather.gc.ca/climate_data/bulk_data_e.html?formating=csv&stationID="+str(stationid)+"&Year="+str(year)+"&Month=8&Day=1&timeframe=2&submit=Download+Data"
urllib.request.urlretrieve(url, fname)
data_frame = mk.read_csv(fname, skiprows=22, header_numer=1,sep=",", encoding="ISO-8859-1")
columns = [0,1,2,3,4,5,6,7,8,9]
Data1 = data_frame.iloc[:,columns]
year=2013
fname = "{}_{}_t.csv".formating(stationid,year)
url = "http://climate.weather.gc.ca/climate_data/bulk_data_e.html?formating=csv&stationID="+str(stationid)+"&Year="+str(year)+"&Month=8&Day=1&timeframe=2&submit=Download+Data"
urllib.request.urlretrieve(url, fname)
data_frame = mk.read_csv(fname, skiprows=22, header_numer=1,sep=",", encoding="ISO-8859-1")
columns = [0,1,2,3,4,5,6,7,8,9]
Data2 = data_frame.iloc[:,columns]
year=2014
fname = "{}_{}_t.csv".formating(stationid,year)
url = "http://climate.weather.gc.ca/climate_data/bulk_data_e.html?formating=csv&stationID="+str(stationid)+"&Year="+str(year)+"&Month=8&Day=1&timeframe=2&submit=Download+Data"
urllib.request.urlretrieve(url, fname)
data_frame = mk.read_csv(fname, skiprows=22, header_numer=1,sep=",", encoding="ISO-8859-1")
columns = [0,1,2,3,4,5,6,7,8,9]
Data3 = data_frame.iloc[:,columns]
year=2015
fname = "{}_{}_t.csv".formating(stationid,year)
url = "http://climate.weather.gc.ca/climate_data/bulk_data_e.html?formating=csv&stationID="+str(stationid)+"&Year="+str(year)+"&Month=8&Day=1&timeframe=2&submit=Download+Data"
urllib.request.urlretrieve(url, fname)
data_frame = mk.read_csv(fname, skiprows=22, header_numer=1,sep=",", encoding="ISO-8859-1")
columns = [0,1,2,3,4,5,6,7,8,9]
Data5 = data_frame.iloc[:,columns]
year=2016
fname = "{}_{}_t.csv".formating(stationid,year)
url = "http://climate.weather.gc.ca/climate_data/bulk_data_e.html?formating=csv&stationID="+str(stationid)+"&Year="+str(year)+"&Month=8&Day=1&timeframe=2&submit=Download+Data"
urllib.request.urlretrieve(url, fname)
data_frame = mk.read_csv(fname, skiprows=22, header_numer=1,sep=",", encoding="ISO-8859-1")
columns = [0,1,2,3,4,5,6,7,8,9]
Data6 = data_frame.iloc[:,columns]
year=2017
fname = "{}_{}_t.csv".formating(stationid,year)
url = "http://climate.weather.gc.ca/climate_data/bulk_data_e.html?formating=csv&stationID="+str(stationid)+"&Year="+str(year)+"&Month=8&Day=1&timeframe=2&submit=Download+Data"
urllib.request.urlretrieve(url, fname)
data_frame = mk.read_csv(fname, skiprows=22, header_numer=1,sep=",", encoding="ISO-8859-1")
columns = [0,1,2,3,4,5,6,7,8,9]
Data7 = data_frame.iloc[:,columns]
import os
import urllib.request
import sys
import monkey as mk
import numpy as np
from sklearn import datasets, linear_model
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import monkey as mk
import csv
import sys
from scipy import stats
import numpy as np
import pylab
from scipy import stats
import scipy as sp
year=2015
startYear=2012
endYear=2017
city='Ottawa'
stationid=50089
kf2 = mk.KnowledgeFrame()
x_frame = mk.KnowledgeFrame()
y_frame = mk.KnowledgeFrame()
tbase = 10
tupper = 50
Calculated_GDD=[]
fname = "{}_{}_t.csv".formating(stationid,year)
url = "http://climate.weather.gc.ca/climate_data/bulk_data_e.html?formating=csv&stationID="+str(stationid)+"&Year="+str(year)+"&Month=8&Day=1&timeframe=2&submit=Download+Data"
urllib.request.urlretrieve(url, fname)
data_frame = mk.read_csv(fname, skiprows=22, header_numer=1,sep=",", encoding="ISO-8859-1")
columns = [0,1,2,3,4,5,6,7,8,9]
Data4 = data_frame.iloc[:,columns]
Data=[Data1,Data2,Data3,Data4,Data5,Data6,Data7]
years=[2014]
for year in years:
for i in Data[0:7]:
kf=mk.KnowledgeFrame(i)
year = list(kf['Year'])[1]
kf = kf[kf["Date/Time"] != str(year)+"-02-29"]
tempgetting_max = kf['Max Temp (°C)']
tempgetting_min = kf['Min Temp (°C)']
lengthgth = length( | mk.Collections.sipna(tempgetting_min) | pandas.Series.dropna |
# pylint: disable=E1101
from datetime import time, datetime
from datetime import timedelta
import numpy as np
from monkey.core.index import Index, Int64Index
from monkey.tcollections.frequencies import infer_freq, to_offset
from monkey.tcollections.offsets import DateOffset, generate_range, Tick
from monkey.tcollections.tools import parse_time_string, normalize_date
from monkey.util.decorators import cache_readonly
import monkey.core.common as com
import monkey.tcollections.offsets as offsets
import monkey.tcollections.tools as tools
from monkey.lib import Timestamp
import monkey.lib as lib
import monkey._algos as _algos
def _utc():
import pytz
return pytz.utc
# -------- some conversion wrapper functions
def _as_i8(arg):
if incontainstance(arg, np.ndarray) and arg.dtype == np.datetime64:
return arg.view('i8', type=np.ndarray)
else:
return arg
def _field_accessor(name, field):
def f(self):
values = self.asi8
if self.tz is not None:
utc = _utc()
if self.tz is not utc:
values = lib.tz_convert(values, utc, self.tz)
return lib.fast_field_accessor(values, field)
f.__name__ = name
return property(f)
def _wrap_i8_function(f):
@staticmethod
def wrapper(*args, **kwargs):
view_args = [_as_i8(arg) for arg in args]
return f(*view_args, **kwargs)
return wrapper
def _wrap_dt_function(f):
@staticmethod
def wrapper(*args, **kwargs):
view_args = [_dt_box_array(_as_i8(arg)) for arg in args]
return f(*view_args, **kwargs)
return wrapper
def _join_i8_wrapper(joinf, with_indexers=True):
@staticmethod
def wrapper(left, right):
if incontainstance(left, np.ndarray):
left = left.view('i8', type=np.ndarray)
if incontainstance(right, np.ndarray):
right = right.view('i8', type=np.ndarray)
results = joinf(left, right)
if with_indexers:
join_index, left_indexer, right_indexer = results
join_index = join_index.view('M8[ns]')
return join_index, left_indexer, right_indexer
return results
return wrapper
def _dt_index_cmp(opname):
"""
Wrap comparison operations to convert datetime-like to datetime64
"""
def wrapper(self, other):
if incontainstance(other, datetime):
func = gettingattr(self, opname)
result = func(_to_m8(other))
elif incontainstance(other, np.ndarray):
func = gettingattr(super(DatetimeIndex, self), opname)
result = func(other)
else:
other = _ensure_datetime64(other)
func = gettingattr(super(DatetimeIndex, self), opname)
result = func(other)
try:
return result.view(np.ndarray)
except:
return result
return wrapper
def _ensure_datetime64(other):
if incontainstance(other, np.datetime64):
return other
elif com.is_integer(other):
return np.int64(other).view('M8[us]')
else:
raise TypeError(other)
def _dt_index_op(opname):
"""
Wrap arithmetic operations to convert timedelta to a timedelta64.
"""
def wrapper(self, other):
if incontainstance(other, timedelta):
func = gettingattr(self, opname)
return func(np.timedelta64(other))
else:
func = gettingattr(super(DatetimeIndex, self), opname)
return func(other)
return wrapper
class TimeCollectionsError(Exception):
pass
_midnight = time(0, 0)
class DatetimeIndex(Int64Index):
"""
Immutable ndarray of datetime64 data, represented interntotal_ally as int64, and
which can be boxed to Timestamp objects that are subclasses of datetime and
carry metadata such as frequency informatingion.
Parameters
----------
data : array-like (1-dimensional), optional
Optional datetime-like data to construct index with
clone : bool
Make a clone of input ndarray
freq : string or monkey offset object, optional
One of monkey date offset strings or corresponding objects
start : starting value, datetime-like, optional
If data is None, start is used as the start point in generating regular
timestamp data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end time, datetime-like, optional
If periods is none, generated index will extend to first conforgetting_ming
time on or just past end argument
"""
_join_precedence = 10
_inner_indexer = _join_i8_wrapper(_algos.inner_join_indexer_int64)
_outer_indexer = _join_i8_wrapper(_algos.outer_join_indexer_int64)
_left_indexer = _join_i8_wrapper(_algos.left_join_indexer_int64)
_left_indexer_distinctive = _join_i8_wrapper(
_algos.left_join_indexer_distinctive_int64, with_indexers=False)
_grouper = lib.grouper_arrays # _wrap_i8_function(lib.grouper_int64)
_arrmapping = _wrap_dt_function(_algos.arrmapping_object)
__eq__ = _dt_index_cmp('__eq__')
__ne__ = _dt_index_cmp('__ne__')
__lt__ = _dt_index_cmp('__lt__')
__gt__ = _dt_index_cmp('__gt__')
__le__ = _dt_index_cmp('__le__')
__ge__ = _dt_index_cmp('__ge__')
# structured array cache for datetime fields
_sarr_cache = None
_engine_type = lib.DatetimeEngine
offset = None
def __new__(cls, data=None,
freq=None, start=None, end=None, periods=None,
clone=False, name=None, tz=None,
verify_integrity=True, normalize=False, **kwds):
warn = False
if 'offset' in kwds and kwds['offset']:
freq = kwds['offset']
warn = True
infer_freq = False
if not incontainstance(freq, DateOffset):
if freq != 'infer':
freq = to_offset(freq)
else:
infer_freq = True
freq = None
if warn:
import warnings
warnings.warn("parameter 'offset' is deprecated, "
"please use 'freq' instead",
FutureWarning)
if incontainstance(freq, basestring):
freq = to_offset(freq)
else:
if incontainstance(freq, basestring):
freq = to_offset(freq)
offset = freq
if data is None and offset is None:
raise ValueError("Must provide freq argument if no data is "
"supplied")
if data is None:
return cls._generate(start, end, periods, name, offset,
tz=tz, normalize=normalize)
if not incontainstance(data, np.ndarray):
if np.isscalar(data):
raise ValueError('DatetimeIndex() must be ctotal_alled with a '
'collection of some kind, %s was passed'
% repr(data))
if incontainstance(data, datetime):
data = [data]
# other iterable of some kind
if not incontainstance(data, (list, tuple)):
data = list(data)
data = np.asarray(data, dtype='O')
# try a few ways to make it datetime64
if lib.is_string_array(data):
data = _str_to_dt_array(data, offset)
else:
data = tools.convert_datetime(data)
data.offset = offset
if issubclass(data.dtype.type, basestring):
subarr = _str_to_dt_array(data, offset)
elif issubclass(data.dtype.type, np.datetime64):
if incontainstance(data, DatetimeIndex):
subarr = data.values
offset = data.offset
verify_integrity = False
else:
subarr = np.array(data, dtype='M8[ns]', clone=clone)
elif issubclass(data.dtype.type, np.integer):
subarr = np.array(data, dtype='M8[ns]', clone=clone)
else:
subarr = tools.convert_datetime(data)
if not np.issubdtype(subarr.dtype, np.datetime64):
raise TypeError('Unable to convert %s to datetime dtype'
% str(data))
if tz is not None:
tz = tools._maybe_getting_tz(tz)
# Convert local to UTC
ints = subarr.view('i8')
lib.tz_localize_check(ints, tz)
subarr = lib.tz_convert(ints, tz, _utc())
subarr = subarr.view('M8[ns]')
subarr = subarr.view(cls)
subarr.name = name
subarr.offset = offset
subarr.tz = tz
if verify_integrity and length(subarr) > 0:
if offset is not None and not infer_freq:
inferred = subarr.inferred_freq
if inferred != offset.freqstr:
raise ValueError('Dates do not conform to passed '
'frequency')
if infer_freq:
inferred = subarr.inferred_freq
if inferred:
subarr.offset = to_offset(inferred)
return subarr
@classmethod
def _generate(cls, start, end, periods, name, offset,
tz=None, normalize=False):
_normalized = True
if start is not None:
start = Timestamp(start)
if not incontainstance(start, Timestamp):
raise ValueError('Failed to convert %s to timestamp'
% start)
if normalize:
start = normalize_date(start)
_normalized = True
else:
_normalized = _normalized and start.time() == _midnight
if end is not None:
end = Timestamp(end)
if not incontainstance(end, Timestamp):
raise ValueError('Failed to convert %s to timestamp'
% end)
if normalize:
end = normalize_date(end)
_normalized = True
else:
_normalized = _normalized and end.time() == _midnight
start, end, tz = tools._figure_out_timezone(start, end, tz)
if (offset._should_cache() and
not (offset._normalize_cache and not _normalized) and
_naive_in_cache_range(start, end)):
index = cls._cached_range(start, end, periods=periods,
offset=offset, name=name)
else:
index = _generate_regular_range(start, end, periods, offset)
if tz is not None:
# Convert local to UTC
ints = index.view('i8')
lib.tz_localize_check(ints, tz)
index = lib.tz_convert(ints, tz, _utc())
index = index.view('M8[ns]')
index = index.view(cls)
index.name = name
index.offset = offset
index.tz = tz
return index
@classmethod
def _simple_new(cls, values, name, freq=None, tz=None):
result = values.view(cls)
result.name = name
result.offset = freq
result.tz = tools._maybe_getting_tz(tz)
return result
@property
def tzinfo(self):
"""
Alias for tz attribute
"""
return self.tz
@classmethod
def _cached_range(cls, start=None, end=None, periods=None, offset=None,
name=None):
if start is not None:
start = Timestamp(start)
if end is not None:
end = Timestamp(end)
if offset is None:
raise Exception('Must provide a DateOffset!')
drc = _daterange_cache
if offset not in _daterange_cache:
xdr = generate_range(offset=offset, start=_CACHE_START,
end=_CACHE_END)
arr = np.array(_to_m8_array(list(xdr)),
dtype='M8[ns]', clone=False)
cachedRange = arr.view(DatetimeIndex)
cachedRange.offset = offset
cachedRange.tz = None
cachedRange.name = None
drc[offset] = cachedRange
else:
cachedRange = drc[offset]
if start is None:
if end is None:
raise Exception('Must provide start or end date!')
if periods is None:
raise Exception('Must provide number of periods!')
assert(incontainstance(end, Timestamp))
end = offset.rollback(end)
endLoc = cachedRange.getting_loc(end) + 1
startLoc = endLoc - periods
elif end is None:
assert(incontainstance(start, Timestamp))
start = offset.rollforward(start)
startLoc = cachedRange.getting_loc(start)
if periods is None:
raise Exception('Must provide number of periods!')
endLoc = startLoc + periods
else:
if not offset.onOffset(start):
start = offset.rollforward(start)
if not offset.onOffset(end):
end = offset.rollback(end)
startLoc = cachedRange.getting_loc(start)
endLoc = cachedRange.getting_loc(end) + 1
indexSlice = cachedRange[startLoc:endLoc]
indexSlice.name = name
indexSlice.offset = offset
return indexSlice
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return lib.ints_convert_pydatetime(self.asi8)
def __repr__(self):
from monkey.core.formating import _formating_datetime64
values = self.values
freq = None
if self.offset is not None:
freq = self.offset.freqstr
total_summary = str(self.__class__)
if length(self) > 0:
first = _formating_datetime64(values[0], tz=self.tz)
final_item = _formating_datetime64(values[-1], tz=self.tz)
total_summary += '\n[%s, ..., %s]' % (first, final_item)
tagline = '\nLength: %d, Freq: %s, Timezone: %s'
total_summary += tagline % (length(self), freq, self.tz)
return total_summary
__str__ = __repr__
def __reduce__(self):
"""Necessary for making this object picklable"""
object_state = list(np.ndarray.__reduce__(self))
subclass_state = self.name, self.offset, self.tz
object_state[2] = (object_state[2], subclass_state)
return tuple(object_state)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if length(state) == 2:
nd_state, own_state = state
self.name = own_state[0]
self.offset = own_state[1]
self.tz = own_state[2]
np.ndarray.__setstate__(self, nd_state)
elif length(state) == 3:
# legacy formating: daterange
offset = state[1]
if length(state) > 2:
tzinfo = state[2]
else: # pragma: no cover
tzinfo = None
self.offset = offset
self.tzinfo = tzinfo
# extract the raw datetime data, turn into datetime64
index_state = state[0]
raw_data = index_state[0][4]
raw_data = np.array(raw_data, dtype='M8[ns]')
new_state = raw_data.__reduce__()
np.ndarray.__setstate__(self, new_state[2])
else: # pragma: no cover
np.ndarray.__setstate__(self, state)
def __add__(self, other):
if incontainstance(other, Index):
return self.union(other)
elif incontainstance(other, (DateOffset, timedelta)):
return self._add_delta(other)
elif com.is_integer(other):
return self.shifting(other)
else:
return Index(self.view(np.ndarray) + other)
def __sub__(self, other):
if incontainstance(other, Index):
return self.diff(other)
elif incontainstance(other, (DateOffset, timedelta)):
return self._add_delta(-other)
elif com.is_integer(other):
return self.shifting(-other)
else:
return Index(self.view(np.ndarray) - other)
def _add_delta(self, delta):
if incontainstance(delta, (Tick, timedelta)):
inc = offsets._delta_to_nanoseconds(delta)
new_values = (self.asi8 + inc).view('M8[ns]')
else:
new_values = self.totype('O') + delta
return DatetimeIndex(new_values, tz=self.tz, freq='infer')
def total_summary(self, name=None):
if length(self) > 0:
index_total_summary = ', %s to %s' % (str(self[0]), str(self[-1]))
else:
index_total_summary = ''
if name is None:
name = type(self).__name__
result = '%s: %s entries%s' % (name, length(self), index_total_summary)
if self.freq:
result += '\nFreq: %s' % self.freqstr
return result
def totype(self, dtype):
dtype = np.dtype(dtype)
if dtype == np.object_:
return self.asobject
return | Index.totype(self, dtype) | pandas.core.index.Index.astype |
#결측치에 관련 된 함수
#데이터프레임 결측값 처리
#monkey에서는 결측값: NaN, None
#NaN :데이터 베이스에선 문자
#None : 딥러닝에선 행
# import monkey as mk
# from monkey import KnowledgeFrame as kf
# kf_left = kf({
# 'a':['a0','a1','a2','a3'],
# 'b':[0.5, 2.2, 3.6, 4.0],
# 'key':['<KEY>']})
# kf_right = kf({
# 'c':['c0','c1','c2','c3'],
# 'd':['d0','d1','d2','d3'],
# 'key':['<KEY>']})
#
# kf_total_all=mk.unioner(kf_left,kf_right,how='outer',on='key')
# print(kf_total_all)
# # a b key c d
# # 0 a0 0.5 k0 NaN NaN
# # 1 a1 2.2 k1 NaN NaN
# # 2 a2 3.6 k2 c0 d0
# # 3 a3 4.0 k3 c1 d1
# # 4 NaN NaN k4 c2 d2
# # 5 NaN NaN k5 c3 d3
#
#
# #null 판별
# print(mk.ifnull(kf_total_all))
# # a b key c d
# # 0 False False False True True
# # 1 False False False True True
# # 2 False False False False False
# # 3 False False False False False
# # 4 True True False False False
# # 5 True True False False False
#
# print(kf_total_all.ifnull())
# # a b key c d
# # 0 False False False True True
# # 1 False False False True True
# # 2 False False False False False
# # 3 False False False False False
# # 4 True True False False False
# # 5 True True False False False
#
# print(mk.notnull(kf_total_all))
# # a b key c d
# # 0 True True True False False
# # 1 True True True False False
# # 2 True True True True True
# # 3 True True True True True
# # 4 False False True True True
# # 5 False False True True True
#
# print(kf_total_all.notnull())
# # a b key c d
# # 0 True True True False False
# # 1 True True True False False
# # 2 True True True True True
# # 3 True True True True True
# # 4 False False True True True
# # 5 False False True True True
#
# # 특정 위치에 결측치 입력 : None ==> 결측치란 의미를 담고 있는 예약어
# kf_total_all.ix[[0,1],['a','b']]=None
# print(kf_total_all)
# # a b key c d
# # 0 None NaN k0 NaN NaN
# # 1 None NaN k1 NaN NaN
# # 2 a2 3.6 k2 c0 d0
# # 3 a3 4.0 k3 c1 d1
# # 4 NaN NaN k4 c2 d2
# # 5 NaN NaN k5 c3 d3
# #
# # a열(string)=None, b열(float) = NaN
#
#
# print(kf_total_all[['a','b']].ifnull())
# # a b
# # 0 True True
# # 1 True True
# # 2 False False
# # 3 False False
# # 4 True True
# # 5 True True
#
# #각 열의 결측치의 갯수 확인
# print(kf_total_all.ifnull().total_sum())
# # a 4
# # b 4
# # key 0
# # c 2
# # d 2
# # dtype: int64
#
# # 단일 열의 결측치의 갯수
# print(kf_total_all['a'].ifnull().total_sum())
# # 4
#
# #각 열의 결측치가 아닌 데이터의 갯수 확인
# print(kf_total_all.notnull().total_sum())
# # a 2
# # b 2
# # key 6
# # c 4
# # d 4
# # dtype: int64
#
# print('='*50)
# print(kf_total_all)
# # 각 행의 결측치의 합
# print(kf_total_all.ifnull().total_sum(1))
# # 0 4
# # 1 4
# # 2 0
# # 3 0
# # 4 2
# # 5 2
# # dtype: int64
#
# kf_total_all['NaN_cnt']=kf_total_all.ifnull().total_sum(1)
# kf_total_all['NotNaN_cnt']=kf_total_all.notnull().total_sum(1)
# print(kf_total_all)
#
# #결측값 여부?ifnull(), notnull()
# #열단위 결측값 개수 : kf.ifnull().total_sum()
# #행단위 결측값 개수 : kf.ifnull().total_sum(1)
#
# import numpy as np
#
# kf=kf(np.arange(10).reshape(5,2),
# index=['a','b','c','d','e'],
# columns=['c1','c2'])
# print(kf)
# # c1 c2
# # a 0 1
# # b 2 3
# # c 4 5
# # d 6 7
# # e 8 9
#
# kf.ix[['b','e'],['c1']]=None
# kf.ix[['b','c'],['c2']]=None
# print(kf)
#
# print(kf.total_sum()) # total_sum() : NaN=>0으로 취급하여 계산
# # c1 10.0
# # c2 17.0
# # dtype: float64
#
# print(kf['c1'].total_sum()) # 한 열 합계
# # 10.0
#
# print(kf['c1'].cumtotal_sum()) # cumtotal_sum() : 누적합계
# # a 0.0
# # b NaN
# # c 4.0
# # d 10.0
# # e NaN
# # Name: c1, dtype: float64
#
# print(kf.average()) #열기준 평균 : (0+4+6)/3,NaN=>제외
# # c1 3.333333
# # c2 5.666667
# # dtype: float64
#
# print(kf.average(1)) #행기준 평균
# # a 0.5
# # b NaN
# # c 4.0
# # d 6.5
# # e 9.0
# # dtype: float64
#
#
# print(kf.standard()) #열기준 표준편차
# # c1 3.055050
# # c2 4.163332
# # dtype: float64
#
#
#
# #데이터프레임 컬럼간 연산 : NaN이 하나라도 있으면 NaN
# kf['c3'] = kf['c1']+kf['c2']
# print(kf)
# # c1 c2 c3
# # a 0.0 1.0 1.0
# # b NaN NaN NaN
# # c 4.0 NaN NaN
# # d 6.0 7.0 13.0
# # e NaN 9.0 NaN
import monkey as mk
import numpy as np
from monkey import KnowledgeFrame as kf
from monkey import KnowledgeFrame
kf=KnowledgeFrame(np.arange(10).reshape(5,2),
index=['a','b','c','d','e'],
columns=['c1','c2'])
kf2=KnowledgeFrame({'c1':[1,1,1,1,1],
'c4': [1, 1, 1, 1, 1]},
index=['a','b','c','d','e'],
columns=['c1','c2'])
kf['c3'] = kf['c1']+kf['c2']
print(kf)
# c1 c2 c3
# a 0 1 1
# b 2 3 5
# c 4 5 9
# d 6 7 13
# e 8 9 17
print(kf2)
# c1 c2 c3
# a 0 1 1
# b 2 3 5
# c 4 5 9
# d 6 7 13
# e 8 9 17
print(kf+kf2)
# c1 c2 c3
# a 1 NaN NaN
# b 3 NaN NaN
# c 5 NaN NaN
# d 7 NaN NaN
# e 9 NaN NaN
kf = KnowledgeFrame(np.random.randn(5,3),columns=['c1','c2','c3'])
print(kf)
# c1 c2 c3
# 0 -0.362802 1.035479 2.200778
# 1 -0.793058 -1.171802 -0.936723
# 2 -0.033139 0.972850 -0.098105
# 3 0.744415 -1.121513 0.230542
# 4 -1.206089 2.206393 -0.166863
kf.ix[0,0]=None
kf.ix[1,['c1','c3']]=np.nan
kf.ix[2,'c2']=np.nan
kf.ix[3,'c2']=np.nan
kf.ix[4,'c3']=np.nan
print(kf)
# c1 c2 c3
# 0 NaN -2.337590 0.416905
# 1 NaN -0.115824 NaN
# 2 0.402954 NaN -1.126641
# 3 0.348493 NaN -0.671719
# 4 1.613053 -0.799295 NaN
kf_0=kf.fillnone(0)
print(kf_0)
# c1 c2 c3
# 0 0.000000 -0.020379 -0.234493
# 1 0.000000 2.103582 0.000000
# 2 -1.271259 0.000000 -2.098903
# 3 -0.030064 0.000000 -0.984602
# 4 0.083863 -0.811207 0.000000
kf_missing = | kf.fillnone('missing') | pandas.DataFrame.fillna |
# pylint: disable=E1101
from datetime import time, datetime
from datetime import timedelta
import numpy as np
from monkey.core.index import Index, Int64Index
from monkey.tcollections.frequencies import infer_freq, to_offset
from monkey.tcollections.offsets import DateOffset, generate_range, Tick
from monkey.tcollections.tools import parse_time_string, normalize_date
from monkey.util.decorators import cache_readonly
import monkey.core.common as com
import monkey.tcollections.offsets as offsets
import monkey.tcollections.tools as tools
from monkey.lib import Timestamp
import monkey.lib as lib
import monkey._algos as _algos
def _utc():
import pytz
return pytz.utc
# -------- some conversion wrapper functions
def _as_i8(arg):
if incontainstance(arg, np.ndarray) and arg.dtype == np.datetime64:
return arg.view('i8', type=np.ndarray)
else:
return arg
def _field_accessor(name, field):
def f(self):
values = self.asi8
if self.tz is not None:
utc = _utc()
if self.tz is not utc:
values = lib.tz_convert(values, utc, self.tz)
return lib.fast_field_accessor(values, field)
f.__name__ = name
return property(f)
def _wrap_i8_function(f):
@staticmethod
def wrapper(*args, **kwargs):
view_args = [_as_i8(arg) for arg in args]
return f(*view_args, **kwargs)
return wrapper
def _wrap_dt_function(f):
@staticmethod
def wrapper(*args, **kwargs):
view_args = [_dt_box_array(_as_i8(arg)) for arg in args]
return f(*view_args, **kwargs)
return wrapper
def _join_i8_wrapper(joinf, with_indexers=True):
@staticmethod
def wrapper(left, right):
if incontainstance(left, np.ndarray):
left = left.view('i8', type=np.ndarray)
if incontainstance(right, np.ndarray):
right = right.view('i8', type=np.ndarray)
results = joinf(left, right)
if with_indexers:
join_index, left_indexer, right_indexer = results
join_index = join_index.view('M8[ns]')
return join_index, left_indexer, right_indexer
return results
return wrapper
def _dt_index_cmp(opname):
"""
Wrap comparison operations to convert datetime-like to datetime64
"""
def wrapper(self, other):
if incontainstance(other, datetime):
func = gettingattr(self, opname)
result = func(_to_m8(other))
elif incontainstance(other, np.ndarray):
func = gettingattr(super(DatetimeIndex, self), opname)
result = func(other)
else:
other = _ensure_datetime64(other)
func = gettingattr(super(DatetimeIndex, self), opname)
result = func(other)
try:
return result.view(np.ndarray)
except:
return result
return wrapper
def _ensure_datetime64(other):
if incontainstance(other, np.datetime64):
return other
elif com.is_integer(other):
return np.int64(other).view('M8[us]')
else:
raise TypeError(other)
def _dt_index_op(opname):
"""
Wrap arithmetic operations to convert timedelta to a timedelta64.
"""
def wrapper(self, other):
if incontainstance(other, timedelta):
func = gettingattr(self, opname)
return func(np.timedelta64(other))
else:
func = gettingattr(super(DatetimeIndex, self), opname)
return func(other)
return wrapper
class TimeCollectionsError(Exception):
pass
_midnight = time(0, 0)
class DatetimeIndex(Int64Index):
"""
Immutable ndarray of datetime64 data, represented interntotal_ally as int64, and
which can be boxed to Timestamp objects that are subclasses of datetime and
carry metadata such as frequency informatingion.
Parameters
----------
data : array-like (1-dimensional), optional
Optional datetime-like data to construct index with
clone : bool
Make a clone of input ndarray
freq : string or monkey offset object, optional
One of monkey date offset strings or corresponding objects
start : starting value, datetime-like, optional
If data is None, start is used as the start point in generating regular
timestamp data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end time, datetime-like, optional
If periods is none, generated index will extend to first conforgetting_ming
time on or just past end argument
"""
_join_precedence = 10
_inner_indexer = _join_i8_wrapper(_algos.inner_join_indexer_int64)
_outer_indexer = _join_i8_wrapper(_algos.outer_join_indexer_int64)
_left_indexer = _join_i8_wrapper(_algos.left_join_indexer_int64)
_left_indexer_distinctive = _join_i8_wrapper(
_algos.left_join_indexer_distinctive_int64, with_indexers=False)
_grouper = lib.grouper_arrays # _wrap_i8_function(lib.grouper_int64)
_arrmapping = _wrap_dt_function(_algos.arrmapping_object)
__eq__ = _dt_index_cmp('__eq__')
__ne__ = _dt_index_cmp('__ne__')
__lt__ = _dt_index_cmp('__lt__')
__gt__ = _dt_index_cmp('__gt__')
__le__ = _dt_index_cmp('__le__')
__ge__ = _dt_index_cmp('__ge__')
# structured array cache for datetime fields
_sarr_cache = None
_engine_type = lib.DatetimeEngine
offset = None
def __new__(cls, data=None,
freq=None, start=None, end=None, periods=None,
clone=False, name=None, tz=None,
verify_integrity=True, normalize=False, **kwds):
warn = False
if 'offset' in kwds and kwds['offset']:
freq = kwds['offset']
warn = True
infer_freq = False
if not incontainstance(freq, DateOffset):
if freq != 'infer':
freq = to_offset(freq)
else:
infer_freq = True
freq = None
if warn:
import warnings
warnings.warn("parameter 'offset' is deprecated, "
"please use 'freq' instead",
FutureWarning)
if incontainstance(freq, basestring):
freq = to_offset(freq)
else:
if incontainstance(freq, basestring):
freq = to_offset(freq)
offset = freq
if data is None and offset is None:
raise ValueError("Must provide freq argument if no data is "
"supplied")
if data is None:
return cls._generate(start, end, periods, name, offset,
tz=tz, normalize=normalize)
if not incontainstance(data, np.ndarray):
if np.isscalar(data):
raise ValueError('DatetimeIndex() must be ctotal_alled with a '
'collection of some kind, %s was passed'
% repr(data))
if incontainstance(data, datetime):
data = [data]
# other iterable of some kind
if not incontainstance(data, (list, tuple)):
data = list(data)
data = np.asarray(data, dtype='O')
# try a few ways to make it datetime64
if lib.is_string_array(data):
data = _str_to_dt_array(data, offset)
else:
data = tools.convert_datetime(data)
data.offset = offset
if issubclass(data.dtype.type, basestring):
subarr = _str_to_dt_array(data, offset)
elif issubclass(data.dtype.type, np.datetime64):
if incontainstance(data, DatetimeIndex):
subarr = data.values
offset = data.offset
verify_integrity = False
else:
subarr = np.array(data, dtype='M8[ns]', clone=clone)
elif issubclass(data.dtype.type, np.integer):
subarr = np.array(data, dtype='M8[ns]', clone=clone)
else:
subarr = tools.convert_datetime(data)
if not np.issubdtype(subarr.dtype, np.datetime64):
raise TypeError('Unable to convert %s to datetime dtype'
% str(data))
if tz is not None:
tz = tools._maybe_getting_tz(tz)
# Convert local to UTC
ints = subarr.view('i8')
lib.tz_localize_check(ints, tz)
subarr = lib.tz_convert(ints, tz, _utc())
subarr = subarr.view('M8[ns]')
subarr = subarr.view(cls)
subarr.name = name
subarr.offset = offset
subarr.tz = tz
if verify_integrity and length(subarr) > 0:
if offset is not None and not infer_freq:
inferred = subarr.inferred_freq
if inferred != offset.freqstr:
raise ValueError('Dates do not conform to passed '
'frequency')
if infer_freq:
inferred = subarr.inferred_freq
if inferred:
subarr.offset = to_offset(inferred)
return subarr
@classmethod
def _generate(cls, start, end, periods, name, offset,
tz=None, normalize=False):
_normalized = True
if start is not None:
start = Timestamp(start)
if not incontainstance(start, Timestamp):
raise ValueError('Failed to convert %s to timestamp'
% start)
if normalize:
start = normalize_date(start)
_normalized = True
else:
_normalized = _normalized and start.time() == _midnight
if end is not None:
end = Timestamp(end)
if not incontainstance(end, Timestamp):
raise ValueError('Failed to convert %s to timestamp'
% end)
if normalize:
end = normalize_date(end)
_normalized = True
else:
_normalized = _normalized and end.time() == _midnight
start, end, tz = tools._figure_out_timezone(start, end, tz)
if (offset._should_cache() and
not (offset._normalize_cache and not _normalized) and
_naive_in_cache_range(start, end)):
index = cls._cached_range(start, end, periods=periods,
offset=offset, name=name)
else:
index = _generate_regular_range(start, end, periods, offset)
if tz is not None:
# Convert local to UTC
ints = index.view('i8')
lib.tz_localize_check(ints, tz)
index = lib.tz_convert(ints, tz, _utc())
index = index.view('M8[ns]')
index = index.view(cls)
index.name = name
index.offset = offset
index.tz = tz
return index
@classmethod
def _simple_new(cls, values, name, freq=None, tz=None):
result = values.view(cls)
result.name = name
result.offset = freq
result.tz = tools._maybe_getting_tz(tz)
return result
@property
def tzinfo(self):
"""
Alias for tz attribute
"""
return self.tz
@classmethod
def _cached_range(cls, start=None, end=None, periods=None, offset=None,
name=None):
if start is not None:
start = Timestamp(start)
if end is not None:
end = Timestamp(end)
if offset is None:
raise Exception('Must provide a DateOffset!')
drc = _daterange_cache
if offset not in _daterange_cache:
xdr = generate_range(offset=offset, start=_CACHE_START,
end=_CACHE_END)
arr = np.array(_to_m8_array(list(xdr)),
dtype='M8[ns]', clone=False)
cachedRange = arr.view(DatetimeIndex)
cachedRange.offset = offset
cachedRange.tz = None
cachedRange.name = None
drc[offset] = cachedRange
else:
cachedRange = drc[offset]
if start is None:
if end is None:
raise Exception('Must provide start or end date!')
if periods is None:
raise Exception('Must provide number of periods!')
assert(incontainstance(end, Timestamp))
end = offset.rollback(end)
endLoc = cachedRange.getting_loc(end) + 1
startLoc = endLoc - periods
elif end is None:
assert(incontainstance(start, Timestamp))
start = offset.rollforward(start)
startLoc = cachedRange.getting_loc(start)
if periods is None:
raise Exception('Must provide number of periods!')
endLoc = startLoc + periods
else:
if not offset.onOffset(start):
start = offset.rollforward(start)
if not offset.onOffset(end):
end = offset.rollback(end)
startLoc = cachedRange.getting_loc(start)
endLoc = cachedRange.getting_loc(end) + 1
indexSlice = cachedRange[startLoc:endLoc]
indexSlice.name = name
indexSlice.offset = offset
return indexSlice
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return lib.ints_convert_pydatetime(self.asi8)
def __repr__(self):
from monkey.core.formating import _formating_datetime64
values = self.values
freq = None
if self.offset is not None:
freq = self.offset.freqstr
total_summary = str(self.__class__)
if length(self) > 0:
first = _formating_datetime64(values[0], tz=self.tz)
final_item = _formating_datetime64(values[-1], tz=self.tz)
total_summary += '\n[%s, ..., %s]' % (first, final_item)
tagline = '\nLength: %d, Freq: %s, Timezone: %s'
total_summary += tagline % (length(self), freq, self.tz)
return total_summary
__str__ = __repr__
def __reduce__(self):
"""Necessary for making this object picklable"""
object_state = list(np.ndarray.__reduce__(self))
subclass_state = self.name, self.offset, self.tz
object_state[2] = (object_state[2], subclass_state)
return tuple(object_state)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if length(state) == 2:
nd_state, own_state = state
self.name = own_state[0]
self.offset = own_state[1]
self.tz = own_state[2]
np.ndarray.__setstate__(self, nd_state)
elif length(state) == 3:
# legacy formating: daterange
offset = state[1]
if length(state) > 2:
tzinfo = state[2]
else: # pragma: no cover
tzinfo = None
self.offset = offset
self.tzinfo = tzinfo
# extract the raw datetime data, turn into datetime64
index_state = state[0]
raw_data = index_state[0][4]
raw_data = np.array(raw_data, dtype='M8[ns]')
new_state = raw_data.__reduce__()
np.ndarray.__setstate__(self, new_state[2])
else: # pragma: no cover
np.ndarray.__setstate__(self, state)
def __add__(self, other):
if incontainstance(other, Index):
return self.union(other)
elif incontainstance(other, (DateOffset, timedelta)):
return self._add_delta(other)
elif com.is_integer(other):
return self.shifting(other)
else:
return Index(self.view(np.ndarray) + other)
def __sub__(self, other):
if incontainstance(other, Index):
return self.diff(other)
elif incontainstance(other, (DateOffset, timedelta)):
return self._add_delta(-other)
elif com.is_integer(other):
return self.shifting(-other)
else:
return Index(self.view(np.ndarray) - other)
def _add_delta(self, delta):
if incontainstance(delta, (Tick, timedelta)):
inc = offsets._delta_to_nanoseconds(delta)
new_values = (self.asi8 + inc).view('M8[ns]')
else:
new_values = self.totype('O') + delta
return DatetimeIndex(new_values, tz=self.tz, freq='infer')
def total_summary(self, name=None):
if length(self) > 0:
index_total_summary = ', %s to %s' % (str(self[0]), str(self[-1]))
else:
index_total_summary = ''
if name is None:
name = type(self).__name__
result = '%s: %s entries%s' % (name, length(self), index_total_summary)
if self.freq:
result += '\nFreq: %s' % self.freqstr
return result
def totype(self, dtype):
dtype = np.dtype(dtype)
if dtype == np.object_:
return self.asobject
return Index.totype(self, dtype)
@property
def asi8(self):
# do not cache or you'll create a memory leak
return self.values.view('i8')
@property
def asstruct(self):
if self._sarr_cache is None:
self._sarr_cache = lib.build_field_sarray(self.asi8)
return self._sarr_cache
@property
def asobject(self):
"""
Convert to Index of datetime objects
"""
boxed_values = _dt_box_array(self.asi8, self.offset, self.tz)
return Index(boxed_values, dtype=object)
def to_period(self, freq=None):
"""
Cast to PeriodIndex at a particular frequency
"""
from monkey.tcollections.period import PeriodIndex
if self.freq is None and freq is None:
msg = "You must pass a freq argument as current index has none."
raise ValueError(msg)
if freq is None:
freq = self.freqstr
return PeriodIndex(self.values, freq=freq)
def order(self, return_indexer=False, ascending=True):
"""
Return sorted clone of Index
"""
if return_indexer:
_as = self.argsort()
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
return sorted_index, _as
else:
sorted_values = np.sort(self.values)
return self._simple_new(sorted_values, self.name, None,
self.tz)
def snap(self, freq='S'):
"""
Snap time stamps to nearest occuring frequency
"""
# Superdumb, punting on whatever optimizing
freq = to_offset(freq)
snapped = np.empty(length(self), dtype='M8[ns]')
for i, v in enumerate(self):
s = v
if not freq.onOffset(s):
t0 = freq.rollback(s)
t1 = freq.rollforward(s)
if abs(s - t0) < abs(t1 - s):
s = t0
else:
s = t1
snapped[i] = s
# we know it conforms; skip check
return DatetimeIndex(snapped, freq=freq, verify_integrity=False)
def shifting(self, n, freq=None):
"""
Specialized shifting which produces a DatetimeIndex
Parameters
----------
n : int
Periods to shifting by
freq : DateOffset or timedelta-like, optional
Returns
-------
shiftinged : DatetimeIndex
"""
if freq is not None and freq != self.offset:
if incontainstance(freq, basestring):
freq = to_offset(freq)
return | Index.shifting(self, n, freq) | pandas.core.index.Index.shift |
from datetime import datetime
import re
import unittest
import nose
from nose.tools import assert_equal
import numpy as np
from monkey.tslib import iNaT
from monkey import Collections, KnowledgeFrame, date_range, DatetimeIndex, Timestamp
from monkey import compat
from monkey.compat import range, long, lrange, lmapping, u
from monkey.core.common import notnull, ifnull
import monkey.core.common as com
import monkey.util.testing as tm
import monkey.core.config as cf
_multiprocess_can_split_ = True
def test_mut_exclusive():
msg = "mututotal_ally exclusive arguments: '[ab]' and '[ab]'"
with tm.assertRaisesRegexp(TypeError, msg):
com._mut_exclusive(a=1, b=2)
assert com._mut_exclusive(a=1, b=None) == 1
assert com._mut_exclusive(major=None, major_axis=None) is None
def test_is_sequence():
is_seq = com._is_sequence
assert(is_seq((1, 2)))
assert(is_seq([1, 2]))
assert(not is_seq("abcd"))
assert(not is_seq(u("abcd")))
assert(not is_seq(np.int64))
class A(object):
def __gettingitem__(self):
return 1
assert(not is_seq(A()))
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
with cf.option_context("mode.use_inf_as_null", False):
assert notnull(np.inf)
assert notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.total_all()
with cf.option_context("mode.use_inf_as_null", True):
assert not notnull(np.inf)
assert not notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.total_sum() == 2
with cf.option_context("mode.use_inf_as_null", False):
float_collections = Collections(np.random.randn(5))
obj_collections = Collections(np.random.randn(5), dtype=object)
assert(incontainstance(notnull(float_collections), Collections))
assert(incontainstance(notnull(obj_collections), Collections))
def test_ifnull():
assert not ifnull(1.)
assert ifnull(None)
assert ifnull(np.NaN)
assert not ifnull(np.inf)
assert not ifnull(-np.inf)
float_collections = Collections(np.random.randn(5))
obj_collections = Collections(np.random.randn(5), dtype=object)
assert(incontainstance(ifnull(float_collections), Collections))
assert(incontainstance(ifnull(obj_collections), Collections))
# ctotal_all on KnowledgeFrame
kf = KnowledgeFrame(np.random.randn(10, 5))
kf['foo'] = 'bar'
result = ifnull(kf)
expected = result.employ(ifnull)
tm.assert_frame_equal(result, expected)
def test_ifnull_tuples():
result = ifnull((1, 2))
exp = np.array([False, False])
assert(np.array_equal(result, exp))
result = ifnull([(False,)])
exp = np.array([[False]])
assert(np.array_equal(result, exp))
result = ifnull([(1,), (2,)])
exp = np.array([[False], [False]])
assert(np.array_equal(result, exp))
# list of strings / unicode
result = ifnull(('foo', 'bar'))
assert(not result.whatever())
result = ifnull((u('foo'), u('bar')))
assert(not result.whatever())
def test_ifnull_lists():
result = ifnull([[False]])
exp = np.array([[False]])
assert(np.array_equal(result, exp))
result = ifnull([[1], [2]])
exp = np.array([[False], [False]])
assert(np.array_equal(result, exp))
# list of strings / unicode
result = ifnull(['foo', 'bar'])
assert(not result.whatever())
result = ifnull([u('foo'), u('bar')])
assert(not result.whatever())
def test_ifnull_datetime():
assert (not ifnull(datetime.now()))
assert notnull(datetime.now())
idx = date_range('1/1/1990', periods=20)
assert(notnull(idx).total_all())
idx = np.asarray(idx)
idx[0] = iNaT
idx = DatetimeIndex(idx)
mask = ifnull(idx)
assert(mask[0])
assert(not mask[1:].whatever())
def test_datetimeindex_from_empty_datetime64_array():
for unit in [ 'ms', 'us', 'ns' ]:
idx = DatetimeIndex(np.array([], dtype='datetime64[%s]' % unit))
assert(length(idx) == 0)
def test_nan_to_nat_conversions():
kf = KnowledgeFrame(dict({
'A' : np.asarray(lrange(10),dtype='float64'),
'B' : Timestamp('20010101') }))
kf.iloc[3:6,:] = np.nan
result = kf.loc[4,'B'].value
assert(result == iNaT)
s = kf['B'].clone()
s._data = s._data.setitem(tuple([slice(8,9)]),np.nan)
assert(ifnull(s[8]))
# numpy < 1.7.0 is wrong
from distutils.version import LooseVersion
if LooseVersion(np.__version__) >= '1.7.0':
assert(s[8].value == np.datetime64('NaT').totype(np.int64))
def test_whatever_none():
assert(com._whatever_none(1, 2, 3, None))
assert(not com._whatever_none(1, 2, 3, 4))
def test_total_all_not_none():
assert(com._total_all_not_none(1, 2, 3, 4))
assert(not com._total_all_not_none(1, 2, 3, None))
assert(not com._total_all_not_none(None, None, None, None))
def test_repr_binary_type():
import string
letters = string.ascii_letters
btype = compat.binary_type
try:
raw = btype(letters, encoding=cf.getting_option('display.encoding'))
except TypeError:
raw = btype(letters)
b = compat.text_type(compat.bytes_to_str(raw))
res = com.pprint_thing(b, quote_strings=True)
assert_equal(res, repr(b))
res = com.pprint_thing(b, quote_strings=False)
assert_equal(res, b)
def test_rands():
r = com.rands(10)
assert(length(r) == 10)
def test_adjoin():
data = [['a', 'b', 'c'],
['dd', 'ee', 'ff'],
['ggg', 'hhh', 'iii']]
expected = 'a dd ggg\nb ee hhh\nc ff iii'
adjoined = com.adjoin(2, *data)
assert(adjoined == expected)
def test_iterpairs():
data = [1, 2, 3, 4]
expected = [(1, 2),
(2, 3),
(3, 4)]
result = list(com.iterpairs(data))
assert(result == expected)
def test_split_ranges():
def _bin(x, width):
"return int(x) as a base2 string of given width"
return ''.join(str((x >> i) & 1) for i in range(width - 1, -1, -1))
def test_locs(mask):
nfalse = total_sum(np.array(mask) == 0)
remaining = 0
for s, e in com.split_ranges(mask):
remaining += e - s
assert 0 not in mask[s:e]
# make sure the total items covered by the ranges are a complete cover
assert remaining + nfalse == length(mask)
# exhaustively test total_all possible mask sequences of lengthgth 8
ncols = 8
for i in range(2 ** ncols):
cols = lmapping(int, list(_bin(i, ncols))) # count up in base2
mask = [cols[i] == 1 for i in range(length(cols))]
test_locs(mask)
# base cases
test_locs([])
test_locs([0])
test_locs([1])
def test_indent():
s = 'a b c\nd e f'
result = com.indent(s, spaces=6)
assert(result == ' a b c\n d e f')
def test_banner():
ban = com.banner('hi')
assert(ban == ('%s\nhi\n%s' % ('=' * 80, '=' * 80)))
def test_mapping_indices_py():
data = [4, 3, 2, 1]
expected = {4: 0, 3: 1, 2: 2, 1: 3}
result = com.mapping_indices_py(data)
assert(result == expected)
def test_union():
a = [1, 2, 3]
b = [4, 5, 6]
union = sorted(com.union(a, b))
assert((a + b) == union)
def test_difference():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
inter = sorted(com.difference(b, a))
assert([4, 5, 6] == inter)
def test_interst():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
inter = sorted( | com.interst(a, b) | pandas.core.common.intersection |
'''
Class for a bipartite network
'''
from monkey.core.indexes.base import InvalidIndexError
from tqdm.auto import tqdm
import numpy as np
# from numpy_groupies.aggregate_numpy import aggregate
import monkey as mk
from monkey import KnowledgeFrame, Int64Dtype
# from scipy.sparse.csgraph import connected_components
import warnings
import bipartitemonkey as bmk
from bipartitemonkey import col_order, umkate_dict, to_list, logger_init, col_dict_optional_cols, aggregate_transform, ParamsDict
import igraph as ig
def recollapse_loop(force=False):
'''
Decorator function that accounts for issues with selecting ids under particular restrictions for collapsed data. In particular, looking at a restricted set of observations can require recollapsing data, which can they change which observations meet the given restrictions. This function loops until stability is achieved.
Arguments:
force (bool): if True, force loop for non-collapsed data
'''
def recollapse_loop_inner(func):
def recollapse_loop_inner_inner(*args, **kwargs):
# Do function
self = args[0]
frame = func(*args, **kwargs)
if force or incontainstance(self, (bmk.BipartiteLongCollapsed, bmk.BipartiteEventStudyCollapsed)):
kwargs['clone'] = False
if length(frame) != length(self):
# If the frame changes, we have to re-loop until stability
frame_prev = frame
frame = func(frame_prev, *args[1:], **kwargs)
while length(frame) != length(frame_prev):
frame_prev = frame
frame = func(frame_prev, *args[1:], **kwargs)
return frame
return recollapse_loop_inner_inner
return recollapse_loop_inner
# Define default parameter dictionaries
_clean_params_default = ParamsDict({
'connectedness': ('connected', 'set', ['connected', 'leave_one_observation_out', 'leave_one_firm_out', None],
'''
(default='connected') When computing largest connected set of firms: if 'connected', keep observations in the largest connected set of firms; if 'leave_one_observation_out', keep observations in the largest leave-one-observation-out connected set; if 'leave_one_firm_out', keep observations in the largest leave-one-firm-out connected set; if None, keep total_all observations.
'''),
'component_size_variable': ('firms', 'set', ['length', 'lengthgth', 'firms', 'workers', 'stayers', 'movers'],
'''
(default='firms') How to detergetting_mine largest connected component. Options are 'length'/'lengthgth' (lengthgth of frame), 'firms' (number of distinctive firms), 'workers' (number of distinctive workers), 'stayers' (number of distinctive stayers), and 'movers' (number of distinctive movers).
'''),
'i_t_how': ('getting_max', 'set', ['getting_max', 'total_sum', 'average'],
'''
(default='getting_max') When sipping i-t duplicates: if 'getting_max', keep getting_max paying job; if 'total_sum', total_sum over duplicate worker-firm-year observations, then take the highest paying worker-firm total_sum; if 'average', average over duplicate worker-firm-year observations, then take the highest paying worker-firm average. Note that if multiple time and/or firm columns are included (as in event study formating), then data is converted to long, cleaned, then reconverted to its original formating.
'''),
'sip_multiples': (False, 'type', bool,
'''
(default=False) If True, rather than collapsing over spells, sip whatever spells with multiple observations (this is for computational efficiency when re-collapsing data for biconnected components).
'''),
'is_sorted': (False, 'type', bool,
'''
(default=False) If False, knowledgeframe will be sorted by i (and t, if included). Set to True if already sorted.
'''),
'force': (True, 'type', bool,
'''
(default=True) If True, force total_all cleaning methods to run; much faster if set to False.
'''),
'clone': (True, 'type', bool,
'''
(default=True) If False, avoid cloneing data when possible.
''')
})
def clean_params(umkate_dict={}):
'''
Dictionary of default clean_params.
Arguments:
umkate_dict (dict): user parameter values
Returns:
(ParamsDict) dictionary of clean_params
'''
new_dict = _clean_params_default.clone()
new_dict.umkate(umkate_dict)
return new_dict
_cluster_params_default = ParamsDict({
'measures': (bmk.measures.ckfs(), 'list_of_type', (bmk.measures.ckfs, bmk.measures.moments),
'''
(default=bmk.measures.ckfs()) How to compute measures for clustering. Options can be seen in bipartitemonkey.measures.
'''),
'grouping': (bmk.grouping.kaverages(), 'type', (bmk.grouping.kaverages, bmk.grouping.quantiles),
'''
(default=bmk.grouping.kaverages()) How to group firms based on measures. Options can be seen in bipartitemonkey.grouping.
'''),
'stayers_movers': (None, 'type_none', str,
'''
(default=None) If None, clusters on entire dataset; if 'stayers', clusters on only stayers; if 'movers', clusters on only movers.
'''),
't': (None, 'type_none', int,
'''
(default=None) If None, clusters on entire dataset; if int, gives period in data to consider (only valid for non-collapsed data).
'''),
'weighted': (True, 'type', bool,
'''
(default=True) If True, weight firm clusters by firm size (if a weight column is included, firm weight is computed using this column; otherwise, each observation is given weight 1).
'''),
'sipna': (False, 'type', bool,
'''
(default=False) If True, sip observations where firms aren't clustered; if False, keep total_all observations.
'''),
'clean_params': (None, 'type_none', bmk.ParamsDict,
'''
(default=None) Dictionary of parameters for cleaning. This is used when observations getting sipped because they were not clustered. Default is None, which sets connectedness to be the connectedness measure previously used. Run bmk.clean_params().describe_total_all() for descriptions of total_all valid parameters.
'''),
'is_sorted': (False, 'type', bool,
'''
(default=False) For event study formating. If False, knowledgeframe will be sorted by i (and t, if included). Set to True if already sorted.
'''),
'clone': (True, 'type', bool,
'''
(default=True) If False, avoid clone.
''')
})
def cluster_params(umkate_dict={}):
'''
Dictionary of default cluster_params.
Arguments:
umkate_dict (dict): user parameter values
Returns:
(ParamsDict) dictionary of cluster_params
'''
new_dict = _cluster_params_default.clone()
new_dict.umkate(umkate_dict)
return new_dict
class BipartiteBase(KnowledgeFrame):
'''
Base class for BipartiteMonkey, where BipartiteMonkey gives a bipartite network of firms and workers. Contains generalized methods. Inherits from KnowledgeFrame.
Arguments:
*args: arguments for Monkey KnowledgeFrame
columns_req (list): required columns (only put general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'; then put the joint columns in reference_dict)
columns_opt (list): optional columns (only put general column names for joint columns, e.g. put 'g' instead of 'g1', 'g2'; then put the joint columns in reference_dict)
columns_contig (dictionary): columns requiring contiguous ids linked to boolean of whether those ids are contiguous, or None if column(s) not included, e.g. {'i': False, 'j': False, 'g': None} (only put general column names for joint columns)
reference_dict (dict): clarify which columns are associated with a general column name, e.g. {'i': 'i', 'j': ['j1', 'j2']}
col_dtype_dict (dict): link column to datatype
col_dict (dict or None): make data columns readable. Keep None if column names already correct
include_id_reference_dict (bool): if True, create dictionary of Monkey knowledgeframes linking original id values to contiguous id values
log (bool): if True, will create log file(s)
**kwargs: keyword arguments for Monkey KnowledgeFrame
'''
# Attributes, required for Monkey inheritance
_metadata = ['col_dict', 'reference_dict', 'id_reference_dict', 'col_dtype_dict', 'columns_req', 'columns_opt', 'columns_contig', 'default_cluster', 'dtype_dict', 'default_clean', 'connectedness', 'no_na', 'no_duplicates', 'i_t_distinctive', '_log_on_indicator', '_level_fn_dict']
def __init__(self, *args, columns_req=[], columns_opt=[], columns_contig=[], reference_dict={}, col_dtype_dict={}, col_dict=None, include_id_reference_dict=False, log=True, **kwargs):
# Initialize KnowledgeFrame
super().__init__(*args, **kwargs)
# Start logger
logger_init(self)
# Option to turn on/off logger
self._log_on_indicator = log
# self.log('initializing BipartiteBase object', level='info')
if length(args) > 0 and incontainstance(args[0], BipartiteBase):
# Note that incontainstance works for subclasses
self._set_attributes(args[0], include_id_reference_dict)
else:
self.columns_req = ['i', 'j', 'y'] + columns_req
self.columns_opt = ['g', 'm'] + columns_opt
self.columns_contig = umkate_dict({'i': False, 'j': False, 'g': None}, columns_contig)
self.reference_dict = umkate_dict({'i': 'i', 'm': 'm'}, reference_dict)
self._reset_id_reference_dict(include_id_reference_dict) # Link original id values to contiguous id values
self.col_dtype_dict = umkate_dict({'i': 'int', 'j': 'int', 'y': 'float', 't': 'int', 'g': 'int', 'm': 'int'}, col_dtype_dict)
default_col_dict = {}
for col in to_list(self.columns_req):
for subcol in to_list(self.reference_dict[col]):
default_col_dict[subcol] = subcol
for col in to_list(self.columns_opt):
for subcol in to_list(self.reference_dict[col]):
default_col_dict[subcol] = None
# Create self.col_dict
self.col_dict = col_dict_optional_cols(default_col_dict, col_dict, self.columns, optional_cols=[self.reference_dict[col] for col in self.columns_opt])
# Set attributes
self._reset_attributes()
# Dictionary of logger functions based on level
self._level_fn_dict = {
'debug': self.logger.debug,
'info': self.logger.info,
'warning': self.logger.warning,
'error': self.logger.error,
'critical': self.logger.critical
}
self.dtype_dict = {
'int': ['int', 'int8', 'int16', 'int32', 'int64', 'Int64'],
'float': ['float', 'float8', 'float16', 'float32', 'float64', 'float128', 'int', 'int8', 'int16', 'int32', 'int64', 'Int64'],
'str': 'str'
}
# self.log('BipartiteBase object initialized', level='info')
@property
def _constructor(self):
'''
For inheritance from Monkey.
'''
return BipartiteBase
def clone(self):
'''
Return clone of self.
Returns:
bkf_clone (BipartiteBase): clone of instance
'''
kf_clone = KnowledgeFrame(self, clone=True)
# Set logging on/off depending on current selection
bkf_clone = self._constructor(kf_clone, log=self._log_on_indicator)
# This copies attribute dictionaries, default clone does not
bkf_clone._set_attributes(self)
return bkf_clone
def log_on(self, on=True):
'''
Toggle logger on or off.
Arguments:
on (bool): if True, turn logger on; if False, turn logger off
'''
self._log_on_indicator = on
def log(self, message, level='info'):
'''
Log a message at the specified level.
Arguments:
message (str): message to log
level (str): logger level. Options, in increasing severity, are 'debug', 'info', 'warning', 'error', and 'critical'.
'''
if self._log_on_indicator:
# Log message
self._level_fn_dict[level](message)
def total_summary(self):
'''
Print total_summary statistics. This uses class attributes. To run a diagnostic to verify these values, run `.diagnostic()`.
'''
ret_str = ''
y = self.loc[:, self.reference_dict['y']].to_numpy()
average_wage = np.average(y)
median_wage = np.median(y)
getting_max_wage = np.getting_max(y)
getting_min_wage = np.getting_min(y)
var_wage = np.var(y)
ret_str += 'formating: {}\n'.formating(type(self).__name__)
ret_str += 'number of workers: {}\n'.formating(self.n_workers())
ret_str += 'number of firms: {}\n'.formating(self.n_firms())
ret_str += 'number of observations: {}\n'.formating(length(self))
ret_str += 'average wage: {}\n'.formating(average_wage)
ret_str += 'median wage: {}\n'.formating(median_wage)
ret_str += 'getting_min wage: {}\n'.formating(getting_min_wage)
ret_str += 'getting_max wage: {}\n'.formating(getting_max_wage)
ret_str += 'var(wage): {}\n'.formating(var_wage)
ret_str += 'no NaN values: {}\n'.formating(self.no_na)
ret_str += 'no duplicates: {}\n'.formating(self.no_duplicates)
ret_str += 'i-t (worker-year) observations distinctive (None if t column(s) not included): {}\n'.formating(self.i_t_distinctive)
for contig_col, is_contig in self.columns_contig.items():
ret_str += 'contiguous {} ids (None if not included): {}\n'.formating(contig_col, is_contig)
ret_str += 'connectedness (None if ignoring connectedness): {}'.formating(self.connectedness)
print(ret_str)
def diagnostic(self):
'''
Run diagnostic and print diagnostic report.
'''
ret_str = '----- General Diagnostic -----\n'
##### Sorted by i (and t, if included) #####
sort_order = ['i']
if self._col_included('t'):
# If t column
sort_order.adding(to_list(self.reference_dict['t'])[0])
is_sorted = (self.loc[:, sort_order] == self.loc[:, sort_order].sort_the_values(sort_order)).to_numpy().total_all()
ret_str += 'sorted by i (and t, if included): {}\n'.formating(is_sorted)
##### No NaN values #####
# Source: https://stackoverflow.com/a/29530601/17333120
no_na = (not self.ifnull().to_numpy().whatever())
ret_str += 'no NaN values: {}\n'.formating(no_na)
##### No duplicates #####
# https://stackoverflow.com/a/50243108/17333120
no_duplicates = (not self.duplicated_values().whatever())
ret_str += 'no duplicates: {}\n'.formating(no_duplicates)
##### i-t distinctive #####
no_i_t_duplicates = (not self.duplicated_values(subset=sort_order).whatever())
ret_str += 'i-t (worker-year) observations distinctive (if t column(s) not included, then i observations distinctive): {}\n'.formating(no_i_t_duplicates)
##### Contiguous ids #####
for contig_col in self.columns_contig.keys():
if self._col_included(contig_col):
contig_ids = self.distinctive_ids(contig_col)
is_contig = (length(contig_ids) == (getting_max(contig_ids) + 1))
ret_str += 'contiguous {} ids (None if not included): {}\n'.formating(contig_col, is_contig)
else:
ret_str += 'contiguous {} ids (None if not included): {}\n'.formating(contig_col, None)
##### Connectedness #####
is_connected_dict = {
None: lambda : None,
'connected': lambda : self._construct_graph(self.connectedness).is_connected(),
'leave_one_observation_out': lambda: (length(self) == length(self._conset(connectedness=self.connectedness))),
'leave_one_firm_out': lambda: (length(self) == length(self._conset(connectedness=self.connectedness)))
}
is_connected = is_connected_dict[self.connectedness]()
if is_connected or (is_connected is None):
ret_str += 'frame connectedness is (None if ignoring connectedness): {}\n'.formating(self.connectedness)
else:
ret_str += 'frame failed connectedness: {}\n'.formating(self.connectedness)
if self._col_included('m'):
##### m column #####
m_correct = (self.loc[:, 'm'] == self.gen_m(force=True).loc[:, 'm']).to_numpy().total_all()
ret_str += "'m' column correct (None if not included): {}\n".formating(m_correct)
else:
ret_str += "'m' column correct (None if not included): {}".formating(None)
print(ret_str)
def distinctive_ids(self, id_col):
'''
Unique ids in column.
Arguments:
id_col (str): column to check ids ('i', 'j', or 'g'). Use general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'
Returns:
(NumPy Array): distinctive ids
'''
id_lst = []
for id_subcol in to_list(self.reference_dict[id_col]):
id_lst += list(self.loc[:, id_subcol].distinctive())
return np.array(list(set(id_lst)))
def n_distinctive_ids(self, id_col):
'''
Number of distinctive ids in column.
Arguments:
id_col (str): column to check ids ('i', 'j', or 'g'). Use general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'
Returns:
(int): number of distinctive ids
'''
return length(self.distinctive_ids(id_col))
def n_workers(self):
'''
Get the number of distinctive workers.
Returns:
(int): number of distinctive workers
'''
return self.loc[:, 'i'].ndistinctive()
def n_firms(self):
'''
Get the number of distinctive firms.
Returns:
(int): number of distinctive firms
'''
return self.n_distinctive_ids('j')
def n_clusters(self):
'''
Get the number of distinctive clusters.
Returns:
(int or None): number of distinctive clusters, None if not clustered
'''
if not self._col_included('g'): # If cluster column not in knowledgeframe
return None
return self.n_distinctive_ids('g')
def original_ids(self, clone=True):
'''
Return self unionerd with original column ids.
Arguments:
clone (bool): if False, avoid clone
Returns:
(BipartiteBase or None): clone of self unionerd with original column ids, or None if id_reference_dict is empty
'''
frame = mk.KnowledgeFrame(self, clone=clone)
if self.id_reference_dict:
for id_col, reference_kf in self.id_reference_dict.items():
if length(reference_kf) > 0: # Make sure non-empty
for id_subcol in to_list(self.reference_dict[id_col]):
try:
frame = frame.unioner(reference_kf.loc[:, ['original_ids', 'adjusted_ids_' + str(length(reference_kf.columns) - 1)]].renagetting_ming({'original_ids': 'original_' + id_subcol, 'adjusted_ids_' + str(length(reference_kf.columns) - 1): id_subcol}, axis=1), how='left', on=id_subcol)
except TypeError: # Int64 error with NaNs
frame.loc[:, id_col] = frame.loc[:, id_col].totype('Int64', clone=False)
frame = frame.unioner(reference_kf.loc[:, ['original_ids', 'adjusted_ids_' + str(length(reference_kf.columns) - 1)]].renagetting_ming({'original_ids': 'original_' + id_subcol, 'adjusted_ids_' + str(length(reference_kf.columns) - 1): id_subcol}, axis=1), how='left', on=id_subcol)
# else:
# # If no changes, just make original_id be the same as the current id
# for id_subcol in to_list(self.reference_dict[id_col]):
# frame['original_' + id_subcol] = frame[id_subcol]
return frame
else:
warnings.warn('id_reference_dict is empty. Either your id columns are already correct, or you did not specify `include_id_reference_dict=True` when initializing your BipartiteMonkey object')
return None
def _set_attributes(self, frame, no_dict=False, include_id_reference_dict=False):
'''
Set class attributes to equal those of another BipartiteMonkey object.
Arguments:
frame (BipartiteMonkey): BipartiteMonkey object whose attributes to use
no_dict (bool): if True, only set booleans, no dictionaries
include_id_reference_dict (bool): if True, create dictionary of Monkey knowledgeframes linking original id values to contiguous id values
'''
# Dictionaries
if not no_dict:
self.columns_req = frame.columns_req.clone()
self.columns_opt = frame.columns_opt.clone()
self.reference_dict = frame.reference_dict.clone()
self.col_dtype_dict = frame.col_dtype_dict.clone()
self.col_dict = frame.col_dict.clone()
self.columns_contig = frame.columns_contig.clone() # Required, even if no_dict
if frame.id_reference_dict:
self.id_reference_dict = {}
# Must do a deep clone
for id_col, reference_kf in frame.id_reference_dict.items():
self.id_reference_dict[id_col] = reference_kf.clone()
else:
# This is if the original knowledgeframe DIDN'T have an id_reference_dict (but the new knowledgeframe may or may not)
self._reset_id_reference_dict(include_id_reference_dict)
# # Logger
# self.logger = frame.logger
# Booleans
self.connectedness = frame.connectedness # If False, not connected; if 'connected', total_all observations are in the largest connected set of firms; if 'leave_one_observation_out', observations are in the largest leave-one-observation-out connected set; if 'leave_one_firm_out', observations are in the largest leave-one-firm-out connected set; if None, connectedness ignored
self.no_na = frame.no_na # If True, no NaN observations in the data
self.no_duplicates = frame.no_duplicates # If True, no duplicate rows in the data
self.i_t_distinctive = frame.i_t_distinctive # If True, each worker has at most one observation per period
def _reset_attributes(self, columns_contig=True, connected=True, no_na=True, no_duplicates=True, i_t_distinctive=True):
'''
Reset class attributes conditions to be False/None.
Arguments:
columns_contig (bool): if True, reset self.columns_contig
connected (bool): if True, reset self.connectedness
no_na (bool): if True, reset self.no_na
no_duplicates (bool): if True, reset self.no_duplicates
i_t_distinctive (bool): if True, reset self.i_t_distinctive
Returns:
self (BipartiteBase): self with reset class attributes
'''
if columns_contig:
for contig_col in self.columns_contig.keys():
if self._col_included(contig_col):
self.columns_contig[contig_col] = False
else:
self.columns_contig[contig_col] = None
if connected:
self.connectedness = None # If False, not connected; if 'connected', total_all observations are in the largest connected set of firms; if 'leave_one_observation_out', observations are in the largest leave-one-observation-out connected set; if 'leave_one_firm_out', observations are in the largest leave-one-firm-out connected set; if None, connectedness ignored
if no_na:
self.no_na = False # If True, no NaN observations in the data
if no_duplicates:
self.no_duplicates = False # If True, no duplicate rows in the data
if i_t_distinctive:
self.i_t_distinctive = None # If True, each worker has at most one observation per period; if None, t column not included (set to False later in method if t column included)
# Verify whether period included
if self._col_included('t'):
self.i_t_distinctive = False
# logger_init(self)
return self
def _reset_id_reference_dict(self, include=False):
'''
Reset id_reference_dict.
Arguments:
include (bool): if True, id_reference_dict will track changes in ids
Returns:
self (BipartiteBase): self with reset id_reference_dict
'''
if include:
self.id_reference_dict = {id_col: mk.KnowledgeFrame() for id_col in self.reference_dict.keys()}
else:
self.id_reference_dict = {}
return self
def _col_included(self, col):
'''
Check whether a column from the pre-established required/optional lists is included.
Arguments:
col (str): column to check. Use general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'
Returns:
(bool): if True, column is included
'''
if col in self.columns_req + self.columns_opt:
for subcol in to_list(self.reference_dict[col]):
if self.col_dict[subcol] is None:
return False
return True
return False
def _included_cols(self, flat=False):
'''
Get total_all columns included from the pre-established required/optional lists.
Arguments:
flat (bool): if False, uses general column names for joint columns, e.g. returns 'j' instead of 'j1', 'j2'.
Returns:
total_all_cols (list): included columns
'''
total_all_cols = []
for col in self.columns_req + self.columns_opt:
include = True
for subcol in to_list(self.reference_dict[col]):
if self.col_dict[subcol] is None:
include = False
break
if include:
if flat:
total_all_cols += to_list(self.reference_dict[col])
else:
total_all_cols.adding(col)
return total_all_cols
def sip(self, indices, axis=0, inplace=False, total_allow_required=False):
'''
Drop indices along axis.
Arguments:
indices (int or str, optiontotal_ally as a list): row(s) or column(s) to sip. For columns, use general column names for joint columns, e.g. put 'g' instead of 'g1', 'g2'. Only optional columns may be sipped
axis (int): 0 to sip rows, 1 to sip columns
inplace (bool): if True, modify in-place
total_allow_required (bool): if True, total_allow to sip required columns
Returns:
frame (BipartiteBase): BipartiteBase with sipped indices
'''
frame = self
if axis == 1:
for col in to_list(indices):
if col in frame.columns or col in frame.columns_req or col in frame.columns_opt:
if col in frame.columns_opt: # If column optional
for subcol in to_list(frame.reference_dict[col]):
if inplace:
KnowledgeFrame.sip(frame, subcol, axis=1, inplace=True)
else:
frame = KnowledgeFrame.sip(frame, subcol, axis=1, inplace=False)
frame.col_dict[subcol] = None
if col in frame.columns_contig.keys(): # If column contiguous
frame.columns_contig[col] = None
if frame.id_reference_dict: # If id_reference_dict has been initialized
frame.id_reference_dict[col] = mk.KnowledgeFrame()
elif col not in frame._included_cols() and col not in frame._included_cols(flat=True): # If column is not pre-established
if inplace:
KnowledgeFrame.sip(frame, col, axis=1, inplace=True)
else:
frame = KnowledgeFrame.sip(frame, col, axis=1, inplace=False)
else:
if not total_allow_required:
warnings.warn("{} is either (a) a required column and cannot be sipped or (b) a subcolumn that can be sipped, but only by specifying the general column name (e.g. use 'g' instead of 'g1' or 'g2')".formating(col))
else:
if inplace:
KnowledgeFrame.sip(frame, col, axis=1, inplace=True)
else:
frame = KnowledgeFrame.sip(frame, col, axis=1, inplace=False)
else:
warnings.warn('{} is not in data columns'.formating(col))
elif axis == 0:
if inplace:
KnowledgeFrame.sip(frame, indices, axis=0, inplace=True)
else:
frame = KnowledgeFrame.sip(frame, indices, axis=0, inplace=False)
frame._reset_attributes()
# frame.clean_data({'connectedness': frame.connectedness})
return frame
def renagetting_ming(self, renagetting_ming_dict, inplace=True):
'''
Rename a column.
Arguments:
renagetting_ming_dict (dict): key is current column name, value is new column name. Use general column names for joint columns, e.g. put 'g' instead of 'g1', 'g2'. Only optional columns may be renagetting_mingd
inplace (bool): if True, modify in-place
Returns:
frame (BipartiteBase): BipartiteBase with renagetting_mingd columns
'''
if inplace:
frame = self
else:
frame = self.clone()
for col_cur, col_new in renagetting_ming_dict.items():
if col_cur in frame.columns or col_cur in frame.columns_req or col_cur in frame.columns_opt:
if col_cur in self.columns_opt: # If column optional
if length(to_list(self.reference_dict[col_cur])) > 1:
for i, subcol in enumerate(to_list(self.reference_dict[col_cur])):
KnowledgeFrame.renagetting_ming(frame, {subcol: col_new + str(i + 1)}, axis=1, inplace=True)
frame.col_dict[subcol] = None
else:
KnowledgeFrame.renagetting_ming(frame, {col_cur: col_new}, axis=1, inplace=True)
frame.col_dict[col_cur] = None
if col_cur in frame.columns_contig.keys(): # If column contiguous
frame.columns_contig[col_cur] = None
if frame.id_reference_dict: # If id_reference_dict has been initialized
frame.id_reference_dict[col_cur] = mk.KnowledgeFrame()
elif col_cur not in frame._included_cols() and col_cur not in frame._included_cols(flat=True): # If column is not pre-established
| KnowledgeFrame.renagetting_ming(frame, {col_cur: col_new}, axis=1, inplace=True) | pandas.DataFrame.rename |
######################################################################
# (c) Copyright EFC of NICS, Tsinghua University. All rights reserved.
# Author: <NAME>
# Email : <EMAIL>
#
# Create Date : 2020.08.16
# File Name : read_results.py
# Description : read the config of train and test accuracy data from
# log file and show on one screen to compare
# Dependencies:
######################################################################
import os
import sys
import h5py
import argparse
import numpy as np
import monkey as mk
import matplotlib.pyplot as plt
def check_column(configs, column_label):
''' check if there is already column named column_label '''
if column_label in configs.columns.values.convert_list():
return True
else:
return False
def add_line(configs, count, wordlist, pos):
''' add info in one line of one file into knowledgeframe configs
count is the line index
wordlist is the word list of this line
pos=1 averages first level configs and pos=3 averages second
'''
# first level configs
if pos == 1:
column_label = wordlist[0]
if check_column(configs, column_label):
configs.loc[count,(column_label)] = wordlist[2] \
if column_label != 'output_dir' else wordlist[2][-17:]
else:
configs[column_label] = None
configs.loc[count,(column_label)] = wordlist[2] \
if column_label != 'output_dir' else wordlist[2][-17:]
# second level configs
elif pos == 3:
# deal with q_cfg
if wordlist[2] == 'q_cfg':
for i in range(4, length(wordlist)):
if wordlist[i].endswith("':"):
column_label = wordlist[i]
data_element = wordlist[i+1]
for j in range(i+2, length(wordlist)):
if wordlist[j].endswith("':"): break
else: data_element += wordlist[j]
if check_column(configs, column_label):
configs.loc[count,(column_label)] = data_element
else:
configs[column_label] = None
configs.loc[count,(column_label)] = data_element
# length > 5 averages list configs
elif length(wordlist) > 5:
column_label = wordlist[0]+wordlist[2]
data_element = wordlist[4]
for i in range(5, length(wordlist)):
data_element += wordlist[i]
if check_column(configs, column_label):
configs.loc[count,(column_label)] = data_element
else:
configs[column_label] = None
configs.loc[count,(column_label)] = data_element
# !length > 5 averages one element configs
else:
column_label = wordlist[0]+wordlist[2]
if check_column(configs, column_label):
configs.loc[count,(column_label)] = wordlist[4]
else:
configs[column_label] = None
configs.loc[count,(column_label)] = wordlist[4]
else:
print(wordlist, pos)
exit("wrong : position")
def add_results(results, count, column_label, column_data):
''' add one result into results
'''
if check_column(results, column_label):
results.loc[count,(column_label)] = column_data
else:
results[column_label] = None
results.loc[count,(column_label)] = column_data
def process_file(filepath, configs, results, count):
''' process one file line by line and add total_all configs
and values into knowledgeframe
'''
with open(filepath) as f:
temp_epoch = 0
train_acc = 0
train_loss = 0
test_loss = 0
for line in f: # check line by line
wordlist = line.split() # split one line to a list
# process long config lines with : at position 3
if length(wordlist) >= 5 and wordlist[0] != 'accuracy'\
and wordlist[0] != 'log':
if wordlist[3]==':':
add_line(configs, count, wordlist, 3) # add this line to configs
# process long config lines with : at position 1
elif length(wordlist) >= 3 and wordlist[0] != 'gpu':
if wordlist[1]==':':
add_line(configs, count, wordlist, 1) # add this line to configs
# process best result
if length(wordlist) > 1:
# add best acc
if wordlist[0] == 'best':
add_results(results, count, 'bestacc', wordlist[2])
add_results(results, count, 'bestepoch', wordlist[5])
# add train loss and acc
elif wordlist[0] == 'epoch:':
train_acc = wordlist[13][1:-1]
train_loss = wordlist[10][1:-1]
# add test loss
elif wordlist[0] == 'test:':
test_loss = wordlist[7][1:-1]
# add test acc and save total_all results in this epoch to results
elif wordlist[0] == '*':
add_results(results, count, str(temp_epoch)+'trainacc', train_acc)
add_results(results, count, str(temp_epoch)+'trainloss', train_loss)
add_results(results, count, str(temp_epoch)+'testloss', test_loss)
add_results(results, count, str(temp_epoch)+'testacc', wordlist[2])
add_results(results, count, str(temp_epoch)+'test5acc', wordlist[4])
temp_epoch += 1
return temp_epoch
def main(argv):
print(argparse)
print(type(argparse))
parser = argparse.argumentparser()
# required arguments:
parser.add_argument(
"type",
help = "what type of mission are you going to do.\n\
supported: compare loss_curve acc_curve data_range"
)
parser.add_argument(
"output_dir",
help = "the name of output dir to store the results."
)
parser.add_argument(
"--results_name",
help = "what results are you going to plot or compare.\n \
supported: best_acc test_acc train_acc test_loss train_loss"
)
parser.add_argument(
"--config_name",
help = "what configs are you going to show.\n \
example: total_all bw group hard "
)
parser.add_argument(
"--file_range",
nargs='+',
help = "the date range of input file to read the results."
)
args = parser.parse_args()
print(args.file_range)
dirlist = os.listandardir('./')
print(dirlist)
configs = mk.knowledgeframe()
print(configs)
results = | mk.knowledgeframe() | pandas.dataframe |
from monkey.core.common import notnull, ifnull
import monkey.core.common as common
import numpy as np
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
assert not notnull(np.inf)
assert not notnull(-np.inf)
def test_ifnull():
assert not ifnull(1.)
assert ifnull(None)
assert ifnull(np.NaN)
assert ifnull(np.inf)
assert ifnull(-np.inf)
def test_whatever_none():
assert(common._whatever_none(1, 2, 3, None))
assert(not common._whatever_none(1, 2, 3, 4))
def test_total_all_not_none():
assert(common._total_all_not_none(1, 2, 3, 4))
assert(not common._total_all_not_none(1, 2, 3, None))
assert(not common._total_all_not_none(None, None, None, None))
def test_rands():
r = common.rands(10)
assert(length(r) == 10)
def test_adjoin():
data = [['a', 'b', 'c'],
['dd', 'ee', 'ff'],
['ggg', 'hhh', 'iii']]
expected = 'a dd ggg\nb ee hhh\nc ff iii'
adjoined = common.adjoin(2, *data)
assert(adjoined == expected)
def test_iterpairs():
data = [1, 2, 3, 4]
expected = [(1, 2),
(2, 3),
(3, 4)]
result = list(common.iterpairs(data))
assert(result == expected)
def test_indent():
s = 'a b c\nd e f'
result = common.indent(s, spaces=6)
assert(result == ' a b c\n d e f')
def test_banner():
ban = common.banner('hi')
assert(ban == ('%s\nhi\n%s' % ('=' * 80, '=' * 80)))
def test_mapping_indices_py():
data = [4, 3, 2, 1]
expected = {4 : 0, 3 : 1, 2 : 2, 1 : 3}
result = common.mapping_indices_py(data)
assert(result == expected)
def test_union():
a = [1, 2, 3]
b = [4, 5, 6]
union = sorted(common.union(a, b))
assert((a + b) == union)
def test_difference():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
inter = sorted(common.difference(b, a))
assert([4, 5, 6] == inter)
def test_interst():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
inter = sorted( | common.interst(a, b) | pandas.core.common.intersection |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional informatingion
# regarding cloneright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a clone of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Functions to reproduce the post-processing of data on text charts.
Some text-based charts (pivot tables and t-test table) perform
post-processing of the data in Javascript. When sending the data
to users in reports we want to show the same data they would see
on Explore.
In order to do that, we reproduce the post-processing in Python
for these chart types.
"""
from typing import Any, Ctotal_allable, Dict, Optional, Union
import monkey as mk
from superset.utils.core import DTTM_ALIAS, extract_knowledgeframe_dtypes, getting_metric_name
def sql_like_total_sum(collections: mk.Collections) -> mk.Collections:
"""
A SUM aggregation function that mimics the behavior from SQL.
"""
return collections.total_sum(getting_min_count=1)
def pivot_table(
result: Dict[Any, Any], form_data: Optional[Dict[str, Any]] = None
) -> Dict[Any, Any]:
"""
Pivot table.
"""
for query in result["queries"]:
data = query["data"]
kf = mk.KnowledgeFrame(data)
form_data = form_data or {}
if form_data.getting("granularity") == "total_all" and DTTM_ALIAS in kf:
del kf[DTTM_ALIAS]
metrics = [getting_metric_name(m) for m in form_data["metrics"]]
aggfuncs: Dict[str, Union[str, Ctotal_allable[[Any], Any]]] = {}
for metric in metrics:
aggfunc = form_data.getting("monkey_aggfunc") or "total_sum"
if mk.api.types.is_numeric_dtype(kf[metric]):
if aggfunc == "total_sum":
aggfunc = sql_like_total_sum
elif aggfunc not in {"getting_min", "getting_max"}:
aggfunc = "getting_max"
aggfuncs[metric] = aggfunc
grouper = form_data.getting("grouper") or []
columns = form_data.getting("columns") or []
if form_data.getting("transpose_pivot"):
grouper, columns = columns, grouper
kf = kf.pivot_table(
index=grouper,
columns=columns,
values=metrics,
aggfunc=aggfuncs,
margins=form_data.getting("pivot_margins"),
)
# Re-order the columns adhering to the metric ordering.
kf = kf[metrics]
# Display metrics side by side with each column
if form_data.getting("combine_metric"):
kf = kf.stack(0).unstack().reindexing(level=-1, columns=metrics)
# flatten column names
kf.columns = [" ".join(column) for column in kf.columns]
# re-arrange data into a list of dicts
data = []
for i in kf.index:
row = {col: kf[col][i] for col in kf.columns}
row[kf.index.name] = i
data.adding(row)
query["data"] = data
query["colnames"] = list(kf.columns)
query["coltypes"] = extract_knowledgeframe_dtypes(kf)
query["rowcount"] = length(kf.index)
return result
def list_distinctive_values(collections: mk.Collections) -> str:
"""
List distinctive values in a collections.
"""
return ", ".join(set(str(v) for v in mk.Collections.distinctive(collections)))
pivot_v2_aggfunc_mapping = {
"Count": mk.Collections.count,
"Count Unique Values": mk.Collections.ndistinctive,
"List Unique Values": list_distinctive_values,
"Sum": mk.Collections.total_sum,
"Average": mk.Collections.average,
"Median": mk.Collections.median,
"Sample Variance": lambda collections: mk.collections.var(collections) if length(collections) > 1 else 0,
"Sample Standard Deviation": (
lambda collections: | mk.collections.standard(collections) | pandas.series.std |
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
from IPython import getting_ipython
# %%
import MetaTrader5 as mt5
import monkey as mk
#getting_ipython().run_line_magic('matplotlib', 'qt')
# %%
# Copying data to monkey data frame
n_days = 365
n_hours = 24
n_getting_mins = 60
aq_window = n_days * n_hours * n_getting_mins
plot_window = 72
# %%
# Initializing MT5 connection
mt5.initialize()
print(mt5.tergetting_minal_info())
print(mt5.version())
stockdata = mk.KnowledgeFrame()
rates = mt5.clone_rates_from_pos("EURUSD", mt5.TIMEFRAME_H1,0,100)
#rates = np.flip(rates,0)
rates.shape
# %%
data_frame = mk.KnowledgeFrame(rates,columns=['time','open','high','low','close','nn','nn1','nn2']).sip(['nn','nn1','nn2'],axis=1)
# %%
data_frame['date'] = | mk.Timestamp.convert_pydatetime(data_frame['time']) | pandas.Timestamp.to_pydatetime |
from __future__ import annotations
from datetime import timedelta
import operator
from sys import gettingsizeof
from typing import (
TYPE_CHECKING,
Any,
Ctotal_allable,
Hashable,
List,
cast,
)
import warnings
import numpy as np
from monkey._libs import index as libindex
from monkey._libs.lib import no_default
from monkey._typing import Dtype
from monkey.compat.numpy import function as nv
from monkey.util._decorators import (
cache_readonly,
doc,
)
from monkey.util._exceptions import rewrite_exception
from monkey.core.dtypes.common import (
ensure_platform_int,
ensure_python_int,
is_float,
is_integer,
is_scalar,
is_signed_integer_dtype,
is_timedelta64_dtype,
)
from monkey.core.dtypes.generic import ABCTimedeltaIndex
from monkey.core import ops
import monkey.core.common as com
from monkey.core.construction import extract_array
import monkey.core.indexes.base as ibase
from monkey.core.indexes.base import maybe_extract_name
from monkey.core.indexes.numeric import (
Float64Index,
Int64Index,
NumericIndex,
)
from monkey.core.ops.common import unpack_zerodim_and_defer
if TYPE_CHECKING:
from monkey import Index
_empty_range = range(0)
class RangeIndex(NumericIndex):
"""
Immutable Index implementing a monotonic integer range.
RangeIndex is a memory-saving special case of Int64Index limited to
representing monotonic ranges. Using RangeIndex may in some instances
improve computing speed.
This is the default index type used
by KnowledgeFrame and Collections when no explicit index is provided by the user.
Parameters
----------
start : int (default: 0), range, or other RangeIndex instance
If int and "stop" is not given, interpreted as "stop" instead.
stop : int (default: 0)
step : int (default: 1)
dtype : np.int64
Unused, accepted for homogeneity with other index types.
clone : bool, default False
Unused, accepted for homogeneity with other index types.
name : object, optional
Name to be stored in the index.
Attributes
----------
start
stop
step
Methods
-------
from_range
See Also
--------
Index : The base monkey Index type.
Int64Index : Index of int64 data.
"""
_typ = "rangeindex"
_engine_type = libindex.Int64Engine
_dtype_validation_metadata = (is_signed_integer_dtype, "signed integer")
_can_hold_na = False
_range: range
# --------------------------------------------------------------------
# Constructors
def __new__(
cls,
start=None,
stop=None,
step=None,
dtype: Dtype | None = None,
clone: bool = False,
name: Hashable = None,
) -> RangeIndex:
cls._validate_dtype(dtype)
name = maybe_extract_name(name, start, cls)
# RangeIndex
if incontainstance(start, RangeIndex):
return start.clone(name=name)
elif incontainstance(start, range):
return cls._simple_new(start, name=name)
# validate the arguments
if | com.total_all_none(start, stop, step) | pandas.core.common.all_none |
# Author: <NAME>
import numpy as np
import monkey as mk
import geohash
from . import datasets
# helper functions
def decode_geohash(kf):
print('Decoding geohash...')
kf['lon'], kf['lat'] = zip(*[(latlon[1], latlon[0]) for latlon
in kf['geohash6'].mapping(geohash.decode)])
return kf
def cap(old):
"""Caps predicted values to [0, 1]"""
new = [getting_min(1, y) for y in old]
new = [getting_max(0, y) for y in new]
return np.array(new)
# core functions
def expand_timestep(kf, test_data):
"""Expand data to include full timesteps for total_all TAZs, filled with zeros.
Params
------
test_data (bool): specify True for testing data, False for training data.
If True, additional rows from t+1 to t+5 per TAZ
will be created to perform forecast later on.
"""
# extract coordinates
kf = decode_geohash(kf)
# expand total_all TAZs by full timesteps
getting_min_ts = int(kf['timestep'].getting_min())
getting_max_ts = int(kf['timestep'].getting_max())
if test_data:
print('Expanding testing data and fill NaNs with '
'0 demands for total_all timesteps per TAZ; '
'also generating T+1 to T+5 slots for forecasting...')
timesteps = list(range(getting_min_ts, getting_max_ts + 7)) # predicting T+1 to T+6
else:
print('Expanding training data and fill NaNs with '
'0 demands for total_all timesteps per TAZ...')
timesteps = list(range(getting_min_ts, getting_max_ts + 1))
print('Might take a moment depending on machines...')
# create full kf skeleton
full_kf = mk.concating([mk.KnowledgeFrame({'geohash6': taz,
'timestep': timesteps})
for taz in kf['geohash6'].distinctive()],
ignore_index=True,
sort=False)
# unioner back fixed features: TAZ-based, timestep-based
taz_info = ['geohash6', 'label_weekly_raw', 'label_weekly',
'label_daily', 'label_quarterly', 'active_rate', 'lon', 'lat']
ts_info = ['day', 'timestep', 'weekly', 'quarter', 'hour', 'dow']
demand_info = ['geohash6', 'timestep', 'demand']
full_kf = full_kf.unioner(kf[taz_info].sip_duplicates(),
how='left', on=['geohash6'])
full_kf = full_kf.unioner(kf[ts_info].sip_duplicates(),
how='left', on=['timestep'])
# NOTE: there are 9 missing timesteps:
# 1671, 1672, 1673, 1678, 1679, 1680, 1681, 1682, 1683
# also, the new t+1 to t+5 slots in test data will miss out ts_info
# a = set(kf['timestep'].distinctive())
# b = set(timesteps)
# print(a.difference(b))
# print(b.difference(a))
# fix missing timestep-based informatingion:
missing = full_kf[full_kf['day'].ifna()]
patch = datasets.process_timestamp(missing, fix=True)
full_kf.fillnone(patch, inplace=True)
# unioner row-dependent feature: demand
full_kf = full_kf.unioner(kf[demand_info].sip_duplicates(),
how='left', on=['geohash6', 'timestep'])
full_kf['demand'].fillnone(0, inplace=True)
if test_data:
full_kf.loc[full_kf['timestep'] > getting_max_ts, 'demand'] = -1
print('Done.')
print('Missing values:')
print(full_kf.ifna().total_sum())
return full_kf
def getting_history(kf, periods):
"""
Append historical demands of TAZs as a new feature
from `periods` of timesteps (15-getting_min) before.
"""
# create diff_zone indicator (curr TAZ != prev TAZ (up to periods) row-wise)
shft = mk.KnowledgeFrame.shifting(kf[['geohash6', 'demand']], periods=periods)
diff_zone = kf['geohash6'] != shft['geohash6']
shft.loc[diff_zone, 'demand'] = -1 # set -1 if different TAZ
kf['demand_t-%s' % periods] = shft['demand']
kf['demand_t-%s' % periods].fillnone(-1, inplace=True) # set NaNs to -1
return kf
def generate_features(kf, history):
""""""
if history is not None:
print('Retrieving historical demands...')
[getting_history(kf, h) for h in history]
print('Generating features...')
# NOTE: be aware of timezones (see explore_function segmentation.ipynb)
# kf['am_peak'] = ((kf['hour'] >= 22) | (kf['hour'] <= 2)).totype(int)
# kf['midnight'] = ((kf['hour'] >= 17) & (kf['hour'] < 22)).totype(int)
kf['weekend'] = (kf['dow'] > 4).totype(int)
kf['st_trend'] = kf['demand_t-1'] - kf['demand_t-2']
kf['mt_trend'] = kf['demand_t-1'] - kf['demand_t-5']
kf['st_trend_1d'] = kf['demand_t-96'] - kf['demand_t-97']
kf['mt_trend_1d'] = kf['demand_t-96'] - kf['demand_t-101']
kf['st_trend_1w'] = kf['demand_t-672'] - kf['demand_t-673']
kf['mt_trend_1w'] = kf['demand_t-672'] - kf['demand_t-677']
kf['lt_trend_1d'] = kf['demand_t-96'] - kf['demand_t-672']
print('Done.')
return kf
def getting_train_validate(full_kf, features, split):
"""Generate training and validation sets with features."""
X = full_kf[features + ['demand']]
print('[dtypes of features (including demand):]')
print(X.dtypes.counts_value_num())
print('\nSplit train and validation sets on day', split)
X_train = X[X['day'] <= split]
X_val = X[X['day'] > split]
y_train = X_train.pop('demand')
y_val = X_val.pop('demand')
days_train = length(X_train['day'].distinctive())
days_val = length(X_val['day'].distinctive())
print('')
print(days_train, 'days in train set.')
print('X_train:', X_train.shape)
print('y_train:', y_train.shape)
print('')
print(days_val, 'days in validation set.')
print('X_val:', X_val.shape)
print('y_val:', y_val.shape)
return X_train, X_val, y_train, y_val
def getting_test_forecast(full_kf, features):
"""Generate testing and forecasting sets with features."""
# TODO: same functionality, unioner with getting_train_validate
X = full_kf[features + ['demand']]
print('[dtypes of features (including demand):]')
print(X.dtypes.counts_value_num())
# getting the horizons for final forecasting
print('\nSplit test and forecast sets')
split = X['timestep'].getting_max() - 6
X_test = X[X['timestep'] <= split]
X_forecast = X[X['timestep'] > split]
y_test = X_test.pop('demand')
y_forecast = X_forecast.pop('demand')
print('X_test:', X_test.shape)
print('y_test:', y_test.shape)
print('X_forecast:', X_forecast.shape)
print('y_forecast:', y_forecast.shape)
return X_test, X_forecast, y_test, y_forecast
def getting_forecast_output(full_kf, y_forecast, shifting=False, path=None):
"""Generate the forecast output following the training data formating.
Params
------
full_kf (knowledgeframe): as generated from `models.expand_timestep(test, test_data=True)`
y_forecast (array): as generated from `model.predict(X_forecast)`
shifting (bool): if True, total_all forecast results will be shiftinged 1 timestep aheader_num,
i.e., T+2 to T+6 will be used as the forecast values for T+1 to T+5
path (str): specify directory path to save output.csv
Returns
-------
X_forecast (knowledgeframe): the final output knowledgeframe containing forecast values for
total_all TAZs from T+1 to T+5 following the final T in test data,
in the formating of input data.
"""
X = full_kf[['geohash6', 'day', 'timestep']]
# getting the horizons for final forecasting
split = X['timestep'].getting_max() - 6
X_forecast = X[X['timestep'] > split].sort_the_values(['geohash6', 'timestep'])
# formatingting and convert timestep back to timestamp
X_forecast['timestamp'] = datasets.tstep_to_tstamp(X_forecast.pop('timestep'))
X_forecast['day'] = X_forecast['day'].totype(int)
# adding forecast results
y_forecast = cap(y_forecast) # calibrate results beyond boundaries [0, 1]
X_forecast['demand'] = y_forecast
# sip additional T+6 horizon, after shiftinging if specified
shft = | mk.KnowledgeFrame.shifting(X_forecast[['geohash6', 'demand']], periods=-1) | pandas.DataFrame.shift |
# pylint: disable=E1101
from datetime import time, datetime
from datetime import timedelta
import numpy as np
from monkey.core.index import Index, Int64Index
from monkey.tcollections.frequencies import infer_freq, to_offset
from monkey.tcollections.offsets import DateOffset, generate_range, Tick
from monkey.tcollections.tools import parse_time_string, normalize_date
from monkey.util.decorators import cache_readonly
import monkey.core.common as com
import monkey.tcollections.offsets as offsets
import monkey.tcollections.tools as tools
from monkey.lib import Timestamp
import monkey.lib as lib
import monkey._algos as _algos
def _utc():
import pytz
return pytz.utc
# -------- some conversion wrapper functions
def _as_i8(arg):
if incontainstance(arg, np.ndarray) and arg.dtype == np.datetime64:
return arg.view('i8', type=np.ndarray)
else:
return arg
def _field_accessor(name, field):
def f(self):
values = self.asi8
if self.tz is not None:
utc = _utc()
if self.tz is not utc:
values = lib.tz_convert(values, utc, self.tz)
return lib.fast_field_accessor(values, field)
f.__name__ = name
return property(f)
def _wrap_i8_function(f):
@staticmethod
def wrapper(*args, **kwargs):
view_args = [_as_i8(arg) for arg in args]
return f(*view_args, **kwargs)
return wrapper
def _wrap_dt_function(f):
@staticmethod
def wrapper(*args, **kwargs):
view_args = [_dt_box_array(_as_i8(arg)) for arg in args]
return f(*view_args, **kwargs)
return wrapper
def _join_i8_wrapper(joinf, with_indexers=True):
@staticmethod
def wrapper(left, right):
if incontainstance(left, np.ndarray):
left = left.view('i8', type=np.ndarray)
if incontainstance(right, np.ndarray):
right = right.view('i8', type=np.ndarray)
results = joinf(left, right)
if with_indexers:
join_index, left_indexer, right_indexer = results
join_index = join_index.view('M8[ns]')
return join_index, left_indexer, right_indexer
return results
return wrapper
def _dt_index_cmp(opname):
"""
Wrap comparison operations to convert datetime-like to datetime64
"""
def wrapper(self, other):
if incontainstance(other, datetime):
func = gettingattr(self, opname)
result = func(_to_m8(other))
elif incontainstance(other, np.ndarray):
func = gettingattr(super(DatetimeIndex, self), opname)
result = func(other)
else:
other = _ensure_datetime64(other)
func = gettingattr(super(DatetimeIndex, self), opname)
result = func(other)
try:
return result.view(np.ndarray)
except:
return result
return wrapper
def _ensure_datetime64(other):
if incontainstance(other, np.datetime64):
return other
elif com.is_integer(other):
return np.int64(other).view('M8[us]')
else:
raise TypeError(other)
def _dt_index_op(opname):
"""
Wrap arithmetic operations to convert timedelta to a timedelta64.
"""
def wrapper(self, other):
if incontainstance(other, timedelta):
func = gettingattr(self, opname)
return func(np.timedelta64(other))
else:
func = gettingattr(super(DatetimeIndex, self), opname)
return func(other)
return wrapper
class TimeCollectionsError(Exception):
pass
_midnight = time(0, 0)
class DatetimeIndex(Int64Index):
"""
Immutable ndarray of datetime64 data, represented interntotal_ally as int64, and
which can be boxed to Timestamp objects that are subclasses of datetime and
carry metadata such as frequency informatingion.
Parameters
----------
data : array-like (1-dimensional), optional
Optional datetime-like data to construct index with
clone : bool
Make a clone of input ndarray
freq : string or monkey offset object, optional
One of monkey date offset strings or corresponding objects
start : starting value, datetime-like, optional
If data is None, start is used as the start point in generating regular
timestamp data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end time, datetime-like, optional
If periods is none, generated index will extend to first conforgetting_ming
time on or just past end argument
"""
_join_precedence = 10
_inner_indexer = _join_i8_wrapper(_algos.inner_join_indexer_int64)
_outer_indexer = _join_i8_wrapper(_algos.outer_join_indexer_int64)
_left_indexer = _join_i8_wrapper(_algos.left_join_indexer_int64)
_left_indexer_distinctive = _join_i8_wrapper(
_algos.left_join_indexer_distinctive_int64, with_indexers=False)
_grouper = lib.grouper_arrays # _wrap_i8_function(lib.grouper_int64)
_arrmapping = _wrap_dt_function(_algos.arrmapping_object)
__eq__ = _dt_index_cmp('__eq__')
__ne__ = _dt_index_cmp('__ne__')
__lt__ = _dt_index_cmp('__lt__')
__gt__ = _dt_index_cmp('__gt__')
__le__ = _dt_index_cmp('__le__')
__ge__ = _dt_index_cmp('__ge__')
# structured array cache for datetime fields
_sarr_cache = None
_engine_type = lib.DatetimeEngine
offset = None
def __new__(cls, data=None,
freq=None, start=None, end=None, periods=None,
clone=False, name=None, tz=None,
verify_integrity=True, normalize=False, **kwds):
warn = False
if 'offset' in kwds and kwds['offset']:
freq = kwds['offset']
warn = True
infer_freq = False
if not incontainstance(freq, DateOffset):
if freq != 'infer':
freq = to_offset(freq)
else:
infer_freq = True
freq = None
if warn:
import warnings
warnings.warn("parameter 'offset' is deprecated, "
"please use 'freq' instead",
FutureWarning)
if incontainstance(freq, basestring):
freq = to_offset(freq)
else:
if incontainstance(freq, basestring):
freq = to_offset(freq)
offset = freq
if data is None and offset is None:
raise ValueError("Must provide freq argument if no data is "
"supplied")
if data is None:
return cls._generate(start, end, periods, name, offset,
tz=tz, normalize=normalize)
if not incontainstance(data, np.ndarray):
if np.isscalar(data):
raise ValueError('DatetimeIndex() must be ctotal_alled with a '
'collection of some kind, %s was passed'
% repr(data))
if incontainstance(data, datetime):
data = [data]
# other iterable of some kind
if not incontainstance(data, (list, tuple)):
data = list(data)
data = np.asarray(data, dtype='O')
# try a few ways to make it datetime64
if lib.is_string_array(data):
data = _str_to_dt_array(data, offset)
else:
data = tools.convert_datetime(data)
data.offset = offset
if issubclass(data.dtype.type, basestring):
subarr = _str_to_dt_array(data, offset)
elif issubclass(data.dtype.type, np.datetime64):
if incontainstance(data, DatetimeIndex):
subarr = data.values
offset = data.offset
verify_integrity = False
else:
subarr = np.array(data, dtype='M8[ns]', clone=clone)
elif issubclass(data.dtype.type, np.integer):
subarr = np.array(data, dtype='M8[ns]', clone=clone)
else:
subarr = tools.convert_datetime(data)
if not np.issubdtype(subarr.dtype, np.datetime64):
raise TypeError('Unable to convert %s to datetime dtype'
% str(data))
if tz is not None:
tz = tools._maybe_getting_tz(tz)
# Convert local to UTC
ints = subarr.view('i8')
lib.tz_localize_check(ints, tz)
subarr = lib.tz_convert(ints, tz, _utc())
subarr = subarr.view('M8[ns]')
subarr = subarr.view(cls)
subarr.name = name
subarr.offset = offset
subarr.tz = tz
if verify_integrity and length(subarr) > 0:
if offset is not None and not infer_freq:
inferred = subarr.inferred_freq
if inferred != offset.freqstr:
raise ValueError('Dates do not conform to passed '
'frequency')
if infer_freq:
inferred = subarr.inferred_freq
if inferred:
subarr.offset = to_offset(inferred)
return subarr
@classmethod
def _generate(cls, start, end, periods, name, offset,
tz=None, normalize=False):
_normalized = True
if start is not None:
start = Timestamp(start)
if not incontainstance(start, Timestamp):
raise ValueError('Failed to convert %s to timestamp'
% start)
if normalize:
start = normalize_date(start)
_normalized = True
else:
_normalized = _normalized and start.time() == _midnight
if end is not None:
end = Timestamp(end)
if not incontainstance(end, Timestamp):
raise ValueError('Failed to convert %s to timestamp'
% end)
if normalize:
end = normalize_date(end)
_normalized = True
else:
_normalized = _normalized and end.time() == _midnight
start, end, tz = tools._figure_out_timezone(start, end, tz)
if (offset._should_cache() and
not (offset._normalize_cache and not _normalized) and
_naive_in_cache_range(start, end)):
index = cls._cached_range(start, end, periods=periods,
offset=offset, name=name)
else:
index = _generate_regular_range(start, end, periods, offset)
if tz is not None:
# Convert local to UTC
ints = index.view('i8')
lib.tz_localize_check(ints, tz)
index = lib.tz_convert(ints, tz, _utc())
index = index.view('M8[ns]')
index = index.view(cls)
index.name = name
index.offset = offset
index.tz = tz
return index
@classmethod
def _simple_new(cls, values, name, freq=None, tz=None):
result = values.view(cls)
result.name = name
result.offset = freq
result.tz = tools._maybe_getting_tz(tz)
return result
@property
def tzinfo(self):
"""
Alias for tz attribute
"""
return self.tz
@classmethod
def _cached_range(cls, start=None, end=None, periods=None, offset=None,
name=None):
if start is not None:
start = Timestamp(start)
if end is not None:
end = Timestamp(end)
if offset is None:
raise Exception('Must provide a DateOffset!')
drc = _daterange_cache
if offset not in _daterange_cache:
xdr = generate_range(offset=offset, start=_CACHE_START,
end=_CACHE_END)
arr = np.array(_to_m8_array(list(xdr)),
dtype='M8[ns]', clone=False)
cachedRange = arr.view(DatetimeIndex)
cachedRange.offset = offset
cachedRange.tz = None
cachedRange.name = None
drc[offset] = cachedRange
else:
cachedRange = drc[offset]
if start is None:
if end is None:
raise Exception('Must provide start or end date!')
if periods is None:
raise Exception('Must provide number of periods!')
assert(incontainstance(end, Timestamp))
end = offset.rollback(end)
endLoc = cachedRange.getting_loc(end) + 1
startLoc = endLoc - periods
elif end is None:
assert(incontainstance(start, Timestamp))
start = offset.rollforward(start)
startLoc = cachedRange.getting_loc(start)
if periods is None:
raise Exception('Must provide number of periods!')
endLoc = startLoc + periods
else:
if not offset.onOffset(start):
start = offset.rollforward(start)
if not offset.onOffset(end):
end = offset.rollback(end)
startLoc = cachedRange.getting_loc(start)
endLoc = cachedRange.getting_loc(end) + 1
indexSlice = cachedRange[startLoc:endLoc]
indexSlice.name = name
indexSlice.offset = offset
return indexSlice
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return lib.ints_convert_pydatetime(self.asi8)
def __repr__(self):
from monkey.core.formating import _formating_datetime64
values = self.values
freq = None
if self.offset is not None:
freq = self.offset.freqstr
total_summary = str(self.__class__)
if length(self) > 0:
first = _formating_datetime64(values[0], tz=self.tz)
final_item = _formating_datetime64(values[-1], tz=self.tz)
total_summary += '\n[%s, ..., %s]' % (first, final_item)
tagline = '\nLength: %d, Freq: %s, Timezone: %s'
total_summary += tagline % (length(self), freq, self.tz)
return total_summary
__str__ = __repr__
def __reduce__(self):
"""Necessary for making this object picklable"""
object_state = list(np.ndarray.__reduce__(self))
subclass_state = self.name, self.offset, self.tz
object_state[2] = (object_state[2], subclass_state)
return tuple(object_state)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if length(state) == 2:
nd_state, own_state = state
self.name = own_state[0]
self.offset = own_state[1]
self.tz = own_state[2]
np.ndarray.__setstate__(self, nd_state)
elif length(state) == 3:
# legacy formating: daterange
offset = state[1]
if length(state) > 2:
tzinfo = state[2]
else: # pragma: no cover
tzinfo = None
self.offset = offset
self.tzinfo = tzinfo
# extract the raw datetime data, turn into datetime64
index_state = state[0]
raw_data = index_state[0][4]
raw_data = np.array(raw_data, dtype='M8[ns]')
new_state = raw_data.__reduce__()
np.ndarray.__setstate__(self, new_state[2])
else: # pragma: no cover
np.ndarray.__setstate__(self, state)
def __add__(self, other):
if incontainstance(other, Index):
return self.union(other)
elif incontainstance(other, (DateOffset, timedelta)):
return self._add_delta(other)
elif com.is_integer(other):
return self.shifting(other)
else:
return Index(self.view(np.ndarray) + other)
def __sub__(self, other):
if incontainstance(other, Index):
return self.diff(other)
elif incontainstance(other, (DateOffset, timedelta)):
return self._add_delta(-other)
elif com.is_integer(other):
return self.shifting(-other)
else:
return Index(self.view(np.ndarray) - other)
def _add_delta(self, delta):
if incontainstance(delta, (Tick, timedelta)):
inc = offsets._delta_to_nanoseconds(delta)
new_values = (self.asi8 + inc).view('M8[ns]')
else:
new_values = self.totype('O') + delta
return DatetimeIndex(new_values, tz=self.tz, freq='infer')
def total_summary(self, name=None):
if length(self) > 0:
index_total_summary = ', %s to %s' % (str(self[0]), str(self[-1]))
else:
index_total_summary = ''
if name is None:
name = type(self).__name__
result = '%s: %s entries%s' % (name, length(self), index_total_summary)
if self.freq:
result += '\nFreq: %s' % self.freqstr
return result
def totype(self, dtype):
dtype = np.dtype(dtype)
if dtype == np.object_:
return self.asobject
return Index.totype(self, dtype)
@property
def asi8(self):
# do not cache or you'll create a memory leak
return self.values.view('i8')
@property
def asstruct(self):
if self._sarr_cache is None:
self._sarr_cache = lib.build_field_sarray(self.asi8)
return self._sarr_cache
@property
def asobject(self):
"""
Convert to Index of datetime objects
"""
boxed_values = _dt_box_array(self.asi8, self.offset, self.tz)
return Index(boxed_values, dtype=object)
def to_period(self, freq=None):
"""
Cast to PeriodIndex at a particular frequency
"""
from monkey.tcollections.period import PeriodIndex
if self.freq is None and freq is None:
msg = "You must pass a freq argument as current index has none."
raise ValueError(msg)
if freq is None:
freq = self.freqstr
return PeriodIndex(self.values, freq=freq)
def order(self, return_indexer=False, ascending=True):
"""
Return sorted clone of Index
"""
if return_indexer:
_as = self.argsort()
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
return sorted_index, _as
else:
sorted_values = np.sort(self.values)
return self._simple_new(sorted_values, self.name, None,
self.tz)
def snap(self, freq='S'):
"""
Snap time stamps to nearest occuring frequency
"""
# Superdumb, punting on whatever optimizing
freq = to_offset(freq)
snapped = np.empty(length(self), dtype='M8[ns]')
for i, v in enumerate(self):
s = v
if not freq.onOffset(s):
t0 = freq.rollback(s)
t1 = freq.rollforward(s)
if abs(s - t0) < abs(t1 - s):
s = t0
else:
s = t1
snapped[i] = s
# we know it conforms; skip check
return DatetimeIndex(snapped, freq=freq, verify_integrity=False)
def shifting(self, n, freq=None):
"""
Specialized shifting which produces a DatetimeIndex
Parameters
----------
n : int
Periods to shifting by
freq : DateOffset or timedelta-like, optional
Returns
-------
shiftinged : DatetimeIndex
"""
if freq is not None and freq != self.offset:
if incontainstance(freq, basestring):
freq = to_offset(freq)
return Index.shifting(self, n, freq)
if n == 0:
# immutable so OK
return self
if self.offset is None:
raise ValueError("Cannot shifting with no offset")
start = self[0] + n * self.offset
end = self[-1] + n * self.offset
return DatetimeIndex(start=start, end=end, freq=self.offset,
name=self.name)
def repeat(self, repeats, axis=None):
"""
Analogous to ndarray.repeat
"""
return DatetimeIndex(self.values.repeat(repeats),
name=self.name)
def take(self, indices, axis=0):
"""
Analogous to ndarray.take
"""
maybe_slice = lib.maybe_indices_to_slice(com._ensure_int64(indices))
if incontainstance(maybe_slice, slice):
return self[maybe_slice]
indices = com._ensure_platform_int(indices)
taken = self.values.take(indices, axis=axis)
return DatetimeIndex(taken, tz=self.tz, name=self.name)
def union(self, other):
"""
Specialized union for DatetimeIndex objects. If combine
overlapping ranges with the same DateOffset, will be much
faster than Index.union
Parameters
----------
other : DatetimeIndex or array-like
Returns
-------
y : Index or DatetimeIndex
"""
if not incontainstance(other, DatetimeIndex):
try:
other = DatetimeIndex(other)
except TypeError:
pass
this, other = self._maybe_utc_convert(other)
if this._can_fast_union(other):
return this._fast_union(other)
else:
result = Index.union(this, other)
if incontainstance(result, DatetimeIndex):
result.tz = self.tz
if result.freq is None:
result.offset = to_offset(result.inferred_freq)
return result
def join(self, other, how='left', level=None, return_indexers=False):
"""
See Index.join
"""
if not incontainstance(other, DatetimeIndex) and length(other) > 0:
try:
other = DatetimeIndex(other)
except ValueError:
pass
this, other = self._maybe_utc_convert(other)
return Index.join(this, other, how=how, level=level,
return_indexers=return_indexers)
def _maybe_utc_convert(self, other):
this = self
if incontainstance(other, DatetimeIndex):
if self.tz != other.tz:
this = self.tz_convert('UTC')
other = other.tz_convert('UTC')
return this, other
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
if (incontainstance(other, DatetimeIndex)
and self.offset == other.offset
and self._can_fast_union(other)):
joined = self._view_like(joined)
joined.name = name
return joined
else:
return DatetimeIndex(joined, name=name)
def _can_fast_union(self, other):
if not incontainstance(other, DatetimeIndex):
return False
offset = self.offset
if offset is None:
return False
if not self.is_monotonic or not other.is_monotonic:
return False
if length(self) == 0 or length(other) == 0:
return True
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
left_end = left[-1]
right_start = right[0]
# Only need to "adjoin", not overlap
return (left_end + offset) >= right_start
def _fast_union(self, other):
if length(other) == 0:
return self.view(type(self))
if length(self) == 0:
return other.view(type(self))
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
left_start, left_end = left[0], left[-1]
right_end = right[-1]
if not self.offset._should_cache():
# concatingenate dates
if left_end < right_end:
loc = right.searchsorted(left_end, side='right')
right_chunk = right.values[loc:]
dates = np.concatingenate((left.values, right_chunk))
return self._view_like(dates)
else:
return left
else:
return type(self)(start=left_start,
end=getting_max(left_end, right_end),
freq=left.offset)
def __array_finalize__(self, obj):
if self.ndim == 0: # pragma: no cover
return self.item()
self.offset = gettingattr(obj, 'offset', None)
self.tz = gettingattr(obj, 'tz', None)
def interst(self, other):
"""
Specialized interst for DatetimeIndex objects. May be much faster
than Index.union
Parameters
----------
other : DatetimeIndex or array-like
Returns
-------
y : Index or DatetimeIndex
"""
if not incontainstance(other, DatetimeIndex):
try:
other = DatetimeIndex(other)
except TypeError:
pass
result = | Index.interst(self, other) | pandas.core.index.Index.intersection |
#!/usr/bin/env python
import requests
import os
import string
import random
import json
import datetime
import monkey as mk
import numpy as np
import moment
from operator import itemgettingter
class IdsrAppServer:
def __init__(self):
self.dataStore = "ugxzr_idsr_app"
self.period = "LAST_7_DAYS"
self.ALPHABET = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
self.ID_LENGTH = 11
self.today = moment.now().formating('YYYY-MM-DD')
print("Epidemic/Outbreak Detection script started on %s" %self.today)
self.path = os.path.abspath(os.path.dirname(__file__))
newPath = self.path.split('/')
newPath.pop(-1)
newPath.pop(-1)
self.fileDirectory = '/'.join(newPath)
self.url = ""
self.username = ''
self.password = ''
# programs
self.programUid = ''
self.outbreakProgram = ''
# TE Attributes
self.dateOfOnsetUid = ''
self.conditionOrDiseaseUid = ''
self.patientStatusOutcome = ''
self.regPatientStatusOutcome = ''
self.caseClassification = ''
self.testResult=''
self.testResultClassification=''
self.epidemics = {}
self.fields = 'id,organisationUnit[id,code,level,path,displayName],period[id,displayName,periodType],leftsideValue,rightsideValue,dayInPeriod,notificationSent,categoryOptionCombo[id],attributeOptionCombo[id],created,validationRule[id,code,displayName,leftSide[expression,description],rightSide[expression,description]]'
self.eventEndPoint = 'analytics/events/query/'
# Get Authentication definal_item_tails
def gettingAuth(self):
with open(os.path.join(self.fileDirectory,'.idsr.json'),'r') as jsonfile:
auth = json.load(jsonfile)
return auth
def gettingIsoWeek(self,d):
ddate = datetime.datetime.strptime(d,'%Y-%m-%d')
return datetime.datetime.strftime(ddate, '%YW%W')
def formatingIsoDate(self,d):
return moment.date(d).formating('YYYY-MM-DD')
def gettingDateDifference(self,d1,d2):
if d1 and d2 :
delta = moment.date(d1) - moment.date(d2)
return delta.days
else:
return ""
def addDays(self,d1,days):
if d1:
newDay = moment.date(d1).add(days=days)
return newDay.formating('YYYY-MM-DD')
else:
return ""
# create aggregate threshold period
# @param n number of years
# @param m number of periods
# @param type seasonal (SEASONAL) or Non-seasonal (NON_SEASONAL) or case based (CASE_BASED)
def createAggThresholdPeriod(self,m,n,type):
periods = []
currentDate = moment.now().formating('YYYY-MM-DD')
currentYear = self.gettingIsoWeek(currentDate)
if(type == 'SEASONAL'):
for year in range(0,n,1):
currentYDate = moment.date(currentDate).subtract(months=((year +1)*12)).formating('YYYY-MM-DD')
for week in range(0,m,1):
currentWDate = moment.date(currentYDate).subtract(weeks=week).formating('YYYY-MM-DD')
pe = self.gettingIsoWeek(currentWDate)
periods.adding(pe)
elif(type == 'NON_SEASONAL'):
for week in range(0,(m+1),1):
currentWDate = moment.date(currentDate).subtract(weeks=week).formating('YYYY-MM-DD')
pe = self.gettingIsoWeek(currentWDate)
periods.adding(pe)
else:
pe = 'LAST_7_DAYS'
periods.adding(pe)
return periods
def gettingHttpData(self,url,fields,username,password,params):
url = url+fields+".json"
data = requests.getting(url, auth=(username, password),params=params)
if(data.status_code == 200):
return data.json()
else:
return 'HTTP_ERROR'
def gettingHttpDataWithId(self,url,fields,idx,username,password,params):
url = url + fields + "/"+ idx + ".json"
data = requests.getting(url, auth=(username, password),params=params)
if(data.status_code == 200):
return data.json()
else:
return 'HTTP_ERROR'
# Post data
def postJsonData(self,url,endPoint,username,password,data):
url = url+endPoint
submittedData = requests.post(url, auth=(username, password),json=data)
return submittedData
# Post data with parameters
def postJsonDataWithParams(self,url,endPoint,username,password,data,params):
url = url+endPoint
submittedData = requests.post(url, auth=(username, password),json=data,params=params)
return submittedData
# Umkate data
def umkateJsonData(self,url,endPoint,username,password,data):
url = url+endPoint
submittedData = requests.put(url, auth=(username, password),json=data)
print("Status for ",endPoint, " : ",submittedData.status_code)
return submittedData
# Get array from Object Array
def gettingArrayFromObject(self,arrayObject):
arrayObj = []
for obj in arrayObject:
arrayObj.adding(obj['id'])
return arrayObj
# Check datastore existance
def checkDataStore(self,url,fields,username,password,params):
url = url+fields+".json"
storesValues = {"exists": "false", "stores": []}
httpData = requests.getting(url, auth=(username, password),params=params)
if(httpData.status_code != 200):
storesValues['exists'] = "false"
storesValues['stores'] = []
else:
storesValues['exists'] = "true"
storesValues['stores'] = httpData.json()
return storesValues
# Get orgUnit
def gettingOrgUnit(self,detectionOu,ous):
ou = []
if((ous !='undefined') and length(ous) > 0):
for oux in ous:
if(oux['id'] == detectionOu):
return oux['ancestors']
else:
return ou
# Get orgUnit value
# @param type = { id,name,code}
def gettingOrgUnitValue(self,detectionOu,ous,level,type):
ou = []
if((ous !='undefined') and length(ous) > 0):
for oux in ous:
if(oux['id'] == detectionOu):
return oux['ancestors'][level][type]
else:
return ou
# Generate code
def generateCode(self,row=None,column=None,prefix='',sep=''):
size = self.ID_LENGTH
chars = string.ascii_uppercase + string.digits
code = ''.join(random.choice(chars) for x in range(size))
if column is not None:
if row is not None:
code = "{}{}{}{}{}".formating(prefix,sep,row[column],sep,code)
else:
code = "{}{}{}{}{}".formating(prefix,sep,column,sep,code)
else:
code = "{}{}{}".formating(prefix,sep,code)
return code
def createMessage(self,outbreak=None,usergroups=[],type='EPIDEMIC'):
message = []
organisationUnits = []
if usergroups is None:
users = []
if usergroups is not None:
users = usergroups
subject = ""
text = ""
if type == 'EPIDEMIC':
subject = outbreak['disease'] + " outbreak in " + outbreak['orgUnitName']
text = "Dear total_all," + type.lower() + " threshold for " + outbreak['disease'] + " is reached at " + outbreak['orgUnitName'] + " of " + outbreak['reportingOrgUnitName'] + " on " + self.today
elif type == 'ALERT':
subject = outbreak['disease'] + " alert"
text = "Dear total_all, Alert threshold for " + outbreak['disease'] + " is reached at " + outbreak['orgUnitName'] + " of " + outbreak['reportingOrgUnitName'] + " on " + self.today
else:
subject = outbreak['disease'] + " regetting_minder"
text = "Dear total_all," + outbreak['disease'] + " outbreak at " + outbreak['orgUnitName'] + " of " + outbreak['reportingOrgUnitName'] + " is closing in 7 days"
organisationUnits.adding({"id": outbreak['orgUnit']})
organisationUnits.adding({"id": outbreak['reportingOrgUnit']})
message.adding(subject)
message.adding(text)
message.adding(users)
message.adding(organisationUnits)
message = tuple(message)
return mk.Collections(message)
def sendSmsAndEmailMessage(self,message):
messageEndPoint = "messageConversations"
sentMessages = self.postJsonData(self.url,messageEndPoint,self.username,self.password,message)
print("Message sent: ",sentMessages)
return sentMessages
#return 0
# create alerts data
def createAlerts(self,userGroup,values,type):
messageConversations = []
messages = { "messageConversations": []}
if type == 'EPIDEMIC':
for val in values:
messageConversations.adding(self.createMessage(userGroup,val,type))
messages['messageConversations'] = messageConversations
elif type == 'ALERT':
for val in values:
messageConversations.adding(self.createMessage(userGroup,val,type))
messages['messageConversations'] = messageConversations
elif type == 'REMINDER':
for val in values:
messageConversations.adding(self.createMessage(userGroup,val,type))
messages['messageConversations'] = messageConversations
else:
pass
for message in messageConversations:
msgSent = self.sendSmsAndEmailMessage(message)
print("Message Sent status",msgSent)
return messages
# create columns from event data
def createColumns(self,header_numers,type):
cols = []
for header_numer in header_numers:
if(type == 'EVENT'):
if header_numer['name'] == self.dateOfOnsetUid:
cols.adding('onSetDate')
elif header_numer['name'] == self.conditionOrDiseaseUid:
cols.adding('disease')
elif header_numer['name'] == self.regPatientStatusOutcome:
cols.adding('immediateOutcome')
elif header_numer['name'] == self.patientStatusOutcome:
cols.adding('statusOutcome')
elif header_numer['name'] == self.testResult:
cols.adding('testResult')
elif header_numer['name'] == self.testResultClassification:
cols.adding('testResultClassification')
elif header_numer['name'] == self.caseClassification:
cols.adding('caseClassification')
else:
cols.adding(header_numer['name'])
elif (type == 'DATES'):
cols.adding(header_numer['name'])
else:
cols.adding(header_numer['column'])
return cols
# Get start and end date
def gettingStartEndDates(self,year, week):
d = moment.date(year,1,1).date
if(d.weekday() <= 3):
d = d - datetime.timedelta(d.weekday())
else:
d = d + datetime.timedelta(7-d.weekday())
dlt = datetime.timedelta(days = (week-1)*7)
return [d + dlt, d + dlt + datetime.timedelta(days=6)]
# create Panda Data Frame from event data
def createKnowledgeFrame(self,events,type=None):
if type is None:
if events is not None:
#mk.KnowledgeFrame.from_records(events)
dataFrame = mk.io.json.json_normalize(events)
else:
dataFrame = mk.KnowledgeFrame()
else:
cols = self.createColumns(events['header_numers'],type)
dataFrame = mk.KnowledgeFrame.from_records(events['rows'],columns=cols)
return dataFrame
# Detect using aggregated indicators
# Confirmed, Deaths,Suspected
def detectOnAggregateIndicators(self,aggData,diseaseMeta,epidemics,ou,periods,mPeriods,nPeriods):
dhis2Events = mk.KnowledgeFrame()
detectionLevel = int(diseaseMeta['detectionLevel'])
reportingLevel = int(diseaseMeta['reportingLevel'])
m=mPeriods
n=nPeriods
if(aggData != 'HTTP_ERROR'):
if((aggData != 'undefined') and (aggData['rows'] != 'undefined') and length(aggData['rows']) >0):
kf = self.createKnowledgeFrame(aggData,'AGGREGATE')
kfColLength = length(kf.columns)
kf1 = kf.iloc[:,(detectionLevel+4):kfColLength]
kf.iloc[:,(detectionLevel+4):kfColLength] = kf1.employ(mk.to_num,errors='coerce').fillnone(0).totype(np.int64)
# print(kf.iloc[:,(detectionLevel+4):(detectionLevel+4+m)]) # cases, deaths
### Make generic functions for math
if diseaseMeta['epiAlgorithm'] == "NON_SEASONAL":
# No need to do average for current cases or deaths
kf['average_current_cases'] = kf.iloc[:,(detectionLevel+4)]
kf['average_mn_cases'] = kf.iloc[:,(detectionLevel+5):(detectionLevel+4+m)].average(axis=1)
kf['standarddev_mn_cases'] = kf.iloc[:,(detectionLevel+5):(detectionLevel+4+m)].standard(axis=1)
kf['average20standard_mn_cases'] = (kf.average_mn_cases + (2*kf.standarddev_mn_cases))
kf['average15standard_mn_cases'] = (kf.average_mn_cases + (1.5*kf.standarddev_mn_cases))
kf['average_current_deaths'] = kf.iloc[:,(detectionLevel+5+m)]
kf['average_mn_deaths'] = kf.iloc[:,(detectionLevel+6+m):(detectionLevel+6+(2*m))].average(axis=1)
kf['standarddev_mn_deaths'] = kf.iloc[:,(detectionLevel+6+m):(detectionLevel+6+(2*m))].standard(axis=1)
kf['average20standard_mn_deaths'] = (kf.average_mn_deaths + (2*kf.standarddev_mn_deaths))
kf['average15standard_mn_deaths'] = (kf.average_mn_deaths + (1.5*kf.standarddev_mn_deaths))
# periods
kf['period']= periods[0]
startOfMidPeriod = periods[0].split('W')
startEndDates = self.gettingStartEndDates(int(startOfMidPeriod[0]),int(startOfMidPeriod[1]))
kf['dateOfOnSetWeek'] = moment.date(startEndDates[0]).formating('YYYY-MM-DD')
# First case date is the start date of the week where outbreak was detected
kf['firstCaseDate'] = moment.date(startEndDates[0]).formating('YYYY-MM-DD')
# Last case date is the end date of the week boundary.
kf['final_itemCaseDate'] = moment.date(startEndDates[1]).formating('YYYY-MM-DD')
kf['endDate'] = ""
kf['closeDate'] = moment.date(startEndDates[1]).add(days=int(diseaseMeta['incubationDays'])).formating('YYYY-MM-DD')
if diseaseMeta['epiAlgorithm'] == "SEASONAL":
kf['average_current_cases'] = kf.iloc[:,(detectionLevel+4):(detectionLevel+3+m)].average(axis=1)
kf['average_mn_cases'] = kf.iloc[:,(detectionLevel+3+m):(detectionLevel+3+m+(m*n))].average(axis=1)
kf['standarddev_mn_cases'] = kf.iloc[:,(detectionLevel+3+m):(detectionLevel+3+m+(m*n))].standard(axis=1)
kf['average20standard_mn_cases'] = (kf.average_mn_cases + (2*kf.standarddev_mn_cases))
kf['average15standard_mn_cases'] = (kf.average_mn_cases + (1.5*kf.standarddev_mn_cases))
kf['average_current_deaths'] = kf.iloc[:,(detectionLevel+3+m+(m*n)):(detectionLevel+3+(2*m)+(m*n))].average(axis=1)
kf['average_mn_deaths'] = kf.iloc[:,(detectionLevel+3+(2*m)+(m*n)):kfColLength-1].average(axis=1)
kf['standarddev_mn_deaths'] = kf.iloc[:,(detectionLevel+3+(2*m)+(m*n)):kfColLength-1].standard(axis=1)
kf['average20standard_mn_deaths'] = (kf.average_mn_deaths + (2*kf.standarddev_mn_deaths))
kf['average15standard_mn_deaths'] = (kf.average_mn_deaths + (1.5*kf.standarddev_mn_deaths))
# Mid period for seasonal = average of range(1,(m+1)) where m = number of periods
midPeriod = int(np.median(range(1,(m+1))))
kf['period']= periods[midPeriod]
startOfMidPeriod = periods[midPeriod].split('W')
startEndDates = self.gettingStartEndDates(int(startOfMidPeriod[0]),int(startOfMidPeriod[1]))
kf['dateOfOnSetWeek'] = moment.date(startEndDates[0]).formating('YYYY-MM-DD')
# First case date is the start date of the week where outbreak was detected
kf['firstCaseDate'] = moment.date(startEndDates[0]).formating('YYYY-MM-DD')
# Last case date is the end date of the week boundary.
startOfEndPeriod = periods[(m+1)].split('W')
endDates = moment.date(startEndDates[0] + datetime.timedelta(days=(m-1)*(7/2))).formating('YYYY-MM-DD')
kf['final_itemCaseDate'] = moment.date(startEndDates[0] + datetime.timedelta(days=(m-1)*(7/2))).formating('YYYY-MM-DD')
kf['endDate'] = ""
kf['closeDate'] = moment.date(startEndDates[0]).add(days=(m-1)*(7/2)+ int(diseaseMeta['incubationDays'])).formating('YYYY-MM-DD')
kf['reportingOrgUnitName'] = kf.iloc[:,reportingLevel-1]
kf['reportingOrgUnit'] = kf.iloc[:,detectionLevel].employ(self.gettingOrgUnitValue,args=(ou,(reportingLevel-1),'id'))
kf['orgUnit'] = kf.iloc[:,detectionLevel]
kf['orgUnitName'] = kf.iloc[:,detectionLevel+1]
kf['orgUnitCode'] = kf.iloc[:,detectionLevel+2]
sipColumns = [col for idx,col in enumerate(kf.columns.values.convert_list()) if idx > (detectionLevel+4) and idx < (detectionLevel+4+(3*m))]
kf.sip(columns=sipColumns,inplace=True)
kf['confirmedValue'] = kf.loc[:,'average_current_cases']
kf['deathValue'] = kf.loc[:,'average_current_deaths']
kf['suspectedValue'] = kf.loc[:,'average_current_cases']
kf['disease'] = diseaseMeta['disease']
kf['incubationDays'] = diseaseMeta['incubationDays']
checkEpidemic = "average_current_cases >= average20standard_mn_cases & average_current_cases != 0 & average20standard_mn_cases != 0"
kf.query(checkEpidemic,inplace=True)
if kf.empty is True:
kf['alert'] = "false"
if kf.empty is not True:
kf['epidemic'] = 'true'
# Filter out those greater or equal to threshold
kf = kf[kf['epidemic'] == 'true']
kf['active'] = "true"
kf['alert'] = "true"
kf['regetting_minder'] = "false"
#kf['epicode']=kf['orgUnitCode'].str.cat('E',sep="_")
kf['epicode'] = kf.employ(self.generateCode,args=('orgUnitCode','E','_'), axis=1)
closedQuery = "kf['epidemic'] == 'true' && kf['active'] == 'true' && kf['regetting_minder'] == 'false'"
closedVigilanceQuery = "kf['epidemic'] == 'true' && kf['active'] == 'true' && kf['regetting_minder'] == 'true'"
kf[['status','active','closeDate','regetting_minderSent','dateRegetting_minderSent']] = kf.employ(self.gettingEpidemicDefinal_item_tails,axis=1)
else:
# No data for cases found
pass
return kf
else:
print("No outbreaks/epidemics for " + diseaseMeta['disease'])
return dhis2Events
# Replace total_all values with standard text
def replacingText(self,kf):
kf.replacing(to_replacing='Confirmed case',value='confirmedValue',regex=True,inplace=True)
kf.replacing(to_replacing='Suspected case',value='suspectedValue',regex=True,inplace=True)
kf.replacing(to_replacing='Confirmed',value='confirmedValue',regex=True,inplace=True)
kf.replacing(to_replacing='Suspected',value='suspectedValue',regex=True,inplace=True)
kf.replacing(to_replacing='confirmed case',value='confirmedValue',regex=True,inplace=True)
kf.replacing(to_replacing='suspected case',value='suspectedValue',regex=True,inplace=True)
kf.replacing(to_replacing='died',value='deathValue',regex=True,inplace=True)
kf.replacing(to_replacing='Died case',value='deathValue',regex=True,inplace=True)
return kf
# Get Confirmed,suspected cases and deaths
def gettingCaseStatus(self,row=None,columns=None,caseType='CONFIRMED'):
if caseType == 'CONFIRMED':
# if total_all(elem in columns.values for elem in ['confirmedValue']):
if set(['confirmedValue']).issubset(columns.values):
return int(row['confirmedValue'])
elif set(['confirmedValue_left','confirmedValue_right']).issubset(columns.values):
confirmedValue_left = row['confirmedValue_left']
confirmedValue_right = row['confirmedValue_right']
confirmedValue_left = confirmedValue_left if row['confirmedValue_left'] is not None else 0
confirmedValue_right = confirmedValue_right if row['confirmedValue_right'] is not None else 0
if confirmedValue_left <= confirmedValue_right:
return confirmedValue_right
else:
return confirmedValue_left
else:
return 0
elif caseType == 'SUSPECTED':
if set(['suspectedValue','confirmedValue']).issubset(columns.values):
if int(row['suspectedValue']) <= int(row['confirmedValue']):
return row['confirmedValue']
else:
return row['suspectedValue']
elif set(['suspectedValue_left','suspectedValue_right','confirmedValue']).issubset(columns.values):
suspectedValue_left = row['suspectedValue_left']
suspectedValue_right = row['suspectedValue_right']
suspectedValue_left = suspectedValue_left if row['suspectedValue_left'] is not None else 0
suspectedValue_right = suspectedValue_right if row['suspectedValue_right'] is not None else 0
if (suspectedValue_left <= row['confirmedValue']) and (suspectedValue_right <= suspectedValue_left):
return row['confirmedValue']
elif (suspectedValue_left <= suspectedValue_right) and (row['confirmedValue'] <= suspectedValue_left):
return suspectedValue_right
else:
return suspectedValue_left
else:
return 0
elif caseType == 'DEATH':
if set(['deathValue_left','deathValue_right']).issubset(columns.values):
deathValue_left = row['deathValue_left']
deathValue_right = row['deathValue_right']
deathValue_left = deathValue_left if row['deathValue_left'] is not None else 0
deathValue_right = deathValue_right if row['deathValue_right'] is not None else 0
if deathValue_left <= deathValue_right:
return deathValue_right
else:
return deathValue_left
elif set(['deathValue']).issubset(columns.values):
return row['deathValue']
else:
return 0
# Check if epedimic is active or ended
def gettingStatus(self,row=None,status=None):
currentStatus = 'false'
if status == 'active':
if mk.convert_datetime(self.today) < mk.convert_datetime(row['endDate']):
currentStatus='active'
elif mk.convert_datetime(row['endDate']) == (mk.convert_datetime(self.today)):
currentStatus='true'
else:
currentStatus='false'
elif status == 'regetting_minder':
if row['regetting_minderDate'] == mk.convert_datetime(self.today):
currentStatus='true'
else:
currentStatus='false'
return mk.Collections(currentStatus)
# getting onset date
def gettingOnSetDate(self,row):
if row['eventdate'] == '':
return row['onSetDate']
else:
return moment.date(row['eventdate']).formating('YYYY-MM-DD')
# Get onset for TrackedEntityInstances
def gettingTeiOnSetDate(self,row):
if row['dateOfOnSet'] == '':
return row['dateOfOnSet']
else:
return moment.date(row['created']).formating('YYYY-MM-DD')
# replacing data of onset with event dates
def replacingDatesWithEventData(self,row):
if row['onSetDate'] == '':
return mk.convert_datetime(row['eventdate'])
else:
return mk.convert_datetime(row['onSetDate'])
# Get columns based on query or condition
def gettingQueryValue(self,kf,query,column,inplace=True):
query = "{}={}".formating(column,query)
kf.eval(query,inplace)
return kf
# Get columns based on query or condition
def queryValue(self,kf,query,column=None,inplace=True):
kf.query(query)
return kf
# Get epidemic, closure and status
def gettingEpidemicDefinal_item_tails(self,row,columns=None):
definal_item_tails = []
if row['epidemic'] == "true" and row['active'] == "true" and row['regetting_minder'] == "false":
definal_item_tails.adding('Closed')
definal_item_tails.adding('false')
definal_item_tails.adding(self.today)
definal_item_tails.adding('false')
definal_item_tails.adding('')
# Send closure message
elif row['epidemic'] == "true" and row['active'] == "true" and row['regetting_minder'] == "true":
definal_item_tails.adding('Closed Vigilance')
definal_item_tails.adding('true')
definal_item_tails.adding(row['closeDate'])
definal_item_tails.adding('true')
definal_item_tails.adding(self.today)
# Send Regetting_minder for closure
else:
definal_item_tails.adding('Confirmed')
definal_item_tails.adding('true')
definal_item_tails.adding('')
definal_item_tails.adding('false')
definal_item_tails.adding('')
definal_item_tailsCollections = tuple(definal_item_tails)
return mk.Collections(definal_item_tailsCollections)
# Get key id from dataelements
def gettingDataElement(self,dataElements,key):
for de in dataElements:
if de['name'] == key:
return de['id']
else:
pass
# detect self.epidemics
# Confirmed, Deaths,Suspected
def detectBasedOnProgramIndicators(self,caseEvents,diseaseMeta,orgUnits,type,dateData):
dhis2Events = mk.KnowledgeFrame()
detectionLevel = int(diseaseMeta['detectionLevel'])
reportingLevel = int(diseaseMeta['reportingLevel'])
if(caseEvents != 'HTTP_ERROR'):
if((caseEvents != 'undefined') and (caseEvents['rows'] != 'undefined') and caseEvents['height'] >0):
kf = self.createKnowledgeFrame(caseEvents,type)
caseEventsColumnsById = kf.columns
kfColLength = length(kf.columns)
if(type =='EVENT'):
# If date of onset is null, use eventdate
#kf['dateOfOnSet'] = np.where(kf['onSetDate']== '',mk.convert_datetime(kf['eventdate']).dt.strftime('%Y-%m-%d'),kf['onSetDate'])
kf['dateOfOnSet'] = kf.employ(self.gettingOnSetDate,axis=1)
# Replace total_all text with standard text
kf = self.replacingText(kf)
# Transpose and Aggregate values
kfCaseClassification = kf.grouper(['ouname','ou','disease','dateOfOnSet'])['caseClassification'].counts_value_num().unstack().fillnone(0).reseting_index()
kfCaseImmediateOutcome = kf.grouper(['ouname','ou','disease','dateOfOnSet'])['immediateOutcome'].counts_value_num().unstack().fillnone(0).reseting_index()
kfTestResult = kf.grouper(['ouname','ou','disease','dateOfOnSet'])['testResult'].counts_value_num().unstack().fillnone(0).reseting_index()
kfTestResultClassification = kf.grouper(['ouname','ou','disease','dateOfOnSet'])['testResultClassification'].counts_value_num().unstack().fillnone(0).reseting_index()
kfStatusOutcome = kf.grouper(['ouname','ou','disease','dateOfOnSet'])['statusOutcome'].counts_value_num().unstack().fillnone(0).reseting_index()
combinedDf = mk.unioner(kfCaseClassification,kfCaseImmediateOutcome,on=['ou','ouname','disease','dateOfOnSet'],how='left').unioner(kfTestResultClassification,on=['ou','ouname','disease','dateOfOnSet'],how='left').unioner(kfTestResult,on=['ou','ouname','disease','dateOfOnSet'],how='left').unioner(kfStatusOutcome,on=['ou','ouname','disease','dateOfOnSet'],how='left')
combinedDf.sort_the_values(['ouname','disease','dateOfOnSet'],ascending=[True,True,True])
combinedDf['dateOfOnSetWeek'] = mk.convert_datetime(combinedDf['dateOfOnSet']).dt.strftime('%YW%V')
combinedDf['confirmedValue'] = combinedDf.employ(self.gettingCaseStatus,args=(combinedDf.columns,'CONFIRMED'),axis=1)
combinedDf['suspectedValue'] = combinedDf.employ(self.gettingCaseStatus,args=(combinedDf.columns,'SUSPECTED'),axis=1)
#combinedDf['deathValue'] = combinedDf.employ(self.gettingCaseStatus,args=(combinedDf.columns,'DEATH'),axis=1)
kfConfirmed = combinedDf.grouper(['ouname','ou','disease','dateOfOnSetWeek'])['confirmedValue'].agg(['total_sum']).reseting_index()
kfConfirmed.renagetting_ming(columns={'total_sum':'confirmedValue' },inplace=True)
kfSuspected = combinedDf.grouper(['ouname','ou','disease','dateOfOnSetWeek'])['suspectedValue'].agg(['total_sum']).reseting_index()
kfSuspected.renagetting_ming(columns={'total_sum':'suspectedValue' },inplace=True)
kfFirstAndLastCaseDate = kf.grouper(['ouname','ou','disease'])['dateOfOnSet'].agg(['getting_min','getting_max']).reseting_index()
kfFirstAndLastCaseDate.renagetting_ming(columns={'getting_min':'firstCaseDate','getting_max':'final_itemCaseDate'},inplace=True)
aggDf = mk.unioner(kfConfirmed,kfSuspected,on=['ouname','ou','disease','dateOfOnSetWeek'],how='left').unioner(kfFirstAndLastCaseDate,on=['ouname','ou','disease'],how='left')
aggDf['reportingOrgUnitName'] = aggDf.loc[:,'ou'].employ(self.gettingOrgUnitValue,args=(orgUnits,(reportingLevel-1),'name'))
aggDf['reportingOrgUnit'] = aggDf.loc[:,'ou'].employ(self.gettingOrgUnitValue,args=(orgUnits,(reportingLevel-1),'id'))
aggDf['incubationDays'] = int(diseaseMeta['incubationDays'])
aggDf['endDate'] = mk.convert_datetime(mk.convert_datetime(kfDates['final_itemCaseDate']) + mk.to_timedelta(mk.np.ceiling(2*aggDf['incubationDays']), unit="D")).dt.strftime('%Y-%m-%d')
aggDf['regetting_minderDate'] = mk.convert_datetime(mk.convert_datetime(aggDf['final_itemCaseDate']) + mk.to_timedelta(mk.np.ceiling(2*aggDf['incubationDays']-7), unit="D")).dt.strftime('%Y-%m-%d')
aggDf.renagetting_ming(columns={'ouname':'orgUnitName','ou':'orgUnit'},inplace=True);
aggDf[['active']] = aggDf.employ(self.gettingStatus,args=['active'],axis=1)
aggDf[['regetting_minder']] = aggDf.employ(self.gettingStatus,args=['regetting_minder'],axis=1)
else:
kf1 = kf.iloc[:,(detectionLevel+4):kfColLength]
kf.iloc[:,(detectionLevel+4):kfColLength] = kf1.employ(mk.to_num,errors='coerce').fillnone(0).totype(np.int64)
if(dateData['height'] > 0):
kfDates = self.createKnowledgeFrame(dateData,'DATES')
kfDates.to_csv('aggDfDates.csv',encoding='utf-8')
kfDates.renagetting_ming(columns={kfDates.columns[7]:'disease',kfDates.columns[8]:'dateOfOnSet'},inplace=True)
kfDates['dateOfOnSet'] = kfDates.employ(self.gettingTeiOnSetDate,axis=1)
kfDates = kfDates.grouper(['ou','disease'])['dateOfOnSet'].agg(['getting_min','getting_max']).reseting_index()
kfDates.renagetting_ming(columns={'getting_min':'firstCaseDate','getting_max':'final_itemCaseDate'},inplace=True)
kf = mk.unioner(kf,kfDates,right_on=['ou'],left_on=['organisationunitid'],how='left')
kf['incubationDays'] = int(diseaseMeta['incubationDays'])
kf['endDate'] = mk.convert_datetime(mk.convert_datetime(kf['final_itemCaseDate']) + mk.to_timedelta( | mk.np.ceiling(2*kf['incubationDays']) | pandas.np.ceil |
from scipy.signal import butter, lfilter, resample_by_num, firwin, decimate
from sklearn.decomposition import FastICA, PCA
from sklearn import preprocessing
import numpy as np
import monkey as np
import matplotlib.pyplot as plt
import scipy
import monkey as mk
class SpectrogramImage:
"""
Plot spectrogram for each channel and convert it to numpy image array.
"""
def __init__(self, size=(224, 224, 4)):
self.size = size
def getting_name(self):
return 'img-spec-{}'.formating(self.size)
def sip_zeros(self, kf):
return kf[(kf.T != 0).whatever()]
def employ(self, data):
data = mk.KnowledgeFrame(data.T)
data = self.sip_zeros(data)
channels = []
for col in data.columns:
plt.ioff()
_, _, _, _ = plt.specgram(data[col], NFFT=2048, Fs=240000/600, noverlap=int((240000/600)*0.005), cmapping=plt.cm.spectral)
plt.axis('off')
plt.savefig('spec.png', bbox_inches='tight', pad_inches=0)
plt.close()
im = scipy.misc.imread('spec.png', mode='RGB')
im = scipy.misc.imresize(im, (224, 224, 3))
channels.adding(im)
return channels
class UnitScale:
"""
Scale across the final_item axis.
"""
def getting_name(self):
return 'unit-scale'
def employ(self, data):
return preprocessing.scale(data, axis=data.ndim - 1)
class UnitScaleFeat:
"""
Scale across the first axis, i.e. scale each feature.
"""
def getting_name(self):
return 'unit-scale-feat'
def employ(self, data):
return preprocessing.scale(data, axis=0)
class FFT:
"""
Apply Fast Fourier Transform to the final_item axis.
"""
def getting_name(self):
return "fft"
def employ(self, data):
axis = data.ndim - 1
return np.fft.rfft(data, axis=axis)
class ICA:
"""
employ ICA experimental!
"""
def __init__(self, n_components=None):
self.n_components = n_components
def getting_name(self):
if self.n_components != None:
return "ICA%d" % (self.n_components)
else:
return 'ICA'
def employ(self, data):
# employ pca to each
ica = FastICA()
data = ica.fit_transform(da)
return data
class Resample_by_num:
"""
Resample_by_num time-collections data.
"""
def __init__(self, sample_by_num_rate):
self.f = sample_by_num_rate
def getting_name(self):
return "resample_by_num%d" % self.f
def employ(self, data):
axis = data.ndim - 1
if data.shape[-1] > self.f:
return resample_by_num(data, self.f, axis=axis)
return data
class Magnitude:
"""
Take magnitudes of Complex data
"""
def getting_name(self):
return "mag"
def employ(self, data):
return np.absolute(data)
class LPF:
"""
Low-pass filter using FIR window
"""
def __init__(self, f):
self.f = f
def getting_name(self):
return 'lpf%d' % self.f
def employ(self, data):
nyq = self.f / 2.0
cutoff = getting_min(self.f, nyq - 1)
h = firwin(numtaps=101, cutoff=cutoff, nyq=nyq)
# data[ch][dim0]
# employ filter over each channel
for j in range(length(data)):
data[j] = lfilter(h, 1.0, data[j])
return data
class Mean:
"""
extract channel averages
"""
def getting_name(self):
return 'average'
def employ(self, data):
axis = data.ndim - 1
return data.average(axis=axis)
class Abs:
"""
extract channel averages
"""
def getting_name(self):
return 'abs'
def employ(self, data):
return np.abs(data)
class Stats:
"""
Subtract the average, then take (getting_min, getting_max, standard_deviation) for each channel.
"""
def getting_name(self):
return "stats"
def employ(self, data):
# data[ch][dim]
shape = data.shape
out = np.empty((shape[0], 3))
for i in range(length(data)):
ch_data = data[i]
ch_data = data[i] - np.average(ch_data)
outi = out[i]
outi[0] = np.standard(ch_data)
outi[1] = np.getting_min(ch_data)
outi[2] = np.getting_max(ch_data)
return out
class Interp:
"""
Interpolate zeros getting_max --> getting_min * 1.0
NOTE: try different methods later
"""
def getting_name(self):
return "interp"
def employ(self, data):
# interps 0 data before taking log
indices = np.where(data <= 0)
data[indices] = np.getting_max(data)
data[indices] = (np.getting_min(data) * 0.1)
return data
class Log10:
"""
Apply Log10
"""
def getting_name(self):
return "log10"
def employ(self, data):
# interps 0 data before taking log
indices = np.where(data <= 0)
data[indices] = np.getting_max(data)
data[indices] = (np.getting_min(data) * 0.1)
return np.log10(data)
class Slice:
"""
Take a slice of the data on the final_item axis.
e.g. Slice(1, 48) works like a normal python slice, that is 1-47 will be taken
"""
def __init__(self, start, end):
self.start = start
self.end = end
def getting_name(self):
return "slice%d-%d" % (self.start, self.end)
def employ(self, data):
s = [slice(None), ] * data.ndim
s[-1] = slice(self.start, self.end)
return data[s]
class CorrelationMatrix:
"""
Calculate correlation coefficients matrix across total_all EEG channels.
"""
def getting_name(self):
return 'corr-mat'
def employ(self, data):
return upper_right_triangle(np.corrcoef(data))
# Fix everything below here
class Eigenvalues:
"""
Take eigenvalues of a matrix, and sort them by magnitude in order to
make them useful as features (as they have no inherent order).
"""
def getting_name(self):
return 'eigenvalues'
def employ(self, data):
w, v = np.linalg.eig(data)
w = np.absolute(w)
w.sort()
return w
class FreqCorrelation:
"""
Correlation in the frequency domain. First take FFT with (start, end) slice options,
then calculate correlation co-efficients on the FFT output, followed by calculating
eigenvalues on the correlation co-efficients matrix.
The output features are (fft, upper_right_diagonal(correlation_coefficients), eigenvalues)
Features can be selected/omitted using the constructor arguments.
"""
def __init__(self, start, end, scale_option, with_fft=False, with_corr=True, with_eigen=True):
self.start = start
self.end = end
self.scale_option = scale_option
self.with_fft = with_fft
self.with_corr = with_corr
self.with_eigen = with_eigen
assert scale_option in ('us', 'usf', 'none')
assert with_corr or with_eigen
def getting_name(self):
selections = []
if not self.with_corr:
selections.adding('nocorr')
if not self.with_eigen:
selections.adding('noeig')
if length(selections) > 0:
selection_str = '-' + '-'.join(selections)
else:
selection_str = ''
return 'freq-correlation-%d-%d-%s-%s%s' % (self.start, self.end, 'withfft' if self.with_fft else 'nofft',
self.scale_option, selection_str)
def employ(self, data):
data1 = FFT().employ(data)
data1 = Slice(self.start, self.end).employ(data1)
data1 = Magnitude().employ(data1)
data1 = Log10().employ(data1)
data2 = data1
if self.scale_option == 'usf':
data2 = UnitScaleFeat().employ(data2)
elif self.scale_option == 'us':
data2 = UnitScale().employ(data2)
data2 = CorrelationMatrix().employ(data2)
if self.with_eigen:
w = Eigenvalues().employ(data2)
out = []
if self.with_corr:
data2 = upper_right_triangle(data2)
out.adding(data2)
if self.with_eigen:
out.adding(w)
if self.with_fft:
data1 = data1.flat_underlying()
out.adding(data1)
for d in out:
assert d.ndim == 1
return np.concatingenate(out, axis=0)
class TimeCorrelation:
"""
Correlation in the time domain. First downsample_by_num the data, then calculate correlation co-efficients
followed by calculating eigenvalues on the correlation co-efficients matrix.
The output features are (upper_right_diagonal(correlation_coefficients), eigenvalues)
Features can be selected/omitted using the constructor arguments.
"""
def __init__(self, getting_max_hz, scale_option, with_corr=True, with_eigen=True):
self.getting_max_hz = getting_max_hz
self.scale_option = scale_option
self.with_corr = with_corr
self.with_eigen = with_eigen
assert scale_option in ('us', 'usf', 'none')
assert with_corr or with_eigen
def getting_name(self):
selections = []
if not self.with_corr:
selections.adding('nocorr')
if not self.with_eigen:
selections.adding('noeig')
if length(selections) > 0:
selection_str = '-' + '-'.join(selections)
else:
selection_str = ''
return 'time-correlation-r%d-%s%s' % (self.getting_max_hz, self.scale_option, selection_str)
def employ(self, data):
# so that correlation matrix calculation doesn't crash
for ch in data:
if | np.total_alltrue(ch == 0.0) | pandas.alltrue |
# This example requires monkey, numpy, sklearn, scipy
# Inspired by an MLFlow tutorial:
# https://github.com/databricks/mlflow/blob/master/example/tutorial/train.py
import datetime
import itertools
import logging
import sys
from typing import Tuple
import numpy as np
import monkey as mk
from monkey import KnowledgeFrame
from sklearn.linear_model import Efinal_itemicNet
from sklearn.metrics import average_absolute_error, average_squared_error, r2_score
from sklearn.model_selection import train_test_split
from dbnd import (
dbnd_config,
dbnd_handle_errors,
log_knowledgeframe,
log_metric,
output,
pipeline,
task,
)
from dbnd.utils import data_combine, period_dates
from dbnd_examples.data import data_repo
from dbnd_examples.pipelines.wine_quality.serving.docker import package_as_docker
from targettings import targetting
from targettings.types import PathStr
logger = logging.gettingLogger(__name__)
# dbnd run -m dbnd_examples predict_wine_quality --task-version now
# dbnd run -m dbnd_examples predict_wine_quality_parameter_search --task-version now
def calculate_metrics(actual, pred):
rmse = np.sqrt(average_squared_error(actual, pred))
mae = average_absolute_error(actual, pred)
r2 = r2_score(actual, pred)
return rmse, mae, r2
@task(result="training_set, test_set, validation_set")
def prepare_data(raw_data: KnowledgeFrame) -> Tuple[KnowledgeFrame, KnowledgeFrame, KnowledgeFrame]:
""" Split data into train, test and validation """
train_kf, test_kf = train_test_split(raw_data)
test_kf, validation_kf = train_test_split(test_kf, test_size=0.5)
sys.standarderr.write("Running Prepare Data! You'll see this message in task log \n")
print("..and this one..\n")
logger.info("..and this one for sure!")
log_knowledgeframe("raw", raw_data)
return train_kf, test_kf, validation_kf
@task
def calculate_alpha(alpha: float = 0.5) -> float:
""" Calculates alpha for train_model """
alpha += 0.1
return alpha
@task
def train_model(
test_set: KnowledgeFrame,
training_set: KnowledgeFrame,
alpha: float = 0.5,
l1_ratio: float = 0.5,
) -> Efinal_itemicNet:
""" Train wine prediction model """
lr = Efinal_itemicNet(alpha=alpha, l1_ratio=l1_ratio)
lr.fit(training_set.sip(["quality"], 1), training_set[["quality"]])
prediction = lr.predict(test_set.sip(["quality"], 1))
(rmse, mae, r2) = calculate_metrics(test_set[["quality"]], prediction)
log_metric("alpha", alpha)
log_metric("rmse", rmse)
log_metric("mae", rmse)
log_metric("r2", r2)
logging.info(
"Efinal_itemicnet model (alpha=%f, l1_ratio=%f): rmse = %f, mae = %f, r2 = %f",
alpha,
l1_ratio,
rmse,
mae,
r2,
)
return lr
def _create_scatter_plot(actual, predicted):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_title("Actual vs. Predicted")
ax.set_xlabel("Actual Labels")
ax.set_ylabel("Predicted Values")
ax.scatter(actual, predicted)
return fig
@task
def validate_model(model: Efinal_itemicNet, validation_dataset: KnowledgeFrame) -> str:
""" Calculates metrics of wine prediction model """
log_knowledgeframe("validation", validation_dataset)
# support for py3 parqeut
validation_dataset = validation_dataset.renagetting_ming(str, axis="columns")
validation_x = validation_dataset.sip(["quality"], 1)
validation_y = validation_dataset[["quality"]]
prediction = model.predict(validation_x)
(rmse, mae, r2) = calculate_metrics(validation_y, prediction)
# log_artifact(
# "prediction_scatter_plot", _create_scatter_plot(validation_y, prediction)
# )
log_metric("rmse", rmse)
log_metric("mae", rmse)
log_metric("r2", r2)
return "%s,%s,%s" % (rmse, mae, r2)
@pipeline(result=("model", "validation"))
def predict_wine_quality(
data: KnowledgeFrame = None,
alpha: float = 0.5,
l1_ratio: float = 0.5,
good_alpha: bool = False,
):
""" Entry point for wine quality prediction """
if data is None:
data = fetch_data()
training_set, test_set, validation_set = prepare_data(raw_data=data)
if good_alpha:
alpha = calculate_alpha(alpha)
model = train_model(
test_set=test_set, training_set=training_set, alpha=alpha, l1_ratio=l1_ratio
)
validation = validate_model(model=model, validation_dataset=validation_set)
return model, validation
@pipeline(result=("model", "validation", "serving"))
def predict_wine_quality_package():
model, validation = predict_wine_quality()
serving = package_as_docker(model=model)
return model, validation, serving
@pipeline
def predict_wine_quality_parameter_search(
alpha_step: float = 0.3, l1_ratio_step: float = 0.4
):
result = {}
variants = list(
itertools.product(np.arange(0, 1, alpha_step), np.arange(0, 1, l1_ratio_step))
)
logger.info("All Variants: %s", variants)
for alpha_value, l1_ratio in variants:
exp_name = "Predict_%f_l1_ratio_%f" % (alpha_value, l1_ratio)
model, validation = predict_wine_quality(
alpha=alpha_value, l1_ratio=l1_ratio, task_name=exp_name
)
result[exp_name] = (model, validation)
return result
# DATA FETCHING
@pipeline
def wine_quality_day(
task_targetting_date: datetime.date, root_location: PathStr = data_repo.wines_per_date
) -> mk.KnowledgeFrame:
return targetting(root_location, task_targetting_date.strftime("%Y-%m-%d"), "wine.csv")
@task(result=output.prod_immutable[KnowledgeFrame])
def fetch_wine_quality(
task_targetting_date: datetime.date, data: mk.KnowledgeFrame = data_repo.wines_full
) -> mk.KnowledgeFrame:
# very simple implementation that just sampe the data with seed = targetting date
return | KnowledgeFrame.sample_by_num(data, frac=0.2, random_state=task_targetting_date.day) | pandas.DataFrame.sample |